From 6104b0a85b0c0132686269f8af906140ec5d29c7 Mon Sep 17 00:00:00 2001
From: Selim Lakhdar <selim.lakhdar@gmail.com>
Date: Sun, 27 Feb 2022 14:29:46 +0100
Subject: [PATCH] WIP

---
 Makefile                      |  15 +-
 README.md                     |  93 +++-----
 doc/doc.md                    |  61 +++++
 notebooks/google_collab.ipynb | 413 ++++++++++++++++++++++++++++++++++
 4 files changed, 519 insertions(+), 63 deletions(-)
 create mode 100644 doc/doc.md
 create mode 100644 notebooks/google_collab.ipynb

diff --git a/Makefile b/Makefile
index 98092078..a7aee7e5 100644
--- a/Makefile
+++ b/Makefile
@@ -7,14 +7,17 @@ detect_base_weights:
 detect_base_weights_test:
 	python3 tools/yolov5/detect.py --weights data/base_weights/best.pt --source data/videos/2022-02-15-154943.mp4
 
-detect_colab_weights:
-	python3 tools/yolov5/detect.py --weights data/colab_weights/best.pt --source data/videos/VID_20220208_150059.mp4
+detect_colab_weights_v1:
+	python3 tools/yolov5/detect.py --weights data/colab_weights/v1/best.pt --source data/videos/VID_20220208_150059.mp4
 
-detect_colab_weights2:
-	python3 tools/yolov5/detect.py --weights data/colab_weights/best.pt --source data/videos/2022-02-15-154638.mp4
+detect_colab_weights_v2:
+	python3 tools/yolov5/detect.py --weights data/colab_weights/v2/best.pt --source data/videos/2022-02-15-154638.mp4
 
-live1:
-	python3 tools/yolov5/detect.py --weights data/colab_weights/best.pt --source 2
+live_cam0:
+	python3 tools/yolov5/detect.py --weights data/colab_weights/v2/best.pt --source 0
+
+live_cam2:
+	python3 tools/yolov5/detect.py --weights data/colab_weights/v2/best.pt --source 2
 
 annotation:
 	cd tools/Yolo-Annotation-Tool-New- && python3 main.py
diff --git a/README.md b/README.md
index c4572a1b..3d3c5f23 100644
--- a/README.md
+++ b/README.md
@@ -1,75 +1,54 @@
 PFE: Drone Detection & Position Estimation
 --------------------------------------
 
-b2e56c607ce767cf19ee86df662e0a3b6f69ee25
-
 # Author
 - Selim Lakhdar
   - selim.lakhdar.etu@univ-lille.fr
   - selim.lakhdar@gmail.com
 
-# Objectif
-- caméra débarquée: 
-  - détecter la flotte de drones en temps réel et localiser chaque robot en X, Y et idéalement avec l'attitude.
+# Project Objective
+- Detect the drone swarm in real time.
+- Localize each drone in (x, y, z).
 
-# Articles
+# Detect Drone
+This project uses the weights generated by [Detect Drone](https://github.com/tusharsarkar3/Detect_Drone).
 
-- [Dist-YOLO: Fast Object Detection with Distance Estimation](doc/articles/Dist-YOLO_Fast_Object_Detection_with_Distance_Estimation.pdf)
-  - source code: https://gitlab.com/EnginCZ/yolo-with-distance
-  - abstract:
-``` We present a scheme of how YOLO can be improved in order to predict the absolute distance of objects using only information from a monocular camera. 
-It is fully integrated into the original architecture by extending the prediction vectors, sharing the backbone’s weights with the bounding box regressor, and updating the original loss function by a part responsible for distance estimation.
-We designed two ways of handling the distance, class-agnostic and class-aware, proving class-agnostic creates smaller prediction vectors than class-aware and achieves better results. 
-We demonstrate that the subtasks of object detection and distance measurement are in synergy, resulting in the increase of the precision of the original bounding box functionality. 
-We show that using the KITTI dataset, the proposed scheme yields a mean relative error of 11% considering all eight classes and the distance range within [0, 150] m, which makes the solution highly competitive with existing
-approaches. Finally, we show that the inference speed is identical to the unmodified YOLO, 45 frames per second.
-```
+This drone detection system uses YOLOv5, a family of object detection architectures, trained on the [Drone Dataset](https://www.kaggle.com/dasmehdixtr/drone-dataset-uav).
 
-- [Automated Drone Detection Using YOLOv4](doc/articles/Automated_Drone_Detection_Using_YOLOv4.pdf)
-  - abstract:
-``` Drones are increasing in popularity and are reaching the public faster than ever before. Consequently, the chances of a drone being misused are multiplying. Automated drone detection is
-necessary to prevent unauthorized and unwanted drone interventions. In this research, we designed an automated drone detection system using YOLOv4. The model was trained using drone and
-bird datasets. We then evaluated the trained YOLOv4 model on the testing dataset, using mean average precision (mAP), frames per second (FPS), precision, recall, and F1-score as evaluation
-parameters. We next collected our own two types of drone videos, performed drone detections, and calculated the FPS to identify the speed of detection at three altitudes. Our methodology showed
-better performance than what has been found in previous similar studies, achieving a mAP of 74.36%, precision of 0.95, recall of 0.68, and F1-score of 0.79. For video detection, we achieved an FPS of 20.5
-on the DJI Phantom III and an FPS of 19.0 on the DJI Mavic Pro.
-```
+Weights are stored under [./data/base_weights](./data/base_weights).
 
-- [Learning Object-Specific Distance From a Monocular Image](doc/articles/Zhu_Learning_Object-Specific_Distance_From_a_Monocular_Image_ICCV_2019_paper.pdf)
-  - abstract:
-``` Environment perception, including object detection and distance estimation, is one of the most crucial tasks for autonomous driving. Many attentions have been paid on the object detection task, but distance estimation only arouse
-few interests in the computer vision community. Observing that the traditional inverse perspective mapping algorithm performs poorly for objects far away from the camera or on the curved road, in this paper, we address the challenging
-distance estimation problem by developing the first end-to-end learning-based model to directly predict distances for given objects in the images. Besides the introduction of a learning-based base model, we further design an enhanced
-model with a keypoint regressor, where a projection loss is defined to enforce a better distance estimation, especially for objects close to the camera. To facilitate the research on this task, we construct the extented KITTI and nuScenes
-(mini) object detection datasets with a distance for each object. Our experiments demonstrate that our proposed methods outperform alternative approaches (e.g., the traditional IPM, SVR) on object-specific distance estimation, particularly for the challenging cases that objects are on a curved
-road. Moreover, the performance margin implies the effectiveness of our enhanced method
-```
+# Install
+### [Yolov5](https://github.com/ultralytics/yolov5)
 
-- [YOLODrone: Improved YOLO Architecture for Object Detection in Drone Images](doc/articles/YOLODrone_Improved_YOLO_Architecture_for_Object_Detection_in_Drone_Images.pdf)
-  - abstract:
-``` Recent advances in robotics and computer vision fields yield emerging new applications for camera equipped drones. One such application is aerial-based object detection. However, despite the recent advances in the relevant literature,
-object detection remains as a challenging task in computer vision. Existing object detection algorithms demonstrate even lower performance on drone (or aerial) images since the object detection problem is a more challenging problem in aerial images,
-when compared to the detection task in ground-taken images. There are many reasons for that including: (i) the lack of large drone datasets with large object variance, (ii) the larger variance in both scale and orientation in drone images, and (iii) the
-difference in shape and texture features between the ground and the aerial images. In this paper, we introduce an improved YOLO algorithm: YOLODrone for detecting objects in drone images. We evaluate our algorithm on VisDrone2019 dataset and report
-improved results when compared to YOLOv3 algorithm.
+<div align="center">
+<p>
+   <a align="left" href="https://ultralytics.com/yolov5" target="_blank">
+   <img width="850" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/splash.jpg"></a>
+</p>
+<br>
+<p>
+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents <a href="https://ultralytics.com">Ultralytics</a>
+ open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
+</p>
+</div>
+
+```shell
+pip install -r tools/yolov5/requirements.txt
 ```
 
-# Links
-## YOLO
-- https://machinelearningmastery.com/how-to-perform-object-detection-with-yolov3-in-keras/
-- https://github.com/chuanenlin/drone-net
-  - DroneNet is Joseph Redmon's YOLO real-time object detection system retrained on 2664 images of DJI drones, labeled.
-- https://pjreddie.com/darknet/
-- https://github.com/dnjstjr93/Drone-Detection_YOLOv3_PyTorch
+### [Yolo-Annotation-Tool-New](https://github.com/ManivannanMurugavel/Yolo-Annotation-Tool-New-)
+
+This tool is used to annotate new images for YOLO.
 
-## Distance
-- https://www.pyimagesearch.com/2015/01/19/find-distance-camera-objectmarker-using-python-opencv/
-- https://github.com/paul-pias/Object-Detection-and-Distance-Measurement
-- http://emaraic.com/blog/distance-measurement
+```shell
+pip install Pillow tk
+```
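+
+To launch the annotation tool, a minimal sketch that mirrors the `annotation` Makefile target:
+
+```shell
+# opens the Tk-based labelling UI shipped in tools/
+cd tools/Yolo-Annotation-Tool-New- && python3 main.py
+```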
 
-## ArUco Markers
+# How To
+## Train
+- To train from the base weights, run the target below. Training uses the images listed in [./data/train.txt](./data/train.txt) and validation uses those in [./data/val.txt](./data/val.txt).
+```shell
+make train_from_base
+```
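+
+For reference, `make train_from_base` wraps the direct YOLOv5 call below (the command captured in the Colab notebook's training log); image size, batch size, and epoch count are the values used there and may need tuning:
+
+```shell
+python3 tools/yolov5/train.py --img 640 --batch 64 --epochs 15 --data my-drone.yaml --weights data/base_weights/best.pt
+```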
 
-- https://www.pyimagesearch.com/2020/12/21/detecting-aruco-markers-with-opencv-and-python/
-- https://automaticaddison.com/how-to-detect-aruco-markers-using-opencv-and-python/
-- https://pysource.com/2021/05/28/measure-size-of-an-object-with-opencv-aruco-marker-and-python/
-- https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html
\ No newline at end of file
+## Detect
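+- To run detection with the Colab-trained weights. A minimal sketch, assuming the targets and weight paths defined in the Makefile above:
+
+```shell
+# detect on a recorded test video
+make detect_colab_weights_v2
+
+# or run YOLOv5 directly against a webcam (device 0)
+python3 tools/yolov5/detect.py --weights data/colab_weights/v2/best.pt --source 0
+```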
\ No newline at end of file
diff --git a/doc/doc.md b/doc/doc.md
new file mode 100644
index 00000000..1e92245f
--- /dev/null
+++ b/doc/doc.md
@@ -0,0 +1,61 @@
+# Articles
+
+- [Dist-YOLO: Fast Object Detection with Distance Estimation](articles/Dist-YOLO_Fast_Object_Detection_with_Distance_Estimation.pdf)
+  - source code: https://gitlab.com/EnginCZ/yolo-with-distance
+  - abstract:
+``` We present a scheme of how YOLO can be improved in order to predict the absolute distance of objects using only information from a monocular camera. 
+It is fully integrated into the original architecture by extending the prediction vectors, sharing the backbone’s weights with the bounding box regressor, and updating the original loss function by a part responsible for distance estimation.
+We designed two ways of handling the distance, class-agnostic and class-aware, proving class-agnostic creates smaller prediction vectors than class-aware and achieves better results. 
+We demonstrate that the subtasks of object detection and distance measurement are in synergy, resulting in the increase of the precision of the original bounding box functionality. 
+We show that using the KITTI dataset, the proposed scheme yields a mean relative error of 11% considering all eight classes and the distance range within [0, 150] m, which makes the solution highly competitive with existing
+approaches. Finally, we show that the inference speed is identical to the unmodified YOLO, 45 frames per second.
+```
+
+- [Automated Drone Detection Using YOLOv4](articles/Automated_Drone_Detection_Using_YOLOv4.pdf)
+  - abstract:
+``` Drones are increasing in popularity and are reaching the public faster than ever before. Consequently, the chances of a drone being misused are multiplying. Automated drone detection is
+necessary to prevent unauthorized and unwanted drone interventions. In this research, we designed an automated drone detection system using YOLOv4. The model was trained using drone and
+bird datasets. We then evaluated the trained YOLOv4 model on the testing dataset, using mean average precision (mAP), frames per second (FPS), precision, recall, and F1-score as evaluation
+parameters. We next collected our own two types of drone videos, performed drone detections, and calculated the FPS to identify the speed of detection at three altitudes. Our methodology showed
+better performance than what has been found in previous similar studies, achieving a mAP of 74.36%, precision of 0.95, recall of 0.68, and F1-score of 0.79. For video detection, we achieved an FPS of 20.5
+on the DJI Phantom III and an FPS of 19.0 on the DJI Mavic Pro.
+```
+
+- [Learning Object-Specific Distance From a Monocular Image](articles/Zhu_Learning_Object-Specific_Distance_From_a_Monocular_Image_ICCV_2019_paper.pdf)
+  - abstract:
+``` Environment perception, including object detection and distance estimation, is one of the most crucial tasks for autonomous driving. Many attentions have been paid on the object detection task, but distance estimation only arouse
+few interests in the computer vision community. Observing that the traditional inverse perspective mapping algorithm performs poorly for objects far away from the camera or on the curved road, in this paper, we address the challenging
+distance estimation problem by developing the first end-to-end learning-based model to directly predict distances for given objects in the images. Besides the introduction of a learning-based base model, we further design an enhanced
+model with a keypoint regressor, where a projection loss is defined to enforce a better distance estimation, especially for objects close to the camera. To facilitate the research on this task, we construct the extented KITTI and nuScenes
+(mini) object detection datasets with a distance for each object. Our experiments demonstrate that our proposed methods outperform alternative approaches (e.g., the traditional IPM, SVR) on object-specific distance estimation, particularly for the challenging cases that objects are on a curved
+road. Moreover, the performance margin implies the effectiveness of our enhanced method
+```
+
+- [YOLODrone: Improved YOLO Architecture for Object Detection in Drone Images](articles/YOLODrone_Improved_YOLO_Architecture_for_Object_Detection_in_Drone_Images.pdf)
+  - abstract:
+``` Recent advances in robotics and computer vision fields yield emerging new applications for camera equipped drones. One such application is aerial-based object detection. However, despite the recent advances in the relevant literature,
+object detection remains as a challenging task in computer vision. Existing object detection algorithms demonstrate even lower performance on drone (or aerial) images since the object detection problem is a more challenging problem in aerial images,
+when compared to the detection task in ground-taken images. There are many reasons for that including: (i) the lack of large drone datasets with large object variance, (ii) the larger variance in both scale and orientation in drone images, and (iii) the
+difference in shape and texture features between the ground and the aerial images. In this paper, we introduce an improved YOLO algorithm: YOLODrone for detecting objects in drone images. We evaluate our algorithm on VisDrone2019 dataset and report
+improved results when compared to YOLOv3 algorithm.
+```
+
+# Links
+## YOLO
+- https://machinelearningmastery.com/how-to-perform-object-detection-with-yolov3-in-keras/
+- https://github.com/chuanenlin/drone-net
+  - DroneNet is Joseph Redmon's YOLO real-time object detection system retrained on 2664 images of DJI drones, labeled.
+- https://pjreddie.com/darknet/
+- https://github.com/dnjstjr93/Drone-Detection_YOLOv3_PyTorch
+
+## Distance
+- https://www.pyimagesearch.com/2015/01/19/find-distance-camera-objectmarker-using-python-opencv/
+- https://github.com/paul-pias/Object-Detection-and-Distance-Measurement
+- http://emaraic.com/blog/distance-measurement
+
+## ArUco Markers
+
+- https://www.pyimagesearch.com/2020/12/21/detecting-aruco-markers-with-opencv-and-python/
+- https://automaticaddison.com/how-to-detect-aruco-markers-using-opencv-and-python/
+- https://pysource.com/2021/05/28/measure-size-of-an-object-with-opencv-aruco-marker-and-python/
+- https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html
\ No newline at end of file
diff --git a/notebooks/google_collab.ipynb b/notebooks/google_collab.ipynb
new file mode 100644
index 00000000..bfb29290
--- /dev/null
+++ b/notebooks/google_collab.ipynb
@@ -0,0 +1,413 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+  "colab": {
+   "name": "pfe_drone.ipynb",
+   "provenance": [],
+   "collapsed_sections": []
+  },
+  "kernelspec": {
+   "name": "python3",
+   "display_name": "Python 3"
+  },
+  "language_info": {
+   "name": "python"
+  },
+  "accelerator": "GPU"
+ },
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "source": [
+    "PFE: Drone Detection & Position Estimation\n",
+    "--------------------------------------\n",
+    "\n",
+    "# Author\n",
+    "- Selim Lakhdar\n",
+    "  - selim.lakhdar.etu@univ-lille.fr\n",
+    "  - selim.lakhdar@gmail.com\n",
+    "\n",
+    "This notebook was used with Google Colab to train YOLO network"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "Cloning into 'pfe-drone-position-detection'...\n",
+      "remote: Enumerating objects: 15189, done.\u001B[K\n",
+      "remote: Counting objects: 100% (1295/1295), done.\u001B[K\n",
+      "remote: Compressing objects: 100% (1234/1234), done.\u001B[K\n",
+      "remote: Total 15189 (delta 71), reused 1262 (delta 50), pack-reused 13894\u001B[K\n",
+      "Receiving objects: 100% (15189/15189), 1.31 GiB | 30.93 MiB/s, done.\n",
+      "Resolving deltas: 100% (228/228), done.\n",
+      "Checking out files: 100% (8152/8152), done.\n",
+      "mv: cannot move 'pfe-drone-position-detection/data' to './data': Directory not empty\n",
+      "mv: cannot move 'pfe-drone-position-detection/doc' to './doc': Directory not empty\n",
+      "mv: cannot move 'pfe-drone-position-detection/scripts' to './scripts': Directory not empty\n",
+      "mv: cannot move 'pfe-drone-position-detection/tools' to './tools': Directory not empty\n",
+      "mv: cannot move 'pfe-drone-position-detection/.git' to './.git': Directory not empty\n",
+      "data  doc  Makefile  README.md\tscripts  tools\twandb\n"
+     ]
+    }
+   ],
+   "source": [
+    "!git clone https://gitlab.univ-lille.fr/selim.lakhdar.etu/pfe-drone-position-detection.git\n",
+    "!rm -rf sample_data/\n",
+    "!mv pfe-drone-position-detection/* .\n",
+    "!mv pfe-drone-position-detection/.git .\n",
+    "!mv pfe-drone-position-detection/.gitignore .\n",
+    "!rm -rf pfe-drone-position-detection\n",
+    "!ls"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "!pip install -r tools/yolov5/requirements.txt\n",
+    "!pip install wandb"
+   ],
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "v74mhjO14PV4",
+    "outputId": "a4b32607-8c60-4c75-cbb5-610b05a5c03a"
+   },
+   "execution_count": null,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "Requirement already satisfied: matplotlib>=3.2.2 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 4)) (3.2.2)\n",
+      "Requirement already satisfied: numpy>=1.18.5 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 5)) (1.21.5)\n",
+      "Requirement already satisfied: opencv-python>=4.1.2 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 6)) (4.1.2.30)\n",
+      "Requirement already satisfied: Pillow>=7.1.2 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 7)) (7.1.2)\n",
+      "Requirement already satisfied: PyYAML>=5.3.1 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 8)) (6.0)\n",
+      "Requirement already satisfied: requests>=2.23.0 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 9)) (2.23.0)\n",
+      "Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 10)) (1.4.1)\n",
+      "Requirement already satisfied: torch>=1.7.0 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 11)) (1.10.0+cu111)\n",
+      "Requirement already satisfied: torchvision>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 12)) (0.11.1+cu111)\n",
+      "Requirement already satisfied: tqdm>=4.41.0 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 13)) (4.62.3)\n",
+      "Requirement already satisfied: tensorboard>=2.4.1 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 16)) (2.8.0)\n",
+      "Requirement already satisfied: pandas>=1.1.4 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 20)) (1.3.5)\n",
+      "Requirement already satisfied: seaborn>=0.11.0 in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 21)) (0.11.2)\n",
+      "Requirement already satisfied: thop in /usr/local/lib/python3.7/dist-packages (from -r tools/yolov5/requirements.txt (line 37)) (0.0.31.post2005241907)\n",
+      "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.2->-r tools/yolov5/requirements.txt (line 4)) (2.8.2)\n",
+      "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.2->-r tools/yolov5/requirements.txt (line 4)) (3.0.7)\n",
+      "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.2->-r tools/yolov5/requirements.txt (line 4)) (1.3.2)\n",
+      "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.2->-r tools/yolov5/requirements.txt (line 4)) (0.11.0)\n",
+      "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->-r tools/yolov5/requirements.txt (line 9)) (3.0.4)\n",
+      "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->-r tools/yolov5/requirements.txt (line 9)) (1.24.3)\n",
+      "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->-r tools/yolov5/requirements.txt (line 9)) (2.10)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->-r tools/yolov5/requirements.txt (line 9)) (2021.10.8)\n",
+      "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch>=1.7.0->-r tools/yolov5/requirements.txt (line 11)) (3.10.0.2)\n",
+      "Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (3.17.3)\n",
+      "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (3.3.6)\n",
+      "Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (0.4.6)\n",
+      "Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (1.43.0)\n",
+      "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (1.8.1)\n",
+      "Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (0.6.1)\n",
+      "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (0.37.1)\n",
+      "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (1.0.1)\n",
+      "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (1.0.0)\n",
+      "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (57.4.0)\n",
+      "Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (1.35.0)\n",
+      "Requirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.1.4->-r tools/yolov5/requirements.txt (line 20)) (2018.9)\n",
+      "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from absl-py>=0.4->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (1.15.0)\n",
+      "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (0.2.8)\n",
+      "Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (4.2.4)\n",
+      "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (4.8)\n",
+      "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (1.3.1)\n",
+      "Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (4.11.1)\n",
+      "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (3.7.0)\n",
+      "Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (0.4.8)\n",
+      "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.4.1->-r tools/yolov5/requirements.txt (line 16)) (3.2.0)\n",
+      "Requirement already satisfied: wandb in /usr/local/lib/python3.7/dist-packages (0.12.10)\n",
+      "Requirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from wandb) (6.0)\n",
+      "Requirement already satisfied: six>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.15.0)\n",
+      "Requirement already satisfied: docker-pycreds>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (0.4.0)\n",
+      "Requirement already satisfied: yaspin>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.1.0)\n",
+      "Requirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.17.3)\n",
+      "Requirement already satisfied: Click!=8.0.0,>=7.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (7.1.2)\n",
+      "Requirement already satisfied: requests<3,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.23.0)\n",
+      "Requirement already satisfied: shortuuid>=0.5.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.0.8)\n",
+      "Requirement already satisfied: pathtools in /usr/local/lib/python3.7/dist-packages (from wandb) (0.1.2)\n",
+      "Requirement already satisfied: sentry-sdk>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.5.6)\n",
+      "Requirement already satisfied: promise<3,>=2.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.3)\n",
+      "Requirement already satisfied: GitPython>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.1.27)\n",
+      "Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.8.2)\n",
+      "Requirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (5.4.8)\n",
+      "Requirement already satisfied: gitdb<5,>=4.0.1 in /usr/local/lib/python3.7/dist-packages (from GitPython>=1.0.0->wandb) (4.0.9)\n",
+      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from GitPython>=1.0.0->wandb) (3.10.0.2)\n",
+      "Requirement already satisfied: smmap<6,>=3.0.1 in /usr/local/lib/python3.7/dist-packages (from gitdb<5,>=4.0.1->GitPython>=1.0.0->wandb) (5.0.0)\n",
+      "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (1.24.3)\n",
+      "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (3.0.4)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (2021.10.8)\n",
+      "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (2.10)\n",
+      "Requirement already satisfied: termcolor<2.0.0,>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from yaspin>=1.0.0->wandb) (1.1.0)\n"
+     ]
+    }
+   ]
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "import wandb\n",
+    "wandb.login()"
+   ],
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "ArIkwV7a6kEw",
+    "outputId": "3a508361-7385-452f-ea37-19b0798901fa"
+   },
+   "execution_count": null,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stderr",
+     "text": [
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Currently logged in as: \u001B[33mcapsian\u001B[0m (use `wandb login --relogin` to force relogin)\n"
+     ]
+    },
+    {
+     "output_type": "execute_result",
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "metadata": {},
+     "execution_count": 3
+    }
+   ]
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "!make train_from_base"
+   ],
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "outputId": "352655c5-e6aa-4b4e-f18d-c1a24d35fee4",
+    "id": "CSaDACb36F6G"
+   },
+   "execution_count": null,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "python3 tools/yolov5/train.py --img 640 --batch 64 --epochs 15 --data my-drone.yaml --weights data/base_weights/best.pt\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Currently logged in as: \u001B[33mcapsian\u001B[0m (use `wandb login --relogin` to force relogin)\n",
+      "\u001B[34m\u001B[1mtrain: \u001B[0mweights=data/base_weights/best.pt, cfg=, data=my-drone.yaml, hyp=tools/yolov5/data/hyps/hyp.scratch.yaml, epochs=15, batch_size=64, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=tools/yolov5/runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
+      "\u001B[34m\u001B[1mgithub: \u001B[0mskipping check (not a git repository), for updates see https://github.com/ultralytics/yolov5\n",
+      "YOLOv5 🚀 e1937ed torch 1.10.0+cu111 CUDA:0 (Tesla T4, 15110MiB)\n",
+      "\n",
+      "\u001B[34m\u001B[1mhyperparameters: \u001B[0mlr0=0.01, lrf=0.1, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
+      "\u001B[34m\u001B[1mTensorBoard: \u001B[0mStart with 'tensorboard --logdir tools/yolov5/runs/train', view at http://localhost:6006/\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Tracking run with wandb version 0.12.10\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Syncing run \u001B[33meager-eon-8\u001B[0m\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: ⭐️ View project at \u001B[34m\u001B[4mhttps://wandb.ai/capsian/train\u001B[0m\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: 🚀 View run at \u001B[34m\u001B[4mhttps://wandb.ai/capsian/train/runs/1iikjrtr\u001B[0m\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Run data is saved locally in /content/wandb/run-20220225_221152-1iikjrtr\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Run `wandb offline` to turn off syncing.\n",
+      "\n",
+      "\n",
+      "                 from  n    params  module                                  arguments                     \n",
+      "  0                -1  1      3520  models.common.Focus                     [3, 32, 3]                    \n",
+      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
+      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
+      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
+      "  4                -1  3    156928  models.common.C3                        [128, 128, 3]                 \n",
+      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
+      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
+      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
+      "  8                -1  1    656896  models.common.SPP                       [512, 512, [5, 9, 13]]        \n",
+      "  9                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
+      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
+      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
+      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
+      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
+      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
+      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
+      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
+      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
+      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
+      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
+      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
+      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
+      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
+      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
+      " 24      [17, 20, 23]  1     16182  models.yolo.Detect                      [1, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
+      "Model Summary: 283 layers, 7063542 parameters, 7063542 gradients, 16.4 GFLOPs\n",
+      "\n",
+      "Transferred 361/361 items from data/base_weights/best.pt\n",
+      "Scaled weight_decay = 0.0005\n",
+      "\u001B[34m\u001B[1moptimizer:\u001B[0m SGD with parameter groups 59 weight (no decay), 62 weight, 62 bias\n",
+      "\u001B[34m\u001B[1malbumentations: \u001B[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n",
+      "\u001B[34m\u001B[1mtrain: \u001B[0mScanning '/content/data/train.cache' images and labels... 2620 found, 0 missing, 1298 empty, 0 corrupt: 100% 2620/2620 [00:00<?, ?it/s]\n",
+      "\u001B[34m\u001B[1mval: \u001B[0mScanning '/content/data/val.cache' images and labels... 2620 found, 0 missing, 1298 empty, 0 corrupt: 100% 2620/2620 [00:00<?, ?it/s]\n",
+      "Plotting labels to tools/yolov5/runs/train/exp7/labels.jpg... \n",
+      "\n",
+      "\u001B[34m\u001B[1mAutoAnchor: \u001B[0m4.35 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
+      "Image sizes 640 train, 640 val\n",
+      "Using 2 dataloader workers\n",
+      "Logging results to \u001B[1mtools/yolov5/runs/train/exp7\u001B[0m\n",
+      "Starting training for 15 epochs...\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      0/14     13.1G   0.05833   0.02065         0       158       640: 100% 41/41 [02:24<00:00,  3.52s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:43<00:00,  2.08s/it]\n",
+      "                 all       2620       3495      0.717       0.57      0.661      0.209\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      1/14     13.2G   0.04846   0.01885         0       148       640: 100% 41/41 [02:17<00:00,  3.35s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:43<00:00,  2.05s/it]\n",
+      "                 all       2620       3495      0.781      0.739       0.79      0.344\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      2/14     13.2G   0.04516   0.01728         0       163       640: 100% 41/41 [02:16<00:00,  3.33s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:44<00:00,  2.12s/it]\n",
+      "                 all       2620       3495      0.526      0.795      0.693      0.256\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      3/14     13.2G   0.04498   0.01324         0       102       640: 100% 41/41 [02:17<00:00,  3.36s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:44<00:00,  2.13s/it]\n",
+      "                 all       2620       3495      0.875      0.879       0.93      0.429\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      4/14     13.2G   0.04372   0.01084         0       116       640: 100% 41/41 [02:16<00:00,  3.33s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:43<00:00,  2.08s/it]\n",
+      "                 all       2620       3495      0.946      0.929      0.963      0.509\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      5/14     13.2G    0.0415  0.009547         0       129       640: 100% 41/41 [02:17<00:00,  3.36s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:43<00:00,  2.07s/it]\n",
+      "                 all       2620       3495      0.933      0.962      0.972      0.539\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      6/14     13.2G   0.03943  0.008741         0       145       640: 100% 41/41 [02:15<00:00,  3.30s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:43<00:00,  2.07s/it]\n",
+      "                 all       2620       3495      0.941      0.963      0.975      0.585\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      7/14     13.2G   0.03854  0.008306         0       121       640: 100% 41/41 [02:14<00:00,  3.29s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:43<00:00,  2.09s/it]\n",
+      "                 all       2620       3495      0.935      0.951      0.968       0.57\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      8/14     13.2G   0.03647   0.00781         0       154       640: 100% 41/41 [02:16<00:00,  3.34s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:42<00:00,  2.04s/it]\n",
+      "                 all       2620       3495      0.959      0.973      0.971      0.588\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "      9/14     13.2G   0.03522  0.007714         0       119       640: 100% 41/41 [02:14<00:00,  3.28s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:42<00:00,  2.04s/it]\n",
+      "                 all       2620       3495      0.969      0.978      0.984      0.619\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "     10/14     13.2G   0.03185  0.007365         0       160       640: 100% 41/41 [02:15<00:00,  3.30s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:42<00:00,  2.02s/it]\n",
+      "                 all       2620       3495      0.962      0.978      0.985      0.629\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "     11/14     13.2G   0.03115  0.007297         0       192       640: 100% 41/41 [02:15<00:00,  3.32s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:43<00:00,  2.05s/it]\n",
+      "                 all       2620       3495      0.971      0.982      0.987      0.666\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "     12/14     13.2G   0.02999   0.00688         0       166       640: 100% 41/41 [02:13<00:00,  3.26s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:42<00:00,  2.01s/it]\n",
+      "                 all       2620       3495       0.97      0.984      0.987      0.665\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "     13/14     13.2G   0.02763  0.006638         0       184       640: 100% 41/41 [02:09<00:00,  3.17s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:41<00:00,  1.97s/it]\n",
+      "                 all       2620       3495      0.972      0.983      0.986      0.673\n",
+      "\n",
+      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
+      "     14/14     13.2G   0.02522  0.006546         0       168       640: 100% 41/41 [02:08<00:00,  3.12s/it]\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:41<00:00,  2.00s/it]\n",
+      "                 all       2620       3495      0.968      0.987      0.989      0.691\n",
+      "\n",
+      "15 epochs completed in 0.757 hours.\n",
+      "Optimizer stripped from tools/yolov5/runs/train/exp7/weights/last.pt, 14.5MB\n",
+      "Optimizer stripped from tools/yolov5/runs/train/exp7/weights/best.pt, 14.5MB\n",
+      "\n",
+      "Validating tools/yolov5/runs/train/exp7/weights/best.pt...\n",
+      "Fusing layers... \n",
+      "Model Summary: 224 layers, 7053910 parameters, 0 gradients, 16.3 GFLOPs\n",
+      "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 21/21 [00:37<00:00,  1.78s/it]\n",
+      "                 all       2620       3495      0.968      0.987      0.989      0.692\n",
+      "\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Waiting for W&B process to finish, PID 882... (success).\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:                                                                                \n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Run history:\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:        metrics/mAP_0.5 ▁▄▂▇▇███████████\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:   metrics/mAP_0.5:0.95 ▁▃▂▄▅▆▆▆▆▇▇█████\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:      metrics/precision ▄▅▁▆█▇█▇████████\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         metrics/recall ▁▄▅▆▇██▇████████\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         train/box_loss █▆▅▅▅▄▄▄▃▃▂▂▂▂▁\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         train/cls_loss ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         train/obj_loss █▇▆▄▃▂▂▂▂▂▁▁▁▁▁\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:           val/box_loss █▅▇▅▄▄▅▃▃▂▃▁▂▁▁▁\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:           val/cls_loss ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:           val/obj_loss █▇▅▄▃▃▂▂▂▂▁▁▁▁▁▁\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:                  x/lr0 ▁▃▄▅▆▇████▇▇▆▅▄\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:                  x/lr1 ▁▃▄▅▆▇████▇▇▆▅▄\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:                  x/lr2 ██▇▇▆▆▅▅▄▄▃▃▂▂▁\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: \n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Run summary:\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:             best/epoch 14\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:           best/mAP_0.5 0.98907\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:      best/mAP_0.5:0.95 0.69145\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         best/precision 0.96771\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:            best/recall 0.98684\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:        metrics/mAP_0.5 0.98909\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:   metrics/mAP_0.5:0.95 0.69166\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:      metrics/precision 0.9677\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         metrics/recall 0.98684\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         train/box_loss 0.02522\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         train/cls_loss 0.0\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:         train/obj_loss 0.00655\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:           val/box_loss 0.01644\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:           val/cls_loss 0.0\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:           val/obj_loss 0.00565\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:                  x/lr0 0.00098\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:                  x/lr1 0.00098\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m:                  x/lr2 0.03958\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: \n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Synced 5 W&B file(s), 497 media file(s), 1 artifact file(s) and 0 other file(s)\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Synced \u001B[33meager-eon-8\u001B[0m: \u001B[34mhttps://wandb.ai/capsian/train/runs/1iikjrtr\u001B[0m\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: Find logs at: ./wandb/run-20220225_221152-1iikjrtr/logs/debug.log\n",
+      "\u001B[34m\u001B[1mwandb\u001B[0m: \n",
+      "Results saved to \u001B[1mtools/yolov5/runs/train/exp7\u001B[0m\n"
+     ]
+    }
+   ]
+  }
+ ]
+}
\ No newline at end of file
-- 
GitLab