Skip to content

Commit 3de5629

Browse files
committed
Add SimpleTracker examples
1 parent 1ea3387 commit 3de5629

File tree

6 files changed

+934
-5
lines changed

6 files changed

+934
-5
lines changed
Lines changed: 265 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,265 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": 1,
6+
"metadata": {},
7+
"outputs": [],
8+
"source": [
9+
"import numpy as np\n",
10+
"import cv2 as cv\n",
11+
"from motrackers import SimpleTracker\n",
12+
"from motrackers.utils import select_caffemodel, select_videofile"
13+
]
14+
},
15+
{
16+
"cell_type": "code",
17+
"execution_count": 2,
18+
"metadata": {},
19+
"outputs": [
20+
{
21+
"data": {
22+
"application/vnd.jupyter.widget-view+json": {
23+
"model_id": "1d3c3a3a86324074b3461a40bda983ea",
24+
"version_major": 2,
25+
"version_minor": 0
26+
},
27+
"text/plain": [
28+
"FileChooser(path='..', filename='', show_hidden='False')"
29+
]
30+
},
31+
"metadata": {},
32+
"output_type": "display_data"
33+
},
34+
{
35+
"data": {
36+
"application/vnd.jupyter.widget-view+json": {
37+
"model_id": "46b604d3ae4d4853aa0a644f9edfe463",
38+
"version_major": 2,
39+
"version_minor": 0
40+
},
41+
"text/plain": [
42+
"FileChooser(path='..', filename='', show_hidden='False')"
43+
]
44+
},
45+
"metadata": {},
46+
"output_type": "display_data"
47+
},
48+
{
49+
"data": {
50+
"application/vnd.jupyter.widget-view+json": {
51+
"model_id": "9f31f72dfb7446b29e258b97fa8f1a2f",
52+
"version_major": 2,
53+
"version_minor": 0
54+
},
55+
"text/plain": [
56+
"FileChooser(path='..', filename='', show_hidden='False')"
57+
]
58+
},
59+
"metadata": {},
60+
"output_type": "display_data"
61+
}
62+
],
63+
"source": [
# Interactively choose the input video and the Caffe model files
# (deploy prototxt + trained weights) with FileChooser widgets rooted
# at the repository's parent directory.
video_file = select_videofile('..')
prototxt, weights = select_caffemodel('..')
display(video_file, prototxt, weights)

# Path of the video picked in the widget above.
# NOTE(review): `.selected` is presumably None until the user confirms a
# choice in the widget — confirm before running the cells below.
video = video_file.selected
]
77+
},
78+
{
79+
"cell_type": "code",
80+
"execution_count": 4,
81+
"metadata": {},
82+
"outputs": [],
83+
"source": [
# Pascal-VOC class labels in the order the SSD network emits them
# (index 0 is the background class).
VOC_LABELS = [
    'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
    'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
    'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
    'tvmonitor',
]

# Detector configuration bundle used by the cells below.
model = {
    "prototxt": prototxt.selected,
    "weights": weights.selected,
    "object_names": dict(enumerate(VOC_LABELS)),
    "threshold": 0.2,                # NMS overlap threshold
    "confidence_threshold": 0.2,     # minimum detection score to keep
    "pixel_std": 1 / 127.5,          # input scale; with the mean below maps [0,255] -> [-1,1]
    "pixel_mean": 127.5,
    "input_size": (300, 300),        # SSD network input resolution
}

# Maximum number of consecutive frames an object may go undetected
# before its track is dropped.
max_object_lost_count = 5

# Deterministic per-class bounding-box colors: fixed seed, one 3-channel
# draw per class id, in class-id order (same order as the original dict).
np.random.seed(12345)
bbox_colors = {class_id: np.random.randint(0, 255, size=(3,)).tolist()
               for class_id in model['object_names']}
]
119+
},
120+
{
121+
"cell_type": "code",
122+
"execution_count": 5,
123+
"metadata": {},
124+
"outputs": [],
125+
"source": [
# Load the SSD Caffe detector from the selected definition/weights files.
net = cv.dnn.readNetFromCaffe(model["prototxt"], model["weights"])

# Open the chosen video stream.
cap = cv.VideoCapture(video)

# Centroid tracker: a track is dropped after `max_object_lost_count`
# consecutive frames without a matching detection.
tracker = SimpleTracker(max_lost=max_object_lost_count)
]
130+
},
131+
{
132+
"cell_type": "code",
133+
"execution_count": 6,
134+
"metadata": {
135+
"scrolled": false
136+
},
137+
"outputs": [],
138+
"source": [
# Main loop: for every frame, run SSD detection, suppress overlapping
# boxes with NMS, draw the surviving detections, feed them to the
# centroid tracker, show the annotated frame, and append it to
# "output.avi". Press 'q' in the display window to stop.

(H, W) = (None, None)   # original frame size, captured from the first frame
writer = None           # created lazily once the frame size is known

while True:
    ok, image = cap.read()

    if not ok:
        print("Cannot read the video feed.")
        break

    if W is None or H is None:
        (H, W) = image.shape[:2]

    # Resize to the network's fixed input resolution.
    image_resized = cv.resize(image, model["input_size"])

    blob = cv.dnn.blobFromImage(image_resized,
                                model["pixel_std"],
                                model["input_size"],
                                (model["pixel_mean"], model["pixel_mean"], model["pixel_mean"]),
                                False)

    net.setInput(blob)
    detections = net.forward()

    rows = image_resized.shape[0]
    cols = image_resized.shape[1]

    # Scale factors mapping network-input coordinates back to the original
    # frame. These are loop-invariant per frame, so they are computed once
    # here instead of once per detection (as the original code did).
    height_factor = image.shape[0] / float(model["input_size"][0])
    width_factor = image.shape[1] / float(model["input_size"][1])

    boxes, confidences, classIDs, detections_bbox = [], [], [], []

    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > model['confidence_threshold']:
            class_id = int(detections[0, 0, i, 1])

            # Box corners in resized-image pixels (network output is
            # normalized to [0, 1]).
            left = int(detections[0, 0, i, 3] * cols)
            top = int(detections[0, 0, i, 4] * rows)
            right = int(detections[0, 0, i, 5] * cols)
            bottom = int(detections[0, 0, i, 6] * rows)

            # Scale the box back to original-frame pixels.
            left = int(width_factor * left)
            top = int(height_factor * top)
            right = int(width_factor * right)
            bottom = int(height_factor * bottom)

            width, height = right - left, bottom - top

            boxes.append([left, top, width, height])
            confidences.append(float(confidence))
            classIDs.append(int(class_id))

    # Non-maximum suppression over the kept detections.
    indices = cv.dnn.NMSBoxes(boxes, confidences, model["confidence_threshold"], model["threshold"])

    if len(indices) > 0:
        for i in indices.flatten():
            x, y, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]

            detections_bbox.append((x, y, x + w, y + h))

            clr = [int(c) for c in bbox_colors[classIDs[i]]]
            cv.rectangle(image, (x, y), (x + w, y + h), clr, 2)

            label = "{}:{:.4f}".format(model["object_names"][classIDs[i]], confidences[i])
            (label_width, label_height), baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 2)
            # Keep the label readable when the box touches the top edge.
            y_label = max(y, label_height)
            cv.rectangle(image, (x, y_label - label_height),
                         (x + label_width, y_label + baseLine), (255, 255, 255), cv.FILLED)
            cv.putText(image, label, (x, y_label), cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, 2)

    # Associate the NMS-surviving boxes with existing tracks.
    objects = tracker.update(detections_bbox)

    for (objectID, centroid) in objects.items():
        text = "ID {}".format(objectID)
        cv.putText(image, text, (centroid[0] - 10, centroid[1] - 10), cv.FONT_HERSHEY_SIMPLEX,
                   0.5, (0, 255, 0), 2)
        cv.circle(image, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

    cv.imshow("image", image)

    if cv.waitKey(1) & 0xFF == ord('q'):
        break

    # Create the writer lazily, once the frame size (W, H) is known.
    if writer is None:
        fourcc = cv.VideoWriter_fourcc(*"MJPG")
        writer = cv.VideoWriter("output.avi", fourcc, 30, (W, H), True)
    writer.write(image)

# Bug fix: `writer` is still None when no frame was ever written (stream
# unreadable, or 'q' pressed on the very first frame, since the quit check
# above runs before the writer is created) — the original unconditional
# writer.release() raised AttributeError in those cases.
if writer is not None:
    writer.release()
cap.release()
# NOTE(review): destroyWindow may raise if the window was never shown
# (first read failed) — consider cv.destroyAllWindows() instead; kept
# as-is to preserve the original behavior on the normal path.
cv.destroyWindow("image")
]
235+
},
236+
{
237+
"cell_type": "code",
238+
"execution_count": null,
239+
"metadata": {},
240+
"outputs": [],
241+
"source": []
242+
}
243+
],
244+
"metadata": {
245+
"kernelspec": {
246+
"display_name": "Python 3",
247+
"language": "python",
248+
"name": "python3"
249+
},
250+
"language_info": {
251+
"codemirror_mode": {
252+
"name": "ipython",
253+
"version": 3
254+
},
255+
"file_extension": ".py",
256+
"mimetype": "text/x-python",
257+
"name": "python",
258+
"nbconvert_exporter": "python",
259+
"pygments_lexer": "ipython3",
260+
"version": "3.6.9"
261+
}
262+
},
263+
"nbformat": 4,
264+
"nbformat_minor": 2
265+
}

0 commit comments

Comments
 (0)