# ball_tracking.py
from collections import deque  # used to draw the trail of recent ball positions
import numpy as np
import argparse
import imutils # helpful library (pyimagesearch.com)
import cv2
from centroidtracker import CentroidTracker
import time
# construct the argument parser for optional video file input
# ap = argparse.ArgumentParser()
# ap.add_argument("-v", "--video", help="path to the (optional) video file")
# ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
# args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the list
# of tracked points.
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=64)
ctracker = CentroidTracker()
tracked_centers = []
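# A quick sketch for adapting the color range (kept commented out so it does
# not change the script): to track a ball of a different color, convert a
# known BGR sample of that color and pad the reported hue; the sample pixel
# below is an illustrative assumption, not a value used by this script.
# sample_bgr = np.uint8([[[0, 255, 0]]])  # pure green in BGR
# sample_hsv = cv2.cvtColor(sample_bgr, cv2.COLOR_BGR2HSV)  # -> [[[60, 255, 255]]]
# print(sample_hsv)  # then widen the hue (and relax S/V) around this value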
# if a video path was not supplied, grab the reference
# to the webcam
# if not args.get("video", False):
#     print("Video path does not exist or was not assigned")
camera = cv2.VideoCapture(0)
track_radius_threshold = 10
last_time = time.time()
current_time = 0
# otherwise, grab a reference to the video file
# else:
#     camera = cv2.VideoCapture(args["video"])
# loop over and process the frames from the video stream
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # if we are reading from a video file and did not grab a frame,
    # we have reached the end of the video, so exit the loop
    # if args.get("video") and not grabbed:
    #     break
    # 1. resize the frame
    # 2. (optionally) blur it
    # 3. convert it to the HSV color space
    frame = imutils.resize(frame, width=600)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform a series of
    # erosions and dilations to remove any small blobs left in the mask
    # more on dilations and erosions: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    # the [-2] slice makes the cv2.findContours call compatible
    # with both OpenCV 2.4 and OpenCV 3
    # read more: http://www.pyimagesearch.com/2015/08/10/checking-your-opencv-version-using-python/
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
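    # Note: depending on the installed imutils version, imutils.grab_contours()
    # can replace the [-2] slice above; a sketch assuming that helper is
    # available in your imutils release:
    # cnts = imutils.grab_contours(cv2.findContours(mask.copy(),
    #                                               cv2.RETR_EXTERNAL,
    #                                               cv2.CHAIN_APPROX_SIMPLE))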
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        mc = (0, 0)
        max_radius = 0
        best_cnt = ((0, 0), 0)
        for cnt in cnts:
            ((x, y), radius) = cv2.minEnclosingCircle(cnt)
            if radius > track_radius_threshold:
                M = cv2.moments(cnt)
                # more on contour moments: http://docs.opencv.org/trunk/dd/d49/tutorial_py_contour_features.html
                mc = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                tracked_centers.append(mc)
                if radius > max_radius:
                    max_radius = radius
                    best_cnt = (mc, radius)
    # update the centroid tracker with the centroids found in this frame
    tracked_objects = ctracker.update(np.array(tracked_centers))
    if len(tracked_centers) == 0:
        pts.clear()
        # show the frame to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break
        continue
    unfind = True
    # loop over the tracked objects
    for (objectID, traj) in tracked_objects.items():
        # the most recent centroid sits at the front of each trajectory
        centroid = traj[0]
        # draw both the ID of the object and the centroid of the
        # object on the output frame
        if len(tracked_centers) != 0:
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        # if this object's centroid matches the largest contour found in
        # this frame, copy its trajectory so the trail below follows it
        if unfind and (centroid[0] == best_cnt[0][0]) and (centroid[1] == best_cnt[0][1]):
            pts = traj.copy()
            # current_time = time.time()
            # print(current_time - last_time)
            # last_time = current_time
            unfind = False
            cv2.circle(frame, (int(centroid[0]), int(centroid[1])), int(best_cnt[1]),
                       (0, 255, 255), 2)
            cv2.circle(frame, tuple(centroid), 5, (0, 0, 255), -1)
    # (original single-ball version, kept for reference)
    # find the largest contour in the mask, then use it to compute
    # the minimum enclosing circle and centroid
    # c = max(cnts, key=cv2.contourArea)
    # ((x, y), radius) = cv2.minEnclosingCircle(c)
    # M = cv2.moments(c)
    # # more on: http://docs.opencv.org/trunk/dd/d49/tutorial_py_contour_features.html
    # center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    # # only proceed if the radius meets a minimum size
    # if radius > 10:
    #     # draw the circle and centroid on the frame,
    #     # then update the list of tracked points
    #     cv2.circle(frame, (int(x), int(y)), int(radius),
    #                (0, 255, 255), 2)
    #     cv2.circle(frame, center, 5, (0, 0, 255), -1)
    #     # update the points queue
    #     # pts.appendleft(center)
    # reset the per-frame centroid list
    tracked_centers.clear()
    # loop over the set of tracked points and draw the trail
    for i in range(1, len(pts)):
        # if either of the tracked points is None, ignore them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the connecting line
        # (older points get thinner lines) and draw it
        thickness = int(np.sqrt(64 / float(i + 1)) * 2.5)
        cv2.line(frame, tuple(pts[i - 1]), tuple(pts[i]), (0, 0, 255), thickness)
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break
camera.release()
cv2.destroyAllWindows()
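
# Usage sketch (assuming this file is saved as ball_tracking.py alongside
# the centroidtracker.py module below):
#   pip install numpy scipy imutils opencv-python
#   python ball_tracking.py
# Press 'q' in the video window to quit.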

# centroidtracker.py
# adapted from: https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/
# import the necessary packages
from scipy.spatial import distance as dist
from collections import OrderedDict
from collections import deque
import numpy as np


class CentroidTracker():
    def __init__(self, maxDisappeared=50):
        # initialize the next unique object ID along with two ordered
        # dictionaries used to map a given object ID to its centroid
        # trajectory and to the number of consecutive frames it has
        # been marked as "disappeared", respectively
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        # store the maximum number of consecutive frames a given
        # object is allowed to be marked as "disappeared" before we
        # deregister the object from tracking
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        # when registering an object we use the next available object
        # ID to store the centroid; a deque is used so the whole
        # trajectory is kept, most recent centroid first
        self.objects[self.nextObjectID] = deque([centroid])
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        # to deregister an object ID we delete the object ID from
        # both of our respective dictionaries
        del self.objects[objectID]
        del self.disappeared[objectID]
    def update(self, inputCentroids):
        # check to see if the array of input centroids is empty
        if inputCentroids.size == 0:
            # loop over any existing tracked objects and mark them
            # as disappeared
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                # if we have reached a maximum number of consecutive
                # frames where a given object has been marked as
                # missing, deregister it
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            # return early as there are no centroids or tracking info
            # to update
            return self.objects
        # (left over from the original bounding-box API, kept for reference)
        # # initialize an array of input centroids for the current frame
        # inputCentroids = np.zeros((len(rects), 2), dtype="int")
        # # loop over the bounding box rectangles
        # for (i, (startX, startY, endX, endY)) in enumerate(rects):
        #     # use the bounding box coordinates to derive the centroid
        #     cX = int((startX + endX) / 2.0)
        #     cY = int((startY + endY) / 2.0)
        #     inputCentroids[i] = (cX, cY)
        # if we are currently not tracking any objects, take the input
        # centroids and register each of them
        if len(self.objects) == 0:
            for i in range(0, len(inputCentroids)):
                self.register(inputCentroids[i])
        # otherwise, we are currently tracking objects, so we need to
        # try to match the input centroids to existing object
        # centroids
        else:
            # grab the set of object IDs and the most recent centroid
            # of each stored trajectory
            objectIDs = list(self.objects.keys())
            objectCentroids = []
            obj_trajectories = list(self.objects.values())
            for path in obj_trajectories:
                objectCentroids.append(path[0])
            # compute the distance between each pair of object
            # centroids and input centroids, respectively -- our
            # goal will be to match an input centroid to an existing
            # object centroid
            D = dist.cdist(np.array(objectCentroids), inputCentroids)
            # in order to perform this matching we must (1) find the
            # smallest value in each row and then (2) sort the row
            # indexes based on their minimum values so that the row
            # with the smallest value is at the *front* of the index
            # list
            rows = D.min(axis=1).argsort()
            # next, we perform a similar process on the columns by
            # finding the smallest value in each column and then
            # sorting using the previously computed row index list
            cols = D.argmin(axis=1)[rows]
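            # worked example of the greedy matching for a 2x2 case:
            # D = [[ 2.0, 50.0],
            #      [40.0,  3.0]]
            # D.min(axis=1) -> [2.0, 3.0], so rows = [0, 1]
            # D.argmin(axis=1)[rows] -> cols = [0, 1]
            # i.e. existing object 0 pairs with input centroid 0 and
            # object 1 with input centroid 1, closest pairs handled first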
            # in order to determine if we need to update, register,
            # or deregister an object we need to keep track of which
            # of the row and column indexes we have already examined
            usedRows = set()
            usedCols = set()
            # loop over the combination of the (row, column) index
            # tuples
            for (row, col) in zip(rows, cols):
                # if we have already examined either the row or
                # column value before, ignore it
                if row in usedRows or col in usedCols:
                    continue
                # otherwise, grab the object ID for the current row,
                # prepend its new centroid to the trajectory, and reset
                # the disappeared counter
                objectID = objectIDs[row]
                # changed from the original, which stored only the latest
                # centroid: self.objects[objectID] = inputCentroids[col]
                if objectID in self.objects:
                    self.objects[objectID].extendleft([inputCentroids[col]])
                else:
                    self.objects[objectID] = deque([inputCentroids[col]])
                self.disappeared[objectID] = 0
                # indicate that we have examined each of the row and
                # column indexes, respectively
                usedRows.add(row)
                usedCols.add(col)
            # compute both the row and column indexes we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)
            # in the event that the number of object centroids is
            # equal to or greater than the number of input centroids,
            # we need to check and see if some of these objects have
            # potentially disappeared
            if D.shape[0] >= D.shape[1]:
                # loop over the unused row indexes
                for row in unusedRows:
                    # grab the object ID for the corresponding row
                    # index and increment the disappeared counter
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1
                    # check to see if the number of consecutive
                    # frames the object has been marked "disappeared"
                    # warrants deregistering the object
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)
            # otherwise, if the number of input centroids is greater
            # than the number of existing object centroids we need to
            # register each new input centroid as a trackable object
            else:
                for col in unusedCols:
                    self.register(inputCentroids[col])
        # return the set of trackable objects
        return self.objects
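
# Minimal standalone sketch of how CentroidTracker can be driven (the
# centroids below are made up for illustration; the real script feeds it
# contour centers once per frame):
if __name__ == "__main__":
    ct = CentroidTracker(maxDisappeared=5)
    frame1 = np.array([[10, 10], [200, 50]])   # two objects appear
    frame2 = np.array([[12, 11], [205, 55]])   # both move slightly
    ct.update(frame1)
    objects = ct.update(frame2)
    for objectID, trajectory in objects.items():
        # each value is a deque of centroids, most recent first
        print(objectID, list(trajectory))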