1. How the centroid tracking algorithm works
- Obtain the bounding boxes of the targets to be tracked
- Compute the centroid of each box and assign it an ID
- Compute the distance (Euclidean distance) between each new centroid and each existing object's centroid (a small sketch of this matching step follows this list)
- Update the coordinates of existing objects with their nearest new centroids
- If a new centroid cannot be associated with any existing object, register it as a new object
- If an existing object cannot be matched to any detection for N consecutive frames, remove it
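A minimal sketch of the matching step, with made-up centroid coordinates; it uses scipy.spatial.distance.cdist just as the tracker class in section 2.1 does:

from scipy.spatial import distance as dist
import numpy as np

# centroids of currently tracked objects (hypothetical values)
objectCentroids = np.array([[100, 120], [300, 200]])
# centroids of the boxes detected in the current frame (hypothetical values)
inputCentroids = np.array([[305, 198], [98, 125]])

# pairwise Euclidean distances: D[i, j] = distance(object i, detection j)
D = dist.cdist(objectCentroids, inputCentroids)

# for each object find its nearest detection, and process the objects with
# the smallest distances first so the best matches are claimed first
rows = D.min(axis=1).argsort()
cols = D.argmin(axis=1)[rows]
print(list(zip(rows.tolist(), cols.tolist())))
# [(0, 1), (1, 0)]: object 0 matches detection 1, object 1 matches detection 0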
2. Code
2.1 Define a centroid tracker class, CentroidTracking:
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np

class CentroidTracking():
    def __init__(self, maxDisappeared=50):
        # maximum number of consecutive frames an object may be marked as
        # disappeared before it is removed
        self.maxDisappeared = maxDisappeared
        # next ID to assign to a new object
        self.nextObjectId = 0
        # objectID -> centroid, and objectID -> consecutive frames missed
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()

    # register a new object
    def register(self, centroid):
        self.objects[self.nextObjectId] = centroid
        self.disappeared[self.nextObjectId] = 0
        self.nextObjectId += 1

    # remove an object
    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    # update the tracker with the bounding boxes of the current frame
    def update(self, rects):
        # no detections: mark every existing object as disappeared
        if len(rects) == 0:
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects

        # compute the centroid of each input bounding box
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
            inputCentroids[i] = (cX, cY)

        # no objects tracked yet: register every input centroid
        if len(self.objects) == 0:
            for i in range(0, len(inputCentroids)):
                self.register(inputCentroids[i])
        else:
            # current object IDs and centroids
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())

            # pairwise Euclidean distances between existing and new centroids,
            # matched greedily from the smallest distance upwards
            D = dist.cdist(np.array(objectCentroids), inputCentroids)
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]

            usedRows = set()
            usedCols = set()
            for (row, col) in zip(rows, cols):
                if row in usedRows or col in usedCols:
                    continue
                # update the matched object's centroid and reset its counter
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0
                usedRows.add(row)
                usedCols.add(col)

            # unmatched objects and unmatched input centroids
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)

            if D.shape[0] >= D.shape[1]:
                # more objects than detections: unmatched objects disappear
                for row in unusedRows:
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)
            else:
                # more detections than objects: register the unmatched ones
                for col in unusedCols:
                    self.register(inputCentroids[col])

        return self.objects
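A quick usage sketch of the class above (the boxes are made up just to show the call pattern): pass update() one list of (startX, startY, endX, endY) boxes per frame and it returns the current mapping from object ID to centroid:

tracker = CentroidTracking(maxDisappeared=10)

# frame 1: two detections -> IDs 0 and 1 are registered
objects = tracker.update([(90, 110, 110, 130), (290, 190, 310, 210)])
print(objects)  # centroids (100, 120) and (300, 200)

# frame 2: the same targets, slightly moved -> the IDs are preserved
objects = tracker.update([(295, 188, 315, 208), (88, 115, 108, 135)])
print(objects)  # object 0 is now at (98, 125), object 1 at (305, 198)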
2.2 Track faces with a Caffe model:
from centroidtracking import CentroidTracking
from imutils.video import VideoStream
from imutils.video import FileVideoStream
import numpy as np
import imutils
import cv2
import time

# model and video paths
prototxt_path = "./deploy.prototxt"
model_path = "./res10_300x300_ssd_iter_140000_fp16.caffemodel"
video_path = "./test.mp4"
# minimum detection confidence to keep a face box
conf_threshold = 0.5

centTrack = CentroidTracking()
(H, W) = (None, None)

print("loading model ...")
net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)

print("starting video stream ...")
# read from a video file
vs = FileVideoStream(video_path).start()
# or read from a webcam
# vs = VideoStream(src=0).start()
time.sleep(1.0)

# BGR mean values subtracted by the SSD face detector
mean_value = (104, 177, 123)

while True:
    frame = vs.read()
    if frame is None:
        break
    frame = imutils.resize(frame, width=360)
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # run the face detector on the current frame
    blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H), mean_value)
    net.setInput(blob)
    detections = net.forward()

    # collect the bounding boxes of confident detections
    rects = []
    for i in range(0, detections.shape[2]):
        if detections[0, 0, i, 2] > conf_threshold:
            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
            rects.append(box.astype("int"))
            (startX, startY, endX, endY) = box.astype("int")
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)

    # update the tracker and draw the ID of each tracked object
    objects = centTrack.update(rects)
    for (objectID, centroid) in objects.items():
        text = "ID {}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

    cv2.imshow("Test", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

vs.stop()
cv2.destroyAllWindows()
3. Test
Single-target result:
Multi-target result:
Drawbacks:
- Running an object detector on every frame of the video is computationally expensive
- It handles overlapping targets poorly
- Matching relies only on Euclidean distance, so centroids can occasionally swap IDs (a sketch of this failure mode follows this list)
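A minimal sketch of the ID-swap failure mode, reusing the CentroidTracking class from section 2.1 with made-up boxes: when two targets cross paths faster than the frame rate resolves, nearest-centroid matching hands each ID to the wrong detection:

tracker = CentroidTracking()

# frame 1: target A at x ~ 10, target B at x ~ 60 (hypothetical boxes)
tracker.update([(0, 0, 20, 20), (50, 0, 70, 20)])   # A gets ID 0, B gets ID 1

# frame 2: the targets cross in a single frame; A is now at x ~ 70, B at x ~ 20
objects = tracker.update([(60, 0, 80, 20), (10, 0, 30, 20)])
print(objects)
# ID 0 ends up on the detection at (20, 10), which is really B, and ID 1 on
# (70, 10), which is really A: the two IDs have swapped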
Source: CSDN
Author: 圆滚熊
Link: https://blog.csdn.net/y459541195/article/details/103663948