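"""Interactive single-image detection and tracking (descriptive note added for clarity).

Reads an image file name from stdin, runs YOLOv5 detection on that image,
associates detections across successive calls with DeepSORT to assign track IDs,
and optionally displays and saves the annotated result.
"""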
import sys

sys.path.insert(0, './yolov5')
from yolov5.utils.datasets import LoadImages, LoadStreams
from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords, xyxy2xywh
from yolov5.utils.plots import Annotator, colors
from yolov5.utils.torch_utils import select_device, time_synchronized
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import argparse
import os
import platform
import shutil
import time
import zmq
import json
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from yolov5.utils.Point2GPS import *
from ec.countunit import addcolorimg,addtitlerectangle,addcount,countlist,clsnames
from detector import Detector
# palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
def detectimage(opt, ip, filename, save_img=False):
    inpath, out, weights, view_img, imgsz = \
        opt.inpath, opt.output, opt.weights, opt.view_img, opt.img_size
    # Get detector, tracker and class names
    detector, deepsort = opt.detector, opt.deepsort
    names = detector.names
    draw_box = []

    # Set Dataloader
    vid_path, vid_writer = None, None
    save_path = str(Path(out))
    txt_path = str(Path(out)) + '/results.txt'
    imgpath = inpath + filename
    t0 = time.time()
    im0 = cv2.imread(imgpath)
    # Optionally downscale, e.g. 1920x1080 -> 960x540
    # im0 = cv2.resize(im0, (1024, 640))
    img, pred = detector.detect(im0)
    t1 = time.time()
    print('%sDone. (%.3fs)' % (filename, t1 - t0))

    # Apply NMS
    pred = non_max_suppression(
        pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
    t2 = time_synchronized()

    # Process detections
    for i, det in enumerate(pred):  # detections per image
        if det is not None and len(det):
            # Rescale boxes from inference size back to the original image size
            det[:, :4] = scale_coords(
                img.shape[2:], det[:, :4], im0.shape).round()

            # Print results
            s = ''
            for c in det[:, -1].unique():
                n = (det[:, -1] == c).sum()  # detections per class
                s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

            xywhs = xyxy2xywh(det[:, 0:4])
            confs = det[:, 4]
            clss = det[:, 5]
            print("xywhs=", xywhs.cpu())

            # Pass detections to DeepSORT; each output row is used here as
            # [x1, y1, x2, y2, track_id, class]
            outputs = deepsort.update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)
            t2 = time.time()
            print('%sDone. (%.3fs)' % (s, t2 - t1))
            print('outputs=', outputs)

            if outputs is not None and len(outputs) > 0:
                # Note: confidences come from the raw detections, so outputs and
                # confs are only paired positionally.
                for j, (output, conf) in enumerate(zip(outputs, confs)):
                    bboxes = output[0:4]
                    id = output[4]
                    cls = output[5]
                    c = int(cls)  # integer class
                    label = f'{id} {names[c]} {conf:.2f}'
                    color = colors(c, True)
                    draw_box.append([bboxes, label, color])
                    # annotator.box_label(bboxes, label, color=colors(c, True))
        else:
            deepsort.increment_ages()

    # Stream results
    if view_img:
        annotator = Annotator(im0, line_width=2, pil=False)
        for bboxes, label, color in draw_box:
            annotator.box_label(bboxes, label, color)
        im0 = annotator.cvimg()
        cv2.imshow("monitor", im0)
        if cv2.waitKey(1) == ord('q'):  # q to quit
            raise StopIteration

    # Save results (image with detections)
    if save_img and len(draw_box) > 0:
        savefilename = save_path + "/" + filename
        cv2.imwrite(savefilename, im0)
    # print('Done. (%.3fs)' % (time.time() - t0))
def main(opt):
    # Initialize DeepSORT tracker from its config file
    cfg = get_config()
    cfg.merge_from_file(opt.config_deepsort)
    deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                        max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
                        use_cuda=True)

    # Initialize the YOLOv5 detector
    detector = Detector(opt.img_size, opt.view_img_size, opt.weights)
    opt.deepsort = deepsort
    opt.detector = detector

    # Interactive loop: read a file name from stdin and process that image
    while True:
        try:
            filename = input("Enter file name: ").strip()
            detectimage(opt, '', filename, True)
        except BaseException as ex:
            print('Error...', ex)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str,
                        default='./yolov5/weights/yolov5s.pt', help='model.pt path')
    parser.add_argument('--inpath', type=str, default='F:/TestImage/input/',
                        help='input folder')
    parser.add_argument('--output', type=str, default='inference/output/',
                        help='output folder')
    parser.add_argument('--img-size', type=int, default=960,
                        help='inference size (pixels)')
    # Confidence threshold for keeping detections
    parser.add_argument('--conf-thres', type=float,
                        default=0.5, help='object confidence threshold')
    # IoU threshold for suppressing overlapping boxes that may belong to two different objects
    parser.add_argument('--iou-thres', type=float,
                        default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--fourcc', type=str, default='mp4v',
                        help='output video codec (verify ffmpeg support)')
    parser.add_argument('--device', default='',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', default=True,
                        help='display results')
    # Display window size -- note: img_size = (1920, 1080) in countunit.py must be set to match
    parser.add_argument('--view-img-size', type=int, default=(1920, 1080),
                        help='display window size (pixels)')
    # parser.add_argument('--save-txt', action='store_true',
    #                     help='save results to *.txt')
    # Classes to track; class 0 is person
    parser.add_argument('--classes', nargs='+', type=int,
                        default=[0, 1, 2, 3, 5, 7, 16], help='filter by class')
    parser.add_argument('--agnostic-nms', action='store_true',
                        help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true',
                        help='augmented inference')
    parser.add_argument("--config_deepsort", type=str,
                        default="deep_sort_pytorch/configs/deep_sort.yaml")
    args = parser.parse_args()
    args.img_size = check_img_size(args.img_size)
    print(args)

    with torch.no_grad():
        main(args)
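# Example usage (the script name below is illustrative; use this file's actual name):
#   python track_image.py --weights ./yolov5/weights/yolov5s.pt --inpath F:/TestImage/input/
# At the prompt, enter a file name relative to --inpath (e.g. "demo.jpg");
# annotated images are written to --output whenever detections are found.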