import sys
sys.path.insert(0, './yolov5')
from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords, xyxy2xywh
from yolov5.utils.plots import Annotator, colors
from yolov5.utils.torch_utils import time_synchronized
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import argparse
import os
import platform
import time
from pathlib import Path
import cv2
import torch
from yolov5.utils.Point2GPS import *
from ec.countunit import addcolorimg, addtitlerectangle, addcount, countlist, clsnames
from detector import Detector

# palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)


def detect(opt, save_img=False):
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('rtmp') \
        or source.startswith('http') or source.endswith('.txt')

    # Initialize DeepSORT from its YAML config
    cfg = get_config()
    cfg.merge_from_file(opt.config_deepsort)
    deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                        max_dist=cfg.DEEPSORT.MAX_DIST,
                        min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                        max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                        max_age=cfg.DEEPSORT.MAX_AGE,
                        n_init=cfg.DEEPSORT.N_INIT,
                        nn_budget=cfg.DEEPSORT.NN_BUDGET,
                        use_cuda=True)

    # Initialize the YOLOv5 detector
    detector = Detector(opt.img_size, opt.view_img_size, opt.weights)

    vid_path, vid_writer = None, None

    # Class names from the detector
    names = detector.names
    draw_box = []  # cached [bbox, label, color] triples for drawing

    save_path = str(Path(out))
    txt_path = str(Path(out)) + '/results.txt'

    # Open the video source (RTSP/RTMP/HTTP stream, webcam, or file)
    vid_cap = cv2.VideoCapture(source)
    n = 0
    while True:
        # Read one frame; on a dropped stream, wait and retry
        _, im0 = vid_cap.read()
        if im0 is None:
            time.sleep(1)
            continue
        n += 1
        # Normalize the frame to the layout size expected by countunit.py
        # (optionally downscale instead, e.g. cv2.resize(im0, (960, 540)))
        im0 = cv2.resize(im0, (1920, 1080))

        # Run detection on every 6th frame only
        if n == 6:
            n = 0
            draw_box = []
            img, pred = detector.detect(im0)

            # Apply NMS
            pred = non_max_suppression(
                pred, opt.conf_thres, opt.iou_thres,
                classes=opt.classes, agnostic=opt.agnostic_nms)
            t2 = time_synchronized()

            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from inference size to original frame size
                    det[:, :4] = scale_coords(
                        img.shape[2:], det[:, :4], im0.shape).round()

                    # Build the results string
                    s = ''
                    for c in det[:, -1].unique():
                        nc = (det[:, -1] == c).sum()  # detections per class
                        s += f"{nc} {names[int(c)]}{'s' * (nc > 1)}, "  # add to string

                    xywhs = xyxy2xywh(det[:, 0:4])
                    confs = det[:, 4]
                    clss = det[:, 5]

                    # Pass detections to DeepSORT; each output row is
                    # [x1, y1, x2, y2, track_id, class]
                    outputs = deepsort.update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)

                    if outputs is not None and len(outputs) > 0:
                        # conf is paired with tracker outputs positionally
                        for j, (output, conf) in enumerate(zip(outputs, confs)):
                            bboxes = output[0:4]
                            id = output[4]
                            cls = output[5]
                            c = int(cls)  # integer class
                            label = f'{id} {names[c]} {conf:.2f}'
                            color = colors(c, True)
                            draw_box.append([bboxes, label, color])
                    addcount(outputs, im0)
                else:
                    deepsort.increment_ages()
                # Print time (inference + NMS)
                # print('%sDone. (%.3fs)' % (s, t2 - t1))
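        # draw_box is refreshed only on detection frames (every 6th) and
        # reused on the frames in between, so the displayed video stays
        # smooth while inference cost drops roughly six-fold.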
        # Stream results
        if view_img:
            im0 = addtitlerectangle(im0)
            annotator = Annotator(im0, line_width=2, pil=True)
            for bboxes, label, color in draw_box:
                annotator.box_label(bboxes, label, color)
            annotator.drawcount(countlist, clsnames)
            im0 = annotator.cvimg()
            im0 = addcolorimg(im0)
            cv2.imshow("monitor", im0)
            if cv2.waitKey(1) == ord('q'):  # press q to quit
                raise StopIteration

        # Save results (image with detections)
        # if save_img:
        #     print('saving img!')
        #     if dataset.mode == 'images':
        #         cv2.imwrite(save_path, im0)
        #     else:
        #         print('saving video!')
        #         if vid_path != save_path:  # new video
        #             vid_path = save_path
        #             if isinstance(vid_writer, cv2.VideoWriter):
        #                 vid_writer.release()  # release previous video writer
        #             fps = vid_cap.get(cv2.CAP_PROP_FPS)
        #             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        #             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        #             vid_writer = cv2.VideoWriter(
        #                 save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
        #         vid_writer.write(im0)

    # if save_txt or save_img:
    #     print('Results saved to %s' % os.getcwd() + os.sep + out)
    #     if platform == 'darwin':  # MacOS
    #         os.system('open ' + save_path)
    # print('Done. (%.3fs)' % (time.time() - t0))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str,
                        default='./yolov5/weights/yolov5s.pt', help='model.pt path')
    # file/folder, 0 for webcam
    parser.add_argument('--source', type=str,
                        # default='F:/AIData/video/test.MP4', help='source')
                        default='rtsp://admin:hk123456@120.240.37.42:554', help='source')
    parser.add_argument('--output', type=str, default='inference/output',
                        help='output folder')
    parser.add_argument('--img-size', type=int, default=960,
                        help='inference size (pixels)')
    # Confidence threshold: detections scoring below this are discarded
    parser.add_argument('--conf-thres', type=float, default=0.5,
                        help='object confidence threshold')
    # NMS IoU threshold: suppresses overlapping boxes that may be duplicate
    # detections of the same object
    parser.add_argument('--iou-thres', type=float, default=0.5,
                        help='IOU threshold for NMS')
    parser.add_argument('--fourcc', type=str, default='mp4v',
                        help='output video codec (verify ffmpeg support)')
    parser.add_argument('--device', default='',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', default=True,
                        help='display results')
    # Display window size; note that img_size = (1920, 1080) in countunit.py
    # must be set to match
    parser.add_argument('--view-img-size', type=int, default=(1920, 1080),
                        help='display window size (pixels)')
    parser.add_argument('--save-txt', action='store_true',
                        help='save results to *.txt')
    # Classes to track (COCO indices); class 0 is person
    parser.add_argument('--classes', nargs='+', type=int,
                        default=[0, 1, 2, 3, 5, 7, 16], help='filter by class')
    parser.add_argument('--agnostic-nms', action='store_true',
                        help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true',
                        help='augmented inference')
    parser.add_argument('--config_deepsort', type=str,
                        default='deep_sort_pytorch/configs/deep_sort.yaml')
    args = parser.parse_args()
    args.img_size = check_img_size(args.img_size)
    print(args)

    with torch.no_grad():
        detect(args)
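# ---------------------------------------------------------------------------
# Usage sketch (a minimal example, not part of the script): assumes this file
# is saved as track.py (the filename is an assumption) and that the weights
# file and the stream exist; the RTSP URL below is just the --source default.
#
#   python track.py \
#       --weights ./yolov5/weights/yolov5s.pt \
#       --img-size 960 --conf-thres 0.5 --iou-thres 0.5 \
#       --classes 0 1 2 3 5 7 16 \
#       --source 'rtsp://admin:hk123456@120.240.37.42:554'
#
# --view-img defaults to True, so the annotated stream opens in a cv2 window;
# press q in that window to quit.
# ---------------------------------------------------------------------------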