Browse Source

rtsppushserver

master
qiyun996 3 years ago
parent
commit
9c963d7845
  1. 48
      ec/rtsppushserver.py
  2. 146
      ec/tortsp.py
  3. 10
      main.py
  4. 9
      main2.py
  5. 9
      yolov5/utils/datasets.py

48
ec/rtsppushserver.py

@ -0,0 +1,48 @@
# -*- coding:utf-8 -*-
# @Time : 2021/12/31 16:17
# @Author : lxc
# @File : tortsp.py
# https://github.com/aler9/rtsp-simple-server
import cv2
import subprocess as sp
import signal
import numpy as npbool
class RtstPushServer:
    """Push BGR video frames to an RTSP server through an ffmpeg subprocess.

    Frames handed to :meth:`write` are piped as raw ``bgr24`` data into
    ffmpeg's stdin; ffmpeg encodes them with libx264 and publishes the
    stream to the configured RTSP URL (e.g. an rtsp-simple-server
    instance, see https://github.com/aler9/rtsp-simple-server).
    """

    def __init__(self, fps=25, width=1080, height=960,
                 rtsp='rtsp://192.168.1.182:8554/video'):
        """Spawn the ffmpeg publisher process.

        Args:
            fps: frame rate advertised for the raw input stream.
            width: frame width in pixels; every frame passed to
                :meth:`write` must be exactly ``width`` x ``height``,
                otherwise ffmpeg misinterprets the byte stream.
            height: frame height in pixels.
            rtsp: RTSP URL to publish to (replace with your own address).
        """
        # fixed typo in the original log text: "weitht" -> "width"
        print('fps=', fps, ' width=', width, ' height=', height, ' rtsp=', rtsp)
        self.lrtsp_url = rtsp
        self.lfps = fps
        self.lwidth = width
        self.lheight = height
        self.command = ['ffmpeg',
                        '-y',
                        '-f', 'rawvideo',
                        '-vcodec', 'rawvideo',
                        '-pix_fmt', 'bgr24',      # OpenCV frames are BGR
                        '-s', "{}x{}".format(self.lwidth, self.lheight),
                        '-r', str(self.lfps),
                        '-i', '-',                # raw frames arrive on stdin
                        '-c:v', 'libx264',
                        # '-c:v', 'libx265',
                        '-pix_fmt', 'yuv420p',
                        '-preset', 'ultrafast',
                        '-f', 'rtsp',
                        self.lrtsp_url]
        self.p = sp.Popen(self.command, stdin=sp.PIPE)

    def write(self, frame):
        """Send one BGR numpy frame to ffmpeg; silently skip ``None`` frames."""
        if frame is None:
            print("frame is None")
            return
        # frame = 你的图像处理的函数(frame) -> hook your processing here
        # tobytes() replaces ndarray.tostring(), which is deprecated since
        # numpy 1.19 and removed in numpy 2.0.
        self.p.stdin.write(frame.tobytes())
        # cv2.imshow('img', frame)
        # if cv2.waitKey(1) == ord('q'):  # q to quit
        #     raise StopIteration

146
ec/tortsp.py

@ -1,109 +1,47 @@
# -*- coding:utf-8 -*-
# @Time : 2021/11/4 10:17
# @Author : JulyLi
# @File : rtsp2.py
# @Software: PyCharm
# @Time : 2021/12/3010:17
# @Author : lxc
# @File : tortsp.py
# https://github.com/aler9/rtsp-simple-server
import cv2
import gi
import sys
import json
import time
import signal
import numpy as np
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GObject
#cv2.namedWindow('video_realtime_face', cv2.WINDOW_NORMAL)
def to_node(type, message):
    """Emit a single ``{type: message}`` JSON object on stdout.

    The node helper on the other side of the pipe reads these lines, so
    serialization failures are swallowed (best-effort channel) and stdout
    is flushed manually to avoid buffering delays.
    """
    try:
        line = json.dumps({type: message})
    except Exception:
        line = None
    if line is not None:
        print(line)
    # stdout has to be flushed manually to prevent delays in the node
    # helper communication
    sys.stdout.flush()
to_node("status", "Facerecognition started...")
# SIGINT handler. Python's signal module invokes handlers as
# handler(signum, frame), so here `self` actually receives the signal
# number and `signum` receives the stack frame -- the parameter names are
# misleading but harmless since neither value is used.
def shutdown(self, signum):
    to_node("status", 'Shutdown: Cleaning up camera...')
    # quit() comes from the site module; it raises SystemExit and ends
    # the process.
    quit()


# Install the handler so Ctrl-C reports cleanup before exiting.
signal.signal(signal.SIGINT, shutdown)
class SensorFactory(GstRtspServer.RTSPMediaFactory):
    """RTSP media factory that re-streams an OpenCV capture through appsrc.

    Frames are pulled on demand from an IP-camera RTSP feed via
    ``cv2.VideoCapture`` and pushed into a GStreamer
    appsrc -> x264enc -> rtph264pay pipeline served to RTSP clients.
    """

    def __init__(self, **properties):
        super(SensorFactory, self).__init__(**properties)
        # Source camera (sub-stream of the IP camera at 192.168.2.190).
        self.cap = cv2.VideoCapture("rtsp://admin:admin123@192.168.2.190:554/sub")
        # self.cap = cv2.VideoCapture("shmsrc socket-path=/tmp/foo2 ! video/x-raw, format=BGR ,width=1920,height=1080,framerate=30/1 ! videoconvert ! video/x-raw, format=BGR ! appsink")
        #self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        #self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        # Frames pushed so far; drives the monotonically increasing PTS.
        self.number_frames = 0
        self.fps = 30.0
        self.duration = 1 / self.fps * Gst.SECOND  # duration of a frame in nanoseconds
        # appsrc pipeline: raw BGR -> I420 -> H.264 -> RTP payloader "pay0".
        # NOTE(review): the caps hard-code 1920x1080@30 -- this must match
        # the frames actually delivered by self.cap; confirm against the
        # camera's sub-stream resolution.
        self.launch_string = 'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \
                             'caps=video/x-raw,format=BGR,width=1920,height=1080,framerate=30/1 ' \
                             '! videoconvert ! video/x-raw,format=I420 ' \
                             '! x264enc speed-preset=ultrafast tune=zerolatency threads=4 ' \
                             '! rtph264pay config-interval=1 name=pay0 pt=96'

    def on_need_data(self, src, lenght):
        """appsrc 'need-data' callback: read one frame, wrap it in a
        Gst.Buffer with synthetic timestamps, and push it downstream.
        (Parameter name 'lenght' is a typo in the original, kept as-is.)
        """
        if self.cap.isOpened():
            ret, frame = self.cap.read()
            if ret:
                #cv2.imshow("video_realtime_face", frame)
                #if cv2.waitKey(1) & 0xFF == ord('q'):
                # return
                # NOTE(review): ndarray.tostring() is deprecated (removed in
                # numpy 2.0); tobytes() is the modern equivalent.
                data = frame.tostring()
                buf = Gst.Buffer.new_allocate(None, len(data), None)
                buf.fill(0, data)
                buf.duration = self.duration
                # Synthesize timestamps from the frame counter rather than
                # the capture clock.
                timestamp = self.number_frames * self.duration
                buf.pts = buf.dts = int(timestamp)
                buf.offset = timestamp
                self.number_frames += 1
                retval = src.emit('push-buffer', buf)
                print('pushed buffer, frame {}, duration {} ns, durations {} s'.format(self.number_frames,
                                                                                       self.duration,
                                                                                       self.duration / Gst.SECOND))
                if retval != Gst.FlowReturn.OK:
                    print(retval)

    def do_create_element(self, url):
        # Build a fresh pipeline instance for each client connection.
        return Gst.parse_launch(self.launch_string)

    def do_configure(self, rtsp_media):
        # Reset timestamps for the new session and hook appsrc's pull model.
        self.number_frames = 0
        appsrc = rtsp_media.get_element().get_child_by_name('source')
        appsrc.connect('need-data', self.on_need_data)
class GstServer(GstRtspServer.RTSPServer):
    """RTSP server exposing the SensorFactory stream at mount point /test."""

    def __init__(self, **properties):
        super(GstServer, self).__init__(**properties)
        self.factory = SensorFactory()
        # Share a single media pipeline among all connected clients.
        self.factory.set_shared(True)
        self.get_mount_points().add_factory("/test", self.factory)
        # Attach to the default GLib main context; serving starts once a
        # main loop runs.
        self.attach(None)
def run():
    """Initialize GStreamer, start the RTSP server, and block in the main loop."""
    # Legacy call required by old PyGObject; a harmless no-op on modern
    # versions.
    GObject.threads_init()
    Gst.init(None)
    server = GstServer()
    # NOTE(review): informational only -- the server listens on its default
    # port; this variable just feeds the banner below.
    rtsp_port_num = 8554
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/test ***\n\n" % rtsp_port_num)
    loop = GObject.MainLoop()
    # Blocks forever, dispatching GStreamer/RTSP events.
    loop.run()


if __name__ == "__main__":
    run()
import subprocess as sp
import signal
import numpy as np  # fixed alias: original read "import numpy as npbool" (typo)

# 此处换为你自己的地址 -> replace both addresses with your own.
# Destination RTSP server to publish to.
rtsp_url = 'rtsp://192.168.1.182:8554/video'
# Source camera stream.
cap = cv2.VideoCapture("rtsp://admin:hk123456@192.168.1.65:554")

# Get video information from the source.
fps = int(cap.get(cv2.CAP_PROP_FPS))
if fps <= 0:
    # Some RTSP sources report 0 FPS; "-r 0" would be rejected by ffmpeg.
    fps = 25
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Frames are resized to this geometry before piping, so ffmpeg's -s option
# must advertise the SAME size. (The original passed the capture's native
# width/height while piping 1080x640 frames, corrupting the raw stream.)
out_width, out_height = 1080, 640

command = ['ffmpeg',
           '-y',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-pix_fmt', 'bgr24',   # OpenCV frames are BGR
           '-s', "{}x{}".format(out_width, out_height),
           '-r', str(fps),
           '-i', '-',             # raw frames arrive on stdin
           # '-c:v', 'libx264',
           '-c:v', 'libx265',
           '-pix_fmt', 'yuv420p',
           '-preset', 'ultrafast',
           '-f', 'rtsp',
           rtsp_url]
p = sp.Popen(command, stdin=sp.PIPE)

while cap.isOpened():
    ret, frame = cap.read()
    # Check ret BEFORE touching frame: a failed read yields frame=None and
    # the original resized first, crashing instead of exiting cleanly.
    if not ret:
        print("Opening camera is failed")
        break
    frame = cv2.resize(frame, (out_width, out_height))
    # frame = 你的图像处理的函数(frame) -> hook your processing here
    # tobytes() replaces the deprecated/removed ndarray.tostring().
    p.stdin.write(frame.tobytes())
    # cv2.imshow('img', frame)
    # if cv2.waitKey(1) == ord('q'):  # q to quit
    #     raise StopIteration

10
main.py

@ -24,6 +24,7 @@ import torch.backends.cudnn as cudnn
from yolov5.utils.Point2GPS import *
import ec.countunit
from ec.rtsppushserver import RtstPushServer
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
@ -81,7 +82,9 @@ def detect(opt, save_img=False):
save_path = str(Path(out))
txt_path = str(Path(out)) + '/results.txt'
framenumber=0
rtstPushServer = RtstPushServer(9,opt.view_img_size[0],opt.view_img_size[1],'rtsp://192.168.1.182:8554/video')
for frame_idx, (path, img, im0s, vid_cap) in enumerate(dataset):
draw_box=[]
# if(framenumber==0):
@ -182,6 +185,7 @@ def detect(opt, save_img=False):
im0=annotator.cvimg()
im0= ec.countunit.addcolorimg(im0)
rtstPushServer.write(im0)
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
@ -233,9 +237,9 @@ if __name__ == '__main__':
default='yolov5/weights/yolov5s.pt', help='model.pt path')
# file/folder, 0 for webcam
parser.add_argument('--source', type=str,
# default='rtsp://admin:hk123456@192.168.1.65:554', help='source')
default='rtsp://admin:hk123456@192.168.1.65:554', help='source')
# default='F:/AIData/video/hqxjy.MP4', help='source')
default='rtsp://admin:hk123456@120.240.37.42:554', help='source')
# default='rtsp://admin:hk123456@120.240.37.42:554', help='source')
parser.add_argument('--output', type=str, default='inference/output',
help='output folder') # output folder

9
main2.py

@ -25,6 +25,7 @@ import torch.backends.cudnn as cudnn
from yolov5.utils.Point2GPS import *
from ec.countunit import addcolorimg,addtitlerectangle,addcount,countlist,clsnames
from detector import Detector
from ec.rtsppushserver import RtstPushServer
# palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
@ -59,7 +60,9 @@ def detect(opt, save_img=False):
save_path = str(Path(out))
txt_path = str(Path(out)) + '/results.txt'
vid_cap = cv2.VideoCapture('rtsp://admin:hk123456@120.240.37.42:554')
vid_cap = cv2.VideoCapture(opt.source)#'rtsp://admin:hk123456@120.240.37.42:554')
fps = vid_cap.get(cv2.CAP_PROP_FPS) % 100
rtstPushServer = RtstPushServer(5,opt.view_img_size[0],opt.view_img_size[1],'rtsp://192.168.1.182:8554/video')
n=0
while True:
# 读取每帧图片
@ -145,6 +148,7 @@ def detect(opt, save_img=False):
im0=annotator.cvimg()
im0=addcolorimg(im0)
rtstPushServer.write(im0)
cv2.imshow("monitor", im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
@ -184,7 +188,8 @@ if __name__ == '__main__':
# file/folder, 0 for webcam
parser.add_argument('--source', type=str,
# default='F:/AIData/video/test.MP4', help='source')
default='rtsp://admin:hk123456@120.240.37.42:554', help='source')
# default='rtsp://admin:hk123456@120.240.37.42:554', help='source')
default='rtsp://admin:hk123456@192.168.1.65:554', help='source')
parser.add_argument('--output', type=str, default='inference/output',
help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=960,

9
yolov5/utils/datasets.py

@ -274,12 +274,13 @@ class LoadStreams: # multiple IP or RTSP cameras
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
self.w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
self.outfps=int(self.fps/3)
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
print(' success (%gx%g at %.2f FPS).' % (self.w, self.h, self.fps))
thread.start()
print('') # newline

Loading…
Cancel
Save