@@ -1,11 +1,16 @@
import argparse
import cv2 as cv
import numpy as np
from tqdm import tqdm
import os
import signal
import multiprocessing as mp
os.environ['DISPLAY'] = ':0'
from config.config import PARAMS
from src.numberPlateRoiDetection import NumberPlateROIDetection
from src.objectDetection import ObjectDetection
@@ -13,6 +18,9 @@ from src.ocrNumberPlate import get_number_plate_ocr_from_rois
from src.parkingDetection import ParkingDetection
from src.trackingManager import TrackerManager
# TODO: run object detection in two worker processes fed through a queue (queue1)
# TODO: implement the queue that hands (frame, frame_id) pairs taken from queue1 to the workers (see the sketch below)
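# --- Hedged sketch for the two TODOs above (not wired into the app): a
# --- long-lived detection worker that keeps pulling (frame, frame_id) pairs
# --- from an input queue (the "queue1" of the TODOs) and lets the existing
# --- run_object_detection() put its results on a result queue. The queue
# --- layouts and the positional call below are assumptions based on how
# --- run_object_detection is invoked further down in this file.
def detection_worker(detector, frame_queue, result_queue, img_h, img_w):
    while True:
        item = frame_queue.get()   # blocks until a frame arrives or a sentinel is sent
        if item is None:           # None is used here as the shutdown sentinel
            break
        frame, frame_id = item
        detector.run_object_detection(frame, frame_id, img_h, img_w, result_queue)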
class TrafficApp(object):
def __init__(self,args):
self.args = args
@@ -28,9 +36,16 @@ class TrafficApp(object):
if self.args.video is not None:
self.vid_writer = None
self.runVideoFlow()
def runVideoFlow(self):
frame_count = 0
# frame ids advance by 2 per loop pass below because two frames are read per iteration
frame1_id = -1
frame2_id = 0
vehicleDetectionQueue = mp.Queue()
framesList = []
outputfile = "output-{}.mp4".format(os.path.splitext(os.path.basename(self.args.video))[0])
if self.args.video is not None:
try:
videoObj = cv.VideoCapture(self.args.video)
@@ -67,62 +82,85 @@ class TrafficApp(object):
print('Frames-{},Height-{}, Width-{}'.format(totalFrames,imgH,imgW))
if self.args.saveoutput and (imgH > 0 and imgW > 0):
self.vid_writer = cv.VideoWriter(outputfile,
cv.VideoWriter_fourcc(*"MJPG"), 30,
(round(imgW),round(imgH)))
progress_bar = tqdm(total=totalFrames)
# start reading frames, two per pass (one for each detection process)
while True:
grabbed1, frame1 = videoObj.read()
grabbed2, frame2 = videoObj.read()
frame1_id += 2
frame2_id += 2
#frame[:,450:,:] = 0
# end of frame
if not grabbed1 or not grabbed2:
# end of stream (a trailing odd frame is ignored)
progress_bar.close()
break
frame_count += 1  # counts loop passes; two frames are consumed per pass
progress_bar.update(2)
#print('Frame_count-',frame_count)
# run object detection on this pair of frames, one worker process per frame
objectDetectionProcess1=mp.Process(name='Object Detection Process 1',target=self.objectDetection.run_object_detection, args=(frame1.copy(),frame1_id,imgH,imgW,vehicleDetectionQueue))
objectDetectionProcess2=mp.Process(name='Object Detection Process 2',target=self.objectDetection.run_object_detection, args=(frame2.copy(),frame2_id,imgH,imgW,vehicleDetectionQueue))
objectDetectionProcess1.start()
objectDetectionProcess2.start()
print(f'{objectDetectionProcess1.name},{objectDetectionProcess1.pid},\n' )
print(f'{objectDetectionProcess2.name},{objectDetectionProcess2.pid} \n' )
#print(f'Vehicle detection Queue size = {vehicleDetectionQueue.qsize()}')
#img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW)
# fetch BOTH results before join(); leaving one behind each iteration grows the queue
# and can eventually block the workers (and then join()) on a full pipe
img, frameId, boxes, confidences, classids, idxs, status = vehicleDetectionQueue.get()
img2, frameId2, boxes2, confidences2, classids2, idxs2, status2 = vehicleDetectionQueue.get()
# store frames and ids; only the first result (arrival order is not guaranteed) is analysed below
framesList.append((img, frameId))
framesList.append((img2, frameId2))
print(f'frames stored = {len(framesList)} \n')
'''Assign Trackers'''
object_detect_info = [boxes, confidences, classids, idxs, status]
bbox_labels_tracking = self.parseObjDetectInfo(object_detect_info)
TrackerManager.FrameCount = frame_count
TrackerManager.manageTracker(bbox_labels_tracking)
''' Get Parking Status'''
if PARAMS._ALGO_MODE_PARKING:
self.parkingDetection.getParkingStatus(TrackerManager.TrackerList)
'''Filter ROIs for Number Plate Detection'''
tentative_numberplate_rois = self.objectDetection.filterRoiforNumberPlate(boxes, classids, idxs)
''' Get Number Plate ROI'''
detected_np_info = self.numberPlateDetection.run_number_plate_detection_rois(image=img,rois=tentative_numberplate_rois)
''' Get Number plate OCR '''
number_plate_ocr_dict = get_number_plate_ocr_from_rois(img,detected_np_info, False)
objectDetectionProcess1.join()
objectDetectionProcess2.join()
#Display frame
displayFrame = self.displayFrame(img,detected_np_info,number_plate_ocr_dict,object_detect_info)
# ISSUE: how to stop the processes cleanly? New processes are spawned on every iteration (see the note after the end-of-cycle print below).
print(f'objectDetectionProcess1 is alive = {objectDetectionProcess1.is_alive()}\n')
print(f'objectDetectionProcess2 is alive = {objectDetectionProcess2.is_alive()}\n')
print("+++++++++++++++++++end of cycle++++++++++++++++++")
#winName = 'YOLOV3 Object Detection'
#cv.namedWindow(winName, cv.WINDOW_NORMAL)
#cv.imshow(winName, displayFrame)
#cv.resizeWindow('objectDetection',680,420)
if self.vid_writer:
self.vid_writer.write(displayFrame.astype(np.uint8))
c = cv.waitKey(1)
if c & 0xFF == ord('q'):
self.vid_writer.release()
videoObj.release()
break
def parseObjDetectInfo(self,object_roi_info):
boxes, confidences, classids, idxs, status = object_roi_info
#[[list of bbox ][list of conf and labels]]
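# Illustrative shape of object_roi_info (made-up values, not from a real run):
#   boxes       -> [[x, y, w, h], ...]   e.g. [[120, 80, 60, 40]]
#   confidences -> [0.87, ...]           one score per box
#   classids    -> [2, ...]              indices into the model's label list
#   idxs        -> indices kept by non-maximum suppression, e.g. [0]
#   status      -> the status flag returned by run_object_detection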
@@ -247,7 +285,7 @@ class TrafficApp(object):
if __name__ == '__main__':
import cProfile
app_profiler = cProfile.Profile()
parser = argparse.ArgumentParser(description='BitSilica Traffic Analysis Solution')
@@ -256,12 +294,14 @@ if __name__ == '__main__':
parser.add_argument('--realtime', help='Camera Connected Input')
parser.add_argument('--target', type=str,default = 'CPU',help='Target for CNN to run')
parser.add_argument('--saveoutput',type=bool,default=True, help='save video or not')
parser.add_argument('--debug',type=bool,default=False, help='print time taken by function')
args = parser.parse_args()
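# Example invocation (script and video names are illustrative only):
#   python trafficApp.py --video ./data/sample_traffic.mp4
# NOTE: argparse's type=bool is a known gotcha: any non-empty value, including
# "False", parses as True, so --saveoutput/--debug are effectively always True
# when given a value; action='store_true' style flags would behave more predictably.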
#enable profiler here.
app_profiler.enable()
app = TrafficApp(args = args)
#disable profiler here.
app_profiler.disable()
profile_name = '{}.prof'.format(os.path.splitext(os.path.basename(args.video))[0])
app_profiler.dump_stats(profile_name)
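# The dump can be inspected afterwards without re-running the app, e.g. with the
# standard-library pstats module (command is illustrative; use the actual .prof name):
#   python -c "import pstats; pstats.Stats('<video-name>.prof').sort_stats('cumulative').print_stats(15)"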