diff --git a/config/config.py b/config/config.py
index 4e6dbcb..a316f73 100644
--- a/config/config.py
+++ b/config/config.py
@@ -49,4 +49,9 @@ class PARAMS(object):
     #logo
-    LOGO_FILE_PATH = os.path.join(_CMD_PMT_,'resources/logo.png')
\ No newline at end of file
+    LOGO_FILE_PATH = os.path.join(_CMD_PMT_,'resources/logo.png')
+
+    #Shareable variables: maximum queue sizes
+    _MAX_INPUT_QUEUE_SIZE = 200
+    _MAX_VEHICLE_DETECTION_QUEUE_SIZE = 200
+    _MAX_OUTPUT_QUEUE_SIZE = 200
\ No newline at end of file
diff --git a/src/trafficApp.ipynb b/src/trafficApp.ipynb
deleted file mode 100644
index 908889d..0000000
--- a/src/trafficApp.ipynb
+++ /dev/null
@@ -1,343 +0,0 @@
-{
- "metadata": {
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": 3
-  },
-  "orig_nbformat": 2
- },
- "nbformat": 4,
- "nbformat_minor": 2,
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import argparse\n",
-    "\n",
-    "import cv2 as cv\n",
-    "import numpy as np\n",
-    "from tqdm import tqdm\n",
-    "import os\n",
-    "os.environ['DISPLAY'] = ':0'\n",
-    "\n",
-    "from config.config import PARAMS\n",
-    "from src.numberPlateRoiDetection import NumberPlateROIDetection\n",
-    "from src.objectDetection import ObjectDetection\n",
-    "from src.ocrNumberPlate import get_number_plate_ocr_from_rois\n",
-    "from src.parkingDetection import ParkingDetection\n",
-    "from src.trackingManager import TrackerManager\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class TrafficApp(object):\n",
-    "    def __init__(self,args):\n",
-    "        self.args = args\n",
-    "\n",
-    "        #get Object Detection Up\n",
-    "        self.objectDetection = ObjectDetection(debug=args.debug,target=args.target)\n",
-    "        self.numberPlateDetection = NumberPlateROIDetection(args= args,algoType='NumberPlate')\n",
-    "        self.parkingDetection = None #intilize later when we will have height/width\n",
-    "        np.random.seed(41)\n",
-    "\n",
-    "        #fix color\n",
-    "        self.colorToDisplay = {'numberplate':(0,255,255),'car':(0,255,0),'bus':(128,255,0),'truck':(0,0,255),'moterbike':(255,0,255),'ocr':(0,140,240)}\n",
-    "        if self.args.video is not None:\n",
-    "            self.vid_writer = None\n",
-    "            self.runVideoFlow()\n",
-    "\n",
-    "    def runVideoFlow(self):\n",
-    "        frame_count = 0\n",
-    "        if args.video is not None:\n",
-    "            try:\n",
-    "                videoObj = cv.VideoCapture(args.video)\n",
-    "                imgH, imgW = None, None\n",
-    "                writer = None\n",
-    "            except:\n",
-    "                raise Exception('Video cannot be loaded! Please check the path provided!')\n",
-    "\n",
-    "            finally:\n",
-    "                try:\n",
-    "                    totalFrames = videoObj.get(cv.cv.CV_CAP_PROP_FRAME_COUNT)\n",
-    "                except:\n",
-    "                    totalFrames = -1\n",
-    "\n",
-    "                try:\n",
-    "                    totalFrames = videoObj.get(cv.CAP_PROP_FRAME_COUNT)\n",
-    "                except:\n",
-    "                    totalFrames = -1\n",
-    "                try:\n",
-    "                    imgH = int(videoObj.get(cv.CAP_PROP_FRAME_HEIGHT))\n",
-    "                    imgW = int(videoObj.get(cv.CAP_PROP_FRAME_WIDTH))\n",
-    "                    TrackerManager.FrameHeight = imgH\n",
-    "                    TrackerManager.FrameWidth = imgW\n",
-    "                    print('Height, Width',imgH,imgW)\n",
-    "                    if PARAMS._ALGO_MODE_PARKING:\n",
-    "                        self.parkingDetection = ParkingDetection(imgW=imgW,imgH=imgH)\n",
-    "                        self.parkingDetection.getParkingRegionMask()\n",
-    "                    #videoObj.set(cv.CAP_PROP_POS_FRAMES, 225)\n",
-    "                except:\n",
-    "                    imgH = -1\n",
-    "                    imgW = -1\n",
-    "                    raise ValueError('Issue with video')\n",
-    "                if self.args.debug:\n",
-    "                    print('Frames-{},Height-{}, Width-{}'.format(totalFrames,imgH,imgW))\n",
-    "\n",
-    "                if self.args.saveoutput and (imgH > 0 and imgW > 0):\n",
-    "                    self.vid_writer = cv.VideoWriter(self.args.outputfile,\n",
-    "                                        cv.VideoWriter_fourcc(*\"MJPG\"), 30,\n",
-    "                                        (round(imgW),round(imgH)))\n",
-    "\n",
-    "                progress_bar=tqdm(total = totalFrames)\n",
-    "                # start reading frame\n",
-    "                while True:\n",
-    "                    grabbed, frame = videoObj.read()\n",
-    "                    #frame[:,450:,:] = 0\n",
-    "                    # end of frame\n",
-    "                    if not grabbed:\n",
-    "                        break\n",
-    "                    frame_count +=1\n",
-    "\n",
-    "                    #print('Frame_count-',frame_count)\n",
-    "                    #Use jump argument to skip frames.\n",
-    "                    if (frame_count % self.args.jump == 0):\n",
-    "\n",
-    "                        # get object detection on this frame\n",
-    "                        img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW)\n",
-    "                        '''Assign Trcakers'''\n",
-    "                        object_detect_info = [boxes, confidences, classids, idxs, status]\n",
-    "                        bbox_labels_tracking = self.parseObjDetectInfo(object_detect_info)\n",
-    "                        TrackerManager.FrameCount = frame_count\n",
-    "                        TrackerManager.manageTracker(bbox_labels_tracking)\n",
-    "\n",
-    "                        ''' Get Parking Status'''\n",
-    "                        if PARAMS._ALGO_MODE_PARKING:\n",
-    "                            self.parkingDetection.getParkingStatus(TrackerManager.TrackerList)\n",
-    "\n",
-    "                        '''Filter ROIs for Number Plate Detection'''\n",
-    "                        tentative_numberplate_rios = self.objectDetection.filterRoiforNumberPlate(boxes, classids, idxs)\n",
-    "\n",
-    "\n",
-    "                        ''' Get Number Plate ROI'''\n",
-    "                        detected_np_info = self.numberPlateDetection.run_number_plate_detection_rois(image=frame.copy(),rois=tentative_numberplate_rios)\n",
-    "\n",
-    "\n",
-    "                        ''' Get Number plate OCR '''\n",
-    "                        number_plate_ocr_dict = get_number_plate_ocr_from_rois(frame.copy(),detected_np_info, False)\n",
-    "\n",
-    "                        #Display frame\n",
-    "                        displayFrame = self.displayFrame(frame.copy(),detected_np_info,number_plate_ocr_dict,object_detect_info)\n",
-    "\n",
-    "                        winName = 'YOLOV3 Object Detection'\n",
-    "                        cv.namedWindow(winName, cv.WINDOW_NORMAL)\n",
-    "                        cv.imshow(winName, displayFrame)\n",
-    "                        #cv.resizeWindow('objectDetection',680,420)\n",
-    "                        if self.vid_writer:\n",
-    "                            self.vid_writer.write(displayFrame.astype(np.uint8))\n",
-    "                        c = cv.waitKey(1)\n",
-    "                        if c & 0xFF == ord('q'):\n",
-    "                            self.vid_writer.release()\n",
-    "                            videoObj.release()\n",
-    "                            break\n",
-    "                progress_bar.close()\n",
-    "    def parseObjDetectInfo(self,object_roi_info):\n",
-    "        boxes, confidences, classids, idxs, status = object_roi_info\n",
-    "        #[[list of bbox ][list of conf and labels]]\n",
-    "        bboxList =[]\n",
-    "        confidence_labels = []\n",
-    "        if len(idxs) > 0 and status:\n",
-    "            for i in idxs.flatten():\n",
-    "                # Get the bounding box coordinates\n",
-    "                if self.objectDetection.labels[classids[i]] not in PARAMS._TRACKER_OBJECT_LIST +\\\n",
-    "                    PARAMS._YOLOV3_OD_NUMBER_PLATE_OBJECT_LIST:\n",
-    "                    continue\n",
-    "                x, y = boxes[i][0], boxes[i][1]\n",
-    "                w, h = boxes[i][2], boxes[i][3]\n",
-    "                bboxList.append ([x,y,w,h])\n",
-    "                confidence_labels.append([confidences[i],self.objectDetection.labels[classids[i]]])\n",
-    "        return [bboxList,confidence_labels]\n",
-    "\n",
-    "    def displayFrame(self,displayFrame,numberplate_roi,number_plate_ocr_dict,object_roi_info):\n",
-    "        debug = self.args.debug\n",
-    "        if PARAMS._ALGO_MODE_NUMBER_PLATE:\n",
-    "            #for nuber plate\n",
-    "            for idx,roiinfo in enumerate(numberplate_roi):\n",
-    "                conf, classID, roi = roiinfo\n",
-    "                x, y, w, h = roi\n",
-    "                cv.rectangle(displayFrame, (x, y), (x + w, y + h), self.colorToDisplay['numberplate'], 2)\n",
-    "                text = \"{}: {:.3f}\".format(self.numberPlateDetection.labels[classID], conf)\n",
-    "                #cv.putText(displayFrame, text, (x, y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, self.colorToDisplay['numberplate'], 1)\n",
-    "\n",
-    "                #add Number plate OCR\n",
-    "                if number_plate_ocr_dict[idx]:\n",
-    "                    thickness = 4\n",
-    "                    font_face = cv.FONT_HERSHEY_SIMPLEX\n",
-    "                    font_scale = 1\n",
-    "                    cv.putText(displayFrame, number_plate_ocr_dict[idx], (x, y-5), font_face, font_scale,\\\n",
-    "                        self.colorToDisplay['ocr'], thickness)\n",
-    "\n",
-    "        if False:\n",
-    "            #for objects\n",
-    "            boxes, confidences, classids, idxs, status = object_roi_info\n",
-    "            if len(idxs) > 0 and status:\n",
-    "                for i in idxs.flatten():\n",
-    "                    # Get the bounding box coordinates\n",
-    "                    x, y = boxes[i][0], boxes[i][1]\n",
-    "                    w, h = boxes[i][2], boxes[i][3]\n",
-    "\n",
-    "                    # Get the unique color for this class\n",
-    "                    if self.objectDetection.labels[classids[i]] in self.colorToDisplay:\n",
-    "                        color = self.colorToDisplay[self.objectDetection.labels[classids[i]]]\n",
-    "                    else:\n",
-    "                        color = [int(c) for c in self.objectDetection.colors[classids[i]]]\n",
-    "                    #color = (255,255,255)\n",
-    "                    # Draw the bounding box rectangle and label on the image\n",
-    "                    cv.rectangle(displayFrame, (x, y), (x + w, y + h), color, 2)\n",
-    "                    text = \"{}: {:.3f}\".format(self.objectDetection.labels[classids[i]], confidences[i])\n",
-    "                    cv.putText(displayFrame, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n",
-    "\n",
-    "\n",
-    "        if True:\n",
-    "            if len(TrackerManager.DetectionWithNoTracker)>0:\n",
-    "                color = (0,0,0)\n",
-    "                for item in TrackerManager.DetectionWithNoTracker:\n",
-    "                    bbox,(conf,label) = item\n",
-    "                    x,y,w,h = bbox\n",
-    "                    # Draw the bounding box rectangle and label on the image\n",
-    "                    cv.rectangle(displayFrame, (x, y), (x + w, y + h), color, 2)\n",
-    "                    if debug:\n",
-    "                        text = \"NotTrack-{}: {:.3f}\".format(label,conf)\n",
-    "                        cv.putText(displayFrame, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n",
-    "\n",
-    "        if PARAMS._ALGO_MODE_PARKING:\n",
-    "            cv.line(displayFrame,PARAMS._NO_PARAKING_LINE_POINT_1_XY,PARAMS._NO_PARAKING_LINE_POINT_2_XY,\\\n",
-    "                (0,0,255),3,2)\n",
-    "\n",
-    "        if PARAMS._ALGO_MODE_KALMAN_TRCAKING:\n",
-    "            if len(TrackerManager.TrackerList) > 0:\n",
-    "                color = (0,255,0)\n",
-    "                for tracker in TrackerManager.TrackerList:\n",
-    "                    bbox = tracker.curr_frame_predict_bbox\n",
-    "                    x,y,w,h = np.int32(bbox)\n",
-    "                    missframe = tracker.objectInfo.ObjectTrackerMissedFrame\n",
-    "                    direction = 'XX' if tracker.objectInfo.ObjectDirection is None else tracker.objectInfo.ObjectDirection\n",
-    "                    objectType = tracker.objectInfo.ObjectType\n",
-    "                    objectID = tracker.objectID\n",
-    "                    if not tracker.objectInfo.ObjectParkingStatus:\n",
-    "                        cv.rectangle(displayFrame, (x, y), (x + w, y + h), color, 2)\n",
-    "                    else:\n",
-    "                        cv.rectangle(displayFrame, (x, y), (x + w, y + h), (0,0,0), 3)\n",
-    "\n",
-    "                    #update curr box by which it was updated\n",
-    "                    if False:\n",
-    "                        bbox_detect = tracker.curr_frame_update_bbox\n",
-    "                        xp,yp,wp,hp = bbox_detect\n",
-    "                        cv.rectangle(displayFrame, (xp, yp), (xp + wp, yp + hp), (0,255,255), 2)\n",
-    "                    if debug:\n",
-    "                        text = \"{}-f{}-{}\".format(objectID,missframe,direction)\n",
-    "                    else:\n",
-    "                        text = \"{}\".format(direction)\n",
-    "\n",
-    "                    if tracker.objectInfo.ObjectParkingStatus and PARAMS._ALGO_MODE_PARKING:\n",
-    "                        if tracker.objectInfo.ObjectType in PARAMS._YOLOV3_OD_NUMBER_PLATE_OBJECT_LIST:\n",
-    "                            text = \"{}\".format(PARAMS._PARKING_STRING)\n",
-    "                            font_scale = 1.5\n",
-    "                            font = cv.FONT_HERSHEY_SIMPLEX #PLAIN #cv.FONT_HERSHEY_SIMPLEX\n",
-    "                            # set the rect bg - BLACK\n",
-    "                            rect_bgr = (0,0,0)\n",
-    "                            # get the width and height of the text box\n",
-    "                            (text_width, text_height) = np.int32(cv.getTextSize(text, font, fontScale=font_scale, thickness=2)[0])\n",
-    "                            # make the coords of the box with a small padding of two pixels\n",
-    "                            box_coords = ((x, y), (x + text_width + 5, y - text_height - 5))\n",
-    "                            cv.rectangle(displayFrame, box_coords[0], box_coords[1], rect_bgr, cv.FILLED)\n",
-    "                            cv.putText(displayFrame, text, (x, y), font, fontScale=font_scale, color=(0, 0, 255),thickness=2)\n",
-    "        if True:\n",
-    "            imglogo = cv.imread(PARAMS.LOGO_FILE_PATH)\n",
-    "            logo = cv.resize(imglogo,dsize=(300,100),interpolation=cv.INTER_LINEAR)\n",
-    "            h,w,c = logo.shape\n",
-    "            H,W,C = displayFrame.shape\n",
-    "            displayFrame[0:h,W-w-10:W-10,:] = logo\n",
-    "\n",
-    "        return displayFrame\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ]
-}
\ No newline at end of file
diff --git a/src/trafficAppFast.py b/src/trafficAppFast.py
new file mode 100644
index 0000000..0498f99
--- /dev/null
+++ b/src/trafficAppFast.py
@@ -0,0 +1,190 @@
+import argparse
+import multiprocessing as mp
+import os
+import threading as th
+import time
+from datetime import datetime
+from queue import Queue
+import numpy as np
+from config.config import PARAMS
+from src.numberPlateRoiDetection import NumberPlateROIDetection
+from src.objectDetection import ObjectDetection
+from src.ocrNumberPlate import get_number_plate_ocr_from_rois
+from src.parkingDetection import ParkingDetection
+from src.trackingManager import TrackerManager
+os.environ['DISPLAY'] = ':0'
+
+import cv2
+
+inputQueue = mp.Queue(PARAMS._MAX_INPUT_QUEUE_SIZE)
+vehicleDetectionQueue = mp.Queue(PARAMS._MAX_VEHICLE_DETECTION_QUEUE_SIZE)
+outputQueue = mp.Queue(PARAMS._MAX_OUTPUT_QUEUE_SIZE)
+IMAGE_HEIGHT = mp.Value('i',0)
+IMAGE_WIDTH = mp.Value('i',0)
+colorToDisplay = {'numberplate':(0,255,255),'car':(0,255,0),'bus':(128,255,0),'truck':(0,0,255),'moterbike':(255,0,255),'ocr':(0,140,240)}
+
+
+class ReadFrame(th.Thread):
+    global inputQueue
+    global IMAGE_HEIGHT,IMAGE_WIDTH
+    def __init__(self,args,name='Input thread',custom_id=1) -> None:
+        super().__init__()
+        self.frameId = 1
+        self.stopped = False
+        self.grabbed = True
+        self.name = f'{name} {custom_id}'
+        self.videoCaptureObject = cv2.VideoCapture(args.video)
+        if self.videoCaptureObject.isOpened():
+            IMAGE_HEIGHT.value = int(self.videoCaptureObject.get(cv2.CAP_PROP_FRAME_HEIGHT))
+            IMAGE_WIDTH.value = int(self.videoCaptureObject.get(cv2.CAP_PROP_FRAME_WIDTH))
+            print(f'Reading from source = {args.video}')
+
+    def run(self):
+        while self.grabbed:
+            (self.grabbed, self.frame) = self.videoCaptureObject.read()
+            if not self.grabbed:
+                #end of stream: do not enqueue the empty frame
+                break
+            inputQueue.put((self.frame,self.frameId))
+            print(f"{self.name} frame added with id {self.frameId}\n")
+            self.frameId+=1
+        print('--Done reading frames--\n')
+        self.videoCaptureObject.release()
+        return
+
+class VehicleDetection(mp.Process):
+    global inputQueue
+    global vehicleDetectionQueue
+    def __init__(self,args,name='Vehicle Detection Process',custom_id=1):
+        super(VehicleDetection,self).__init__()
+        self.name = f'{name} {custom_id}'
+        self.objectDetection = ObjectDetection(debug=args.debug,target=args.target)
+        TrackerManager.FrameHeight = IMAGE_HEIGHT.value
+        TrackerManager.FrameWidth = IMAGE_WIDTH.value
+        if PARAMS._ALGO_MODE_PARKING:
+            self.parkingDetection = ParkingDetection(imgW=IMAGE_WIDTH.value,imgH=IMAGE_HEIGHT.value)
+            self.parkingDetection.getParkingRegionMask()
+
+    def parseObjDetectInfo(self,object_roi_info):
+        boxes, confidences, classids, idxs, status = object_roi_info
+        #[[list of bbox ][list of conf and labels]]
+        bboxList =[]
+        confidence_labels = []
+        if len(idxs) > 0 and status:
+            for i in idxs.flatten():
+                #keep only the labels we track or read number plates from
+                if self.objectDetection.labels[classids[i]] not in PARAMS._TRACKER_OBJECT_LIST +\
+                    PARAMS._YOLOV3_OD_NUMBER_PLATE_OBJECT_LIST:
+                    continue
+                x, y = boxes[i][0], boxes[i][1]
+                w, h = boxes[i][2], boxes[i][3]
+                bboxList.append([x,y,w,h])
+                confidence_labels.append([confidences[i],self.objectDetection.labels[classids[i]]])
+        return [bboxList,confidence_labels]
+
+    def run(self):
+        while True:
+            #Exit condition: an empty queue is treated as end of stream,
+            #which assumes the reader thread always stays ahead of detection.
+            if inputQueue.qsize() == 0:
+                vehicleDetectionQueue.put(None)
+                print(f'{self.name} exiting !! \n')
+                return
+            (frame,frameId) = inputQueue.get()
+            print(f'{self.name} {frameId}\n')
+            (img_objectMarking, boxes, confidences, classids, idxs,status) = self.objectDetection.run_object_detection(frame.copy(),imageH=IMAGE_HEIGHT.value,imageW=IMAGE_WIDTH.value)
+
+            '''Assigning Trackers'''
+            object_detect_info = [boxes, confidences, classids, idxs, status]
+            bbox_labels_tracking = self.parseObjDetectInfo(object_detect_info)
+            TrackerManager.FrameCount = frameId
+            TrackerManager.manageTracker(bbox_labels_tracking)
+
+            '''Getting Parking status'''
+            if PARAMS._ALGO_MODE_PARKING:
+                self.parkingDetection.getParkingStatus(TrackerManager.TrackerList)
+
+            '''Filter ROIs for Number Plate Detection'''
+            tentative_numberplate_rois = self.objectDetection.filterRoiforNumberPlate(boxes, classids, idxs)
+
+            vehicleDetectionQueue.put((frame,frameId,tentative_numberplate_rois))
+
+class NumberPlateOcr(mp.Process):
+    global vehicleDetectionQueue
+    global outputQueue
+    def __init__(self,name='Number plate OCR Process',custom_id=1):
+        super(NumberPlateOcr,self).__init__()
+        self.name=f'{name} {custom_id}'
+
+    def run(self):
+        while True:
+            value = vehicleDetectionQueue.get()
+            if value is None:
+                print(f'{self.name} exiting !! \n')
+                outputQueue.put(None)
+                return
+            #VehicleDetection enqueues a 3-tuple (frame, frameId, rois): unpack all three
+            (frame,frameId,numberplate_rois) = value
+            print(f"{self.name} Got frame with ID {frameId}\n")
+            #OCR is still a stub: simulate the processing time for now.
+            time.sleep(.25)
+            outputQueue.put((frame,frameId))
+
+class OutputFrame(th.Thread):
+    global IMAGE_HEIGHT,IMAGE_WIDTH
+    global outputQueue
+    def __init__(self,name='output thread',custom_id=1,outputfilename="output.avi"):
+        super().__init__()
+        self.name = f'{name} {custom_id}'
+        self.outputfilename = outputfilename
+        print(f'frame size {IMAGE_HEIGHT.value} {IMAGE_WIDTH.value}')
+        self.videoWriterObject = cv2.VideoWriter(outputfilename,cv2.VideoWriter_fourcc(*'MJPG'),30,(IMAGE_WIDTH.value,IMAGE_HEIGHT.value))
+
+    def run(self):
+        while True:
+            try:
+                value = outputQueue.get()
+                if value is None:
+                    #flush the output file before exiting
+                    self.videoWriterObject.release()
+                    return
+                (frame,frameId) = value
+                print(f'{self.name} got frame with ID {frameId} shape = {frame.shape}')
+                self.videoWriterObject.write(frame.astype(np.uint8))
+            except AttributeError:
+                continue
+
+
+if __name__ == '__main__':
+    import cProfile
+
+    app_profiler = cProfile.Profile()
+    parser = argparse.ArgumentParser(description='BitSilica Traffic Analysis Solution')
+    parser.add_argument('--image', help='Full Path to image file.')
+    parser.add_argument('--video', help='Full Path to video file.')
+    parser.add_argument('--realtime', help='Camera Connected Input')
+    parser.add_argument('--target', type=str,default = 'CPU',help='Target for CNN to run')
+    parser.add_argument('--saveoutput',type=bool,default=True, help='save video or not')
+    parser.add_argument('--debug',type=bool,default=False, help='print time taken by function')
+
+    args = parser.parse_args()
+    #Name of the output video file.
+    outputvideo = f'output {os.path.basename(args.video)[:-4]} {datetime.now()}.avi'
+    print(f'-----> Writing to file {outputvideo} <-------\n')
+    #enable profiler here.
+
+    app_profiler.enable()
+
+    readFramesThread = ReadFrame(args)
+    vehicleDetectionProcess = VehicleDetection(args)
+    readFramesThread.start()
+    time.sleep(.25)
+    vehicleDetectionProcess.start()
+
+    readFramesThread.join()
+    vehicleDetectionProcess.join()
+
+    #disable profiler here.
+    app_profiler.disable()
+
+    profile_name = '{}.prof'.format(os.path.basename(args.video)[0:-4])
+    print("------------------------\nEnd of execution, dumping profile stats\n-------------------------")
+    app_profiler.dump_stats(profile_name)
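
Review note: trafficAppFast.py defines a four-stage pipeline (ReadFrame -> VehicleDetection -> NumberPlateOcr -> OutputFrame), but __main__ currently starts and joins only the first two stages, so nothing drains vehicleDetectionQueue once it fills. A minimal sketch of wiring in the remaining stages, using only the classes and names introduced in this diff (the start-up delay mirrors the existing time.sleep(.25) and is arbitrary):

    readFramesThread = ReadFrame(args)
    vehicleDetectionProcess = VehicleDetection(args)
    numberPlateOcrProcess = NumberPlateOcr()
    outputFrameThread = OutputFrame(outputfilename=outputvideo)

    readFramesThread.start()
    time.sleep(.25)  # give the reader a head start so inputQueue is not empty
    vehicleDetectionProcess.start()
    numberPlateOcrProcess.start()
    outputFrameThread.start()

    readFramesThread.join()
    vehicleDetectionProcess.join()
    numberPlateOcrProcess.join()
    outputFrameThread.join()

This sketch assumes the end-of-stream convention already used in the diff: VehicleDetection forwards None when inputQueue is empty, NumberPlateOcr forwards None to outputQueue, and OutputFrame releases the video writer and exits when it receives None.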