@@ -0,0 +1,30 @@ | |||
from threading import Thread | |||
import time | |||
import random | |||
from queue import Queue | |||
queue = Queue(maxsize=10)  # shared bounded buffer between the producer and consumer threads
class ProducerThread(Thread):
    """Endlessly publish random integers from 0..39 onto the shared `queue`."""

    def run(self):
        candidates = range(40)
        while True:
            picked = random.choice(candidates)
            queue.put(picked)  # blocks when the bounded queue is full
            print("Produced", picked)
            time.sleep(random.random())
class ConsumerThread(Thread):
    """Endlessly drain the shared `queue`, acknowledging each item taken."""

    def run(self):
        while True:
            item = queue.get()  # blocks until the producer supplies an item
            queue.task_done()
            print("Consumed", item)
            time.sleep(random.random())
# Start one producer and one consumer; both loop forever (no join/daemon/shutdown handling).
ProducerThread().start()
ConsumerThread().start()
@@ -0,0 +1,44 @@ | |||
from threading import Thread | |||
import multiprocessing as mp | |||
import time | |||
import random | |||
from queue import Queue | |||
# Two bounded cross-process buffers (only inputQueue is exercised by the demo below).
inputQueue = mp.Queue(maxsize=10)
inputQueue2 = mp.Queue(maxsize=10)
class ProducerThread(Thread):
    """Feed random integers 0..39 into inputQueue (~20 items/sec), forever."""

    def run(self):
        pool = range(40)
        while True:
            chosen = random.choice(pool)
            inputQueue.put(chosen)  # blocks when the bounded queue is full
            print("\nPut", chosen)
            time.sleep(.05)
class ConsumerProcess1(mp.Process):
    """Drain inputQueue from a separate process, forever."""

    def run(self):
        while True:
            received = inputQueue.get()  # blocks until the producer supplies an item
            print("\nGot", received)
            time.sleep(.1)
class ConsumerProcess2(mp.Process):
    """Second consumer process, draining inputQueue2.

    Bug fix: the original class body was the bare statement ``global`` — a
    SyntaxError — and the class was never completed.  It is filled in here
    symmetrically to ConsumerProcess1 (same loop shape, other queue); the demo
    under ``__main__`` does not instantiate it, so behavior there is unchanged.
    """

    def run(self):
        while True:
            received = inputQueue2.get()  # blocks until something is produced
            print("\nGot", received)
            time.sleep(.1)
if __name__ == "__main__": | |||
print("this is example 2") | |||
time.sleep(2) | |||
a=ProducerThread() | |||
b=ConsumerProcess1() | |||
a.start() | |||
b.start() | |||
a.join() | |||
b.join() | |||
@@ -0,0 +1,100 @@ | |||
import os | |||
import cv2 | |||
import argparse | |||
import time | |||
import random | |||
import multiprocessing as mp | |||
import threading as th | |||
from queue import Queue | |||
# Inter-stage buffers: frames flow input -> vehicle detection -> number-plate OCR
# -> display -> output.  displayQueue is declared but no visible stage uses it.
inputQueue = mp.Queue()
vehicleDetectionQueue = mp.Queue()
numberPlateOcrQueue = mp.Queue()
displayQueue = mp.Queue()
outputQueue = mp.Queue()
class ReadFrame(th.Thread):
    """Read frames from a cv2 video source and enqueue (frame, frameId) tuples.

    Feeds ``inputQueue``; frame ids start at 1 and advance by 1 per grabbed
    frame.  The thread exits when the source runs out of frames.
    """

    def __init__(self, source) -> None:
        super().__init__()
        self.frameId = 1
        self.stopped = False
        self.grabbed = True
        self.videoCaptureObject = cv2.VideoCapture(source)
        print(f'Reading from source = {source}')

    def run(self):
        while True:
            (self.grabbed, self.frame) = self.videoCaptureObject.read()
            # Bug fix: the original put() ran before re-checking `grabbed`, so a
            # final (None, frameId) pair was enqueued after the last real frame,
            # poisoning the downstream stages.
            if not self.grabbed:
                break
            inputQueue.put((self.frame, self.frameId))
            print(f"IP frame added with id {self.frameId}\n")
            self.frameId += 1
        self.videoCaptureObject.release()  # free the capture handle when done
class VehicleDetection(mp.Process):
    """Pipeline stage: take frames off inputQueue and forward them downstream."""

    def run(self):
        while True:
            frame, frameId = inputQueue.get()
            print(f"\n VD Got frame with ID {frameId}")
            # Vehicle-detection work would happen here before forwarding.
            vehicleDetectionQueue.put((frame, frameId))
class NumberPlateOcr(mp.Process):
    """Pipeline stage: consume vehicle-detected frames and forward them for display."""

    def run(self):
        while True:
            frame, frameId = vehicleDetectionQueue.get()
            print(f"\n NP Got frame with ID {frameId}")
            # Number-plate OCR work would happen here before forwarding.
            numberPlateOcrQueue.put((frame, frameId))
class DisplayFrame(mp.Process):
    """Pipeline stage: take OCR'd frames, show them, and hand them to outputQueue."""

    def run(self):
        while True:
            frame, frameId = numberPlateOcrQueue.get()
            print(f'DF got frame with ID {frameId}')
            # Rendering/display of the frame (with its id) would happen here.
            outputQueue.put((frame, frameId))
class SaveOutput(mp.Process):
    """Placeholder sink: intended to collect (frame, frameId) pairs, sort them by
    id, and merge them into a video with some external tool. Not implemented."""
if __name__ == '__main__':
    import cProfile

    app_profiler = cProfile.Profile()
    parser = argparse.ArgumentParser(description='BitSilica Traffic Analysis Solution')
    parser.add_argument('--image', help=' Full Path to image file.')
    parser.add_argument('--video', help='Full Path to video file.')
    parser.add_argument('--realtime', help='Camera Connected Input')
    args = parser.parse_args()
    # NOTE(review): the profiler only brackets stage start-up — the stages run
    # forever in their own thread/processes, so their work is not captured here.
    app_profiler.enable()
    readFramesThread = ReadFrame(args.video)
    vehicleDetectionProcess = VehicleDetection()
    numberPlateOcrProcess = NumberPlateOcr()
    readFramesThread.start()
    vehicleDetectionProcess.start()
    numberPlateOcrProcess.start()
    app_profiler.disable()
    # Bug fix: 'temp.prof'.format(...) had no placeholder, so the format argument
    # was ignored and every run clobbered the same temp.prof file.  Name the dump
    # after the input video instead, as originally intended.
    profile_name = '{}.prof'.format(os.path.basename(args.video)[0:-4])
    print("------------------------\nEnd of execution, dumping profile stats\n-------------------------")
    app_profiler.dump_stats(profile_name)
@@ -1,12 +1,11 @@ | |||
import argparse | |||
import ntpath | |||
import cv2 as cv | |||
import numpy as np | |||
from tqdm import tqdm | |||
import os | |||
os.environ['DISPLAY'] = ':0' | |||
from config.config import PARAMS | |||
from src.numberPlateRoiDetection import NumberPlateROIDetection | |||
from src.objectDetection import ObjectDetection | |||
@@ -17,7 +16,7 @@ from src.trackingManager import TrackerManager | |||
class TrafficApp(object): | |||
def __init__(self,args): | |||
self.args = args | |||
#get Object Detection Up | |||
self.objectDetection = ObjectDetection(debug=args.debug,target=args.target) | |||
self.numberPlateDetection = NumberPlateROIDetection(args= args,algoType='NumberPlate') | |||
@@ -32,8 +31,6 @@ class TrafficApp(object): | |||
def runVideoFlow(self): | |||
frame_count = 0 | |||
successfulDetections=0 | |||
if args.video is not None: | |||
try: | |||
videoObj = cv.VideoCapture(args.video) | |||
@@ -70,11 +67,11 @@ class TrafficApp(object): | |||
print('Frames-{},Height-{}, Width-{}'.format(totalFrames,imgH,imgW)) | |||
if self.args.saveoutput and (imgH > 0 and imgW > 0): | |||
outputfile = "output-{}-{}.mp4".format(ntpath.basename(args.video)[0:-4],args.jump) | |||
self.vid_writer = cv.VideoWriter(outputfile,cv.VideoWriter_fourcc(*"MJPG"), 30,(round(imgW),round(imgH))) | |||
self.vid_writer = cv.VideoWriter(self.args.outputfile, | |||
cv.VideoWriter_fourcc(*"MJPG"), 30, | |||
(round(imgW),round(imgH))) | |||
progress_bar=tqdm(total = totalFrames) | |||
total_frame_processed = 0 | |||
# start reading frame | |||
while True: | |||
grabbed, frame = videoObj.read() | |||
@@ -83,10 +80,11 @@ class TrafficApp(object): | |||
if not grabbed: | |||
break | |||
frame_count +=1 | |||
#print('Frame_count-',frame_count) | |||
#Use jump argument to skip frames. | |||
if (frame_count % self.args.jump == 0): | |||
total_frame_processed+=1 | |||
# get object detection on this frame | |||
img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW) | |||
'''Assign Trcakers''' | |||
@@ -110,18 +108,11 @@ class TrafficApp(object): | |||
''' Get Number plate OCR ''' | |||
number_plate_ocr_dict = get_number_plate_ocr_from_rois(frame.copy(),detected_np_info, False) | |||
if any(x > PARAMS._YOLOV3_OD_CONFIDENCE for x in confidences): | |||
successfulDetections+=1
#Display frame | |||
displayFrame = self.displayFrame(frame.copy(),detected_np_info,number_plate_ocr_dict,object_detect_info) | |||
winName = 'YOLOV3 Object Detection' | |||
#cv.namedWindow(winName, cv.WINDOW_NORMAL) | |||
cv.namedWindow(winName, cv.WINDOW_NORMAL) | |||
#cv.imshow(winName, displayFrame) | |||
#cv.resizeWindow('objectDetection',680,420) | |||
if self.vid_writer: | |||
@@ -132,19 +123,6 @@ class TrafficApp(object): | |||
videoObj.release() | |||
break | |||
progress_bar.close()
with open("{}-{}-metric.txt".format(ntpath.basename(args.video)[0:-4],args.jump),"w") as f: | |||
line = "Successful detection = {} total_frame_processed = {}".format(successfulDetections,total_frame_processed) | |||
f.write(line) | |||
f.close()
def parseObjDetectInfo(self,object_roi_info): | |||
boxes, confidences, classids, idxs, status = object_roi_info | |||
#[[list of bbox ][list of conf and labels]] | |||
@@ -271,27 +249,19 @@ if __name__ == '__main__': | |||
import cProfile, pstats | |||
app_profiler = cProfile.Profile()
parser = argparse.ArgumentParser(description='BitSilica Traffic Analysis Solution') | |||
parser.add_argument('--image', help=' Full Path to image file.') | |||
parser.add_argument('--video', help='Full Path to video file.') | |||
parser.add_argument('--realtime', help='Camera Connected Input') | |||
parser.add_argument('--target', type=str,default = 'CPU',help='Target for CNN to run') | |||
parser.add_argument('--saveoutput',type=bool,default=True, help='save video or not') | |||
parser.add_argument('--outputfile',type=str,default='./result.avi', help='save video path') | |||
parser.add_argument('--debug',type=bool,default=False, help='print time taken by function') | |||
parser.add_argument('--jump',type=int,default=1,help='integer value for jumping frames') | |||
parser.add_argument('--cores',type=int,default=1,help='enables usage of multiple cores.') | |||
args = parser.parse_args() | |||
app_profiler.enable() | |||
app = TrafficApp(args = args) | |||
app_profiler.disable() | |||
profile_name = str('profile_info-{}.prof'.format(args.jump))
app_profiler.dump_stats(profile_name) |
@@ -0,0 +1,24 @@ | |||
A general flow of the work: | |||
(readFrames) --> [buffer] --> (vehicleDetection) --> [buffer] --> (number plate)--> (OCR) --> [buffer] --> display --> [buffer] --> save video. | |||
Parallelizing the pipeline:
a. Start reading frames and add them in an input buffer. | |||
b. Take the frames from input buffer and run vehicleDetection on them (2-4 processes) | |||
c. Add the frames in an output buffer and sort them. | |||
d. Take the frames from output buffer and then perform numberplate and ocr detection on them. | |||
e. display the frames | |||
f. write the frames in an output video buffer and save it. | |||
+++++++++++++++++++++++++ | |||
1. Create a function which can read files and put them into buffer along with frame ID | |||
2. Create the buffer which can hold the object from 1. | |||
3. Integrate steps 1 and 2. -- core 1 --
4. Consumer/producer for vehicle detection. -- core 1 -- | |||
5. Buffer for storing detected objects; integrate it with 4.
6. Consumer/Producer for numberplate and OCR.
7. Add these into a buffer.
8. Consumer for taking the video frames, sorting and merging them.
+++++++++++++++++++++++++++++ | |||
1. >> | |||
a. Create a class for reading frames given the path and store it in a queue. |