
refactor(processes/threads exit/join cleanly up to vehicleDetectionProcess()): intermediate commit.

1. readFramesThread() exits cleanly. The inputQueue is given time to fill by calling time.sleep() after start().
2. vehicleDetectionProcess() exits cleanly.
3. numberPlateOcr does not exit cleanly yet (an alternative shutdown pattern is sketched below the file list).

Do not check out this commit.
refactoring
Atmadeep Arya · 3 years ago
commit 94af4b54a3
6 changed files with 146 additions and 89 deletions
  1. example2.py (+22, -8)
  2. skeleton.py (+26, -13)
  3. src/objectDetection.py (+4, -3)
  4. src/ocrNumberPlate.py (+1, -1)
  5. src/schedule_1.py (+0, -11, deleted)
  6. src/trafficApp.py (+93, -53)
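
Point 3 of the commit message is the open problem in this commit: NumberPlateOcr returns only when it happens to find both upstream queues empty, which is racy while producers are still running. A minimal sketch of a sentinel-based shutdown for a three-stage pipeline (illustrative only; read_frames, detect_vehicles, ocr_plates and SENTINEL are hypothetical names, not code from this repository):

import multiprocessing as mp

SENTINEL = None                     # hypothetical end-of-stream marker

def read_frames(frame_q):
    for frame_id in range(20):      # stands in for reading video frames
        frame_q.put(frame_id)
    frame_q.put(SENTINEL)           # signal that no more frames are coming

def detect_vehicles(frame_q, ocr_q):
    while True:
        item = frame_q.get()        # blocks instead of polling qsize()/empty()
        if item is SENTINEL:
            ocr_q.put(SENTINEL)     # propagate shutdown to the OCR stage
            return                  # run ends, so join() completes
        ocr_q.put(item)             # forward the (pretend) detection result

def ocr_plates(ocr_q):
    while True:
        item = ocr_q.get()
        if item is SENTINEL:
            return
        # ... OCR work would happen here ...

if __name__ == '__main__':
    frame_q, ocr_q = mp.Queue(), mp.Queue()
    stages = [mp.Process(target=detect_vehicles, args=(frame_q, ocr_q)),
              mp.Process(target=ocr_plates, args=(ocr_q,))]
    for p in stages:
        p.start()
    read_frames(frame_q)            # producer runs in the main process here
    for p in stages:
        p.join()                    # every stage exits once the sentinel reaches it

Because the sentinel is forwarded stage by stage, every worker returns deterministically and the final join() calls cannot hang on a consumer polling an empty queue.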

example2.py (+22, -8)

@@ -8,39 +8,48 @@ inputQueue = mp.JoinableQueue()
 inputQueue2 = mp.Queue(10)


 class ProducerThread(Thread):
-    def __init__(self,name='',custom_id,daemon=False):
+    def __init__(self,name='producer thread',custom_id=0,daemon=False):
         super(ProducerThread, self).__init__()
         self.name = "{}-{}".format(name,custom_id)
         self.setDaemon(daemon)
+    #def threadjoin(self):
+    #    super(ProducerThread,self).join()
     global inputQueue
     def run(self):
-        numbers = range(40)
+        numbers = range(10)
         counter = 0
         while counter < 20:
             num = random.choice(numbers)
             inputQueue.put(num)
             print("\nPut",num)
-            time.sleep(.08)
+            time.sleep(0.5)
             counter+=1


-class ConsumerProcess1(Thread):
-    def __init__(self,name,custom_id,daemon=False):
+class ConsumerProcess1(mp.Process):
+    def __init__(self,name='consumer thread',custom_id=0,daemon=False):
         super(ConsumerProcess1,self).__init__()
         self.name = "{}-{}".format(name,custom_id)
         self.daemon = daemon

+    #def consumerjoin(self):
+    #    super(ConsumerProcess1,self).join()
+    #    return

     global inputQueue
     def run(self):
         while (not inputQueue.empty()):
             num = inputQueue.get()
             inputQueue.task_done()
             print("\nGot", num)
-            time.sleep(.1)
+            time.sleep(1)


 if __name__ == "__main__":

     print("this is example 2")
     time.sleep(2)
     a=ProducerThread(name = 'producer thread',custom_id=1,daemon=False)
@@ -50,4 +59,9 @@ if __name__ == "__main__":
     b.start()

     print(a.ident,a.name,"\n")
     print(b.ident,b.name,"\n")
+
+    a.join()
+    b.join()
+
+    print('end of program mark')

skeleton.py (+26, -13)

@@ -1,12 +1,13 @@
-import os
-import cv2
 import argparse
-import time
-import random
 import multiprocessing as mp
+import os
+import random
 import threading as th
+import time
 from queue import Queue

+import cv2

 inputQueue = mp.Queue()
 vehicleDetectionQueue = mp.Queue()
 outputQueue = mp.Queue()
@@ -30,6 +31,7 @@ class ReadFrame(th.Thread):
             inputQueue.put((self.frame,self.frameId))
             print(f"{self.name}frame added with id {self.frameId}\n")
             self.frameId+=1
+        print('--Done reading frames--\n')
         return


@@ -41,31 +43,35 @@ class VehicleDetection(mp.Process):
     global inputQueue
     def run(self):
         while (True):
+            if(inputQueue.qsize() == 0):
+                return
             (frame,frameId) = inputQueue.get()
             #inputQueue.task_done()
             print(f"{self.name}Got frame with ID {frameId} qsize = {inputQueue.qsize()}\n")
             #do some processing here.
-            vehicleDetectionQueue.put((frame,frameId))
-            if(inputQueue.qsize() < 1):
-                return
-            time.sleep(.5)
+            vehicleDetectionQueue.put_nowait((frame,frameId))

 class NumberPlateOcr(mp.Process):
     def __init__(self,name='Number plate OCR Process',custom_id=1):
         super(NumberPlateOcr,self).__init__()
         self.name=f'{name} {custom_id}'

     global inputQueue
+    global numberPlateOcrQueue
+    global vehicleDetectionQueue
+    global outputQueue

     def run(self):
         while True:
             (frame,frameId) = vehicleDetectionQueue.get()
             #inputQueue.task_done()
             print(f"{self.name} Got frame with ID {frameId}\n")
             #do some processing here.
-            outputQueue.put((frame,frameId))
-            time.sleep(.25)
+            outputQueue.put_nowait((frame,frameId))
+            if((inputQueue.empty()) and (vehicleDetectionQueue.empty())):
+                return


 class outputframe(th.Thread):
     def __init__(self,name='output thread',custom_id=1):
@@ -98,9 +104,16 @@ if __name__ == '__main__':
     vehicleDetectionProcess = VehicleDetection()
     numberPlateOcrProcess = NumberPlateOcr()
     readFramesThread.start()
+    time.sleep(.25)
     vehicleDetectionProcess.start()
     numberPlateOcrProcess.start()
+    readFramesThread.join()
+    print(f'readframesthread {readFramesThread.is_alive()}\n')
+    vehicleDetectionProcess.join()
+    print(f'vehicleDetectionProcess {vehicleDetectionProcess.is_alive()}\n')
+    numberPlateOcrProcess.join()
+    print(f'numberPlateOcrProcess {numberPlateOcrProcess.is_alive()}\n')


     #disable profiler here.
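
skeleton.py still leans on time.sleep(.25) before starting the consumers and on qsize()/empty() checks to decide when to stop; the commented-out inputQueue.task_done() calls hint at the JoinableQueue alternative. A rough sketch under that assumption (worker and the None stop sentinel are hypothetical names, not code from skeleton.py):

import multiprocessing as mp

def worker(q):
    while True:
        item = q.get()
        if item is None:        # stop sentinel
            q.task_done()
            break
        # ... process the frame here ...
        q.task_done()           # mark this item as fully handled

if __name__ == '__main__':
    q = mp.JoinableQueue()
    consumer = mp.Process(target=worker, args=(q,))
    consumer.start()
    for frame_id in range(5):
        q.put(frame_id)
    q.put(None)
    q.join()                    # returns only after task_done() matched every put()
    consumer.join()             # worker has already returned, so this cannot hang

With queue.join() there is no need to sleep before starting the consumer, because the consumer blocks on get() rather than seeing an empty queue and exiting.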


src/objectDetection.py (+4, -3)

@@ -115,7 +115,7 @@ class ObjectDetection(object):

         return boxes, confidences, classids

-    def run_object_detection(self,img,imageH,imageW,doPlotBoxNLabel = True):
+    def run_object_detection(self,img,frameId,imageH,imageW,vehicleDetectionQueue,doPlotBoxNLabel = True):
         status = True
         # Image preprocess - make RGB,Resize,Scale by 1/255
         blob = cv.dnn.blobFromImage(img, 1 / 255.0, PARAMS._YOLOV3_OD_INPUT_IMAGE_SIZE,
@@ -149,5 +149,6 @@ class ObjectDetection(object):
         if doPlotBoxNLabel:
             # Draw labels and boxes on the image
             img = self.draw_labels_and_boxes(img, boxes, confidences, classids, idxs, self.colors, self.labels)

-        return img, boxes, confidences, classids, idxs,status
+        return img, frameId, boxes, confidences, classids, idxs, status
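
run_object_detection now also receives frameId and vehicleDetectionQueue, presumably because a multiprocessing.Process target cannot hand its return value back to the parent; results have to come back through the queue, as the vehicleDetectionQueue.get() call in trafficApp.py suggests. A toy illustration of that constraint (detect and result_q are hypothetical names, not part of this repository):

import multiprocessing as mp

def detect(frame_id, result_q):
    result = (frame_id, "boxes")    # the return value below is discarded by mp.Process
    result_q.put(result)            # so the result is sent through the queue instead
    return result

if __name__ == '__main__':
    result_q = mp.Queue()
    p = mp.Process(target=detect, args=(7, result_q))
    p.start()
    print(result_q.get())           # -> (7, 'boxes')
    p.join()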

src/ocrNumberPlate.py (+1, -1)

@@ -160,7 +160,7 @@ def extract_all_number_plates_text(img, region_of_interests):
 # ============================================================================


-def get_number_plate_ocr_from_rois(img, np_rois_info, save_number_plate):
+def get_number_plate_ocr_from_rois(img, np_rois_info,save_number_plate):
     np_roi_text_dict= {}
     for idx,roiinfo in enumerate(np_rois_info):
         conf, classID, roi = roiinfo


src/schedule_1.py (+0, -11, deleted)

@@ -1,11 +0,0 @@
-import os
-import argparse
-
-parser = argparse.ArgumentParser(description="Job scheduler for sampling decision")
-parser.add_argument("--jump",type=int,default=1,help = 'integer value for jumping frames')
-args = parser.parse_args()
-for i in range (1,11):
-    videoname = "sample_video_{}.mp4".format(i)
-    command = "python src/trafficApp.py --video ./resources/{} --saveoutput true --jump {} ".format(videoname,args.jump)
-    os.system(command)

src/trafficApp.py (+93, -53)

@@ -1,11 +1,16 @@
 import argparse

 import cv2 as cv

 import numpy as np
-from tqdm import tqdm
 import os
+import signal
+import multiprocessing as mp
 os.environ['DISPLAY'] = ':0'


 from config.config import PARAMS
 from src.numberPlateRoiDetection import NumberPlateROIDetection
 from src.objectDetection import ObjectDetection
@@ -13,6 +18,9 @@ from src.ocrNumberPlate import get_number_plate_ocr_from_rois
 from src.parkingDetection import ParkingDetection
 from src.trackingManager import TrackerManager

+# TODO 2 processes for objectdetection using queues (queue1)
+# TODO implement queue for taking frames, frameid from queue1
+
 class TrafficApp(object):
     def __init__(self,args):
         self.args = args
@@ -28,9 +36,16 @@ class TrafficApp(object):
         if self.args.video is not None:
             self.vid_writer = None
             self.runVideoFlow()

     def runVideoFlow(self):
         frame_count = 0
+        frame1_id=0
+        frame2_id= 1
+        vehicleDetectionQueue = mp.Queue()
+        framesList=[]
+        outputfile = "output-{}.mp4".format(os.path.basename(args.video)[:-4])
         if args.video is not None:
             try:
                 videoObj = cv.VideoCapture(args.video)
@@ -67,62 +82,85 @@ class TrafficApp(object):
             print('Frames-{},Height-{}, Width-{}'.format(totalFrames,imgH,imgW))

             if self.args.saveoutput and (imgH > 0 and imgW > 0):
-                self.vid_writer = cv.VideoWriter(self.args.outputfile,
+                self.vid_writer = cv.VideoWriter(outputfile,
                                                  cv.VideoWriter_fourcc(*"MJPG"), 30,
                                                  (round(imgW),round(imgH)))
-            progress_bar=tqdm(total = totalFrames)
             # start reading frame
-            while True:
-                grabbed, frame = videoObj.read()
+            while True :
+                grabbed1, frame1 = videoObj.read()
+                grabbed2, frame2 = videoObj.read()
+                frame1_id+=1
+                frame2_id+=2

                 #frame[:,450:,:] = 0
                 # end of frame
-                if not grabbed:
+                if not grabbed1:
                     break
                 frame_count +=1

                 #print('Frame_count-',frame_count)
                 #Use jump argument to skip frames.
-                if (frame_count % self.args.jump == 0):
-                    # get object detection on this frame
-                    img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW)
-                    '''Assign Trcakers'''
-                    object_detect_info = [boxes, confidences, classids, idxs, status]
-                    bbox_labels_tracking = self.parseObjDetectInfo(object_detect_info)
-                    TrackerManager.FrameCount = frame_count
-                    TrackerManager.manageTracker(bbox_labels_tracking)
-
-                    ''' Get Parking Status'''
-                    if PARAMS._ALGO_MODE_PARKING:
-                        self.parkingDetection.getParkingStatus(TrackerManager.TrackerList)
-
-                    '''Filter ROIs for Number Plate Detection'''
-                    tentative_numberplate_rios = self.objectDetection.filterRoiforNumberPlate(boxes, classids, idxs)
-
-                    ''' Get Number Plate ROI'''
-                    detected_np_info = self.numberPlateDetection.run_number_plate_detection_rois(image=frame.copy(),rois=tentative_numberplate_rios)
-
-                    ''' Get Number plate OCR '''
-                    number_plate_ocr_dict = get_number_plate_ocr_from_rois(frame.copy(),detected_np_info, False)
-
-                    #Display frame
-                    displayFrame = self.displayFrame(frame.copy(),detected_np_info,number_plate_ocr_dict,object_detect_info)
-
-                    winName = 'YOLOV3 Object Detection'
-                    cv.namedWindow(winName, cv.WINDOW_NORMAL)
-                    #cv.imshow(winName, displayFrame)
-                    #cv.resizeWindow('objectDetection',680,420)
-                    if self.vid_writer:
-                        self.vid_writer.write(displayFrame.astype(np.uint8))
-                    c = cv.waitKey(1)
-                    if c & 0xFF == ord('q'):
-                        self.vid_writer.release()
-                        videoObj.release()
-                        break
-            progress_bar.close()
+                # get object detection on this frame
+                objectDetectionProcess1=mp.Process(name='Object Detection Process 1',target=self.objectDetection.run_object_detection, args=(frame1.copy(),frame1_id,imgH,imgW,vehicleDetectionQueue))
+                objectDetectionProcess2=mp.Process(name='Object Detection Process 2',target=self.objectDetection.run_object_detection, args=(frame2.copy(),frame2_id,imgH,imgW,vehicleDetectionQueue))
+                objectDetectionProcess1.start()
+                objectDetectionProcess2.start()
+
+                print(f'{objectDetectionProcess1.name},{objectDetectionProcess1.pid},\n' )
+                print(f'{objectDetectionProcess2.name},{objectDetectionProcess2.pid} \n' )
+
+                #print(f'Vehicle detection Queue size = {vehicleDetectionQueue.qsize()}')
+                #img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW)
+                img, frameId, boxes, confidences, classids, idxs,status = vehicleDetectionQueue.get()
+                #append the frames and frameid.
+                framesList.append((img,frameId))
+                print(f'frames stored = {len(framesList)} \n')
+                '''Assign Trackers'''
+                object_detect_info = [boxes, confidences, classids, idxs, status]
+                bbox_labels_tracking = self.parseObjDetectInfo(object_detect_info)
+                TrackerManager.FrameCount = frame_count
+                TrackerManager.manageTracker(bbox_labels_tracking)
+
+                ''' Get Parking Status'''
+                if PARAMS._ALGO_MODE_PARKING:
+                    self.parkingDetection.getParkingStatus(TrackerManager.TrackerList)
+
+                '''Filter ROIs for Number Plate Detection'''
+                tentative_numberplate_rios = self.objectDetection.filterRoiforNumberPlate(boxes, classids, idxs)
+
+                ''' Get Number Plate ROI'''
+                detected_np_info = self.numberPlateDetection.run_number_plate_detection_rois(image=img,rois=tentative_numberplate_rios)
+
+                ''' Get Number plate OCR '''
+                number_plate_ocr_dict = get_number_plate_ocr_from_rois(img,detected_np_info, False)
+
+                objectDetectionProcess1.join()
+                objectDetectionProcess2.join()
+
+                #Display frame
+                displayFrame = self.displayFrame(img,detected_np_info,number_plate_ocr_dict,object_detect_info)
+                # ISSUE how to kil the processes? New processes spawn on every iteration.
+                print(f'objectDetectionProcess1 is alive = {objectDetectionProcess1.is_alive()}\n')
+                print(f'objectDetectionProcess2 is alive = {objectDetectionProcess2.is_alive()}\n')
+                print("+++++++++++++++++++end of cycle++++++++++++++++++")
+
+                #winName = 'YOLOV3 Object Detection'
+                #cv.namedWindow(winName, cv.WINDOW_NORMAL)
+                #cv.imshow(winName, displayFrame)
+                #cv.resizeWindow('objectDetection',680,420)
+                if self.vid_writer:
+                    self.vid_writer.write(displayFrame.astype(np.uint8))
+                c = cv.waitKey(1)
+                if c & 0xFF == ord('q'):
+                    self.vid_writer.release()
+                    videoObj.release()
+                    break
     def parseObjDetectInfo(self,object_roi_info):
         boxes, confidences, classids, idxs, status = object_roi_info
         #[[list of bbox ][list of conf and labels]]
@@ -247,7 +285,7 @@ class TrafficApp(object):


 if __name__ == '__main__':

-    import cProfile, pstats
+    import cProfile
    app_profiler = cProfile.Profile()

    parser = argparse.ArgumentParser(description='BitSilica Traffic Analysis Solution')
@@ -256,12 +294,14 @@ if __name__ == '__main__':
    parser.add_argument('--realtime', help='Camera Connected Input')
    parser.add_argument('--target', type=str,default = 'CPU',help='Target for CNN to run')
    parser.add_argument('--saveoutput',type=bool,default=True, help='save video or not')
-   parser.add_argument('--outputfile',type=str,default='./result.avi', help='save video path')
-   parser.add_argument('--debug',type=bool,default=False, help='print time taken by function')
+   parser.add_argument('--jump',type=int,default=1,help='integer value for jumping frames')
+   parser.add_argument('--debug',type=bool,default=False, help='print time taken by function')
    args = parser.parse_args()
+   #enable profiler here.
    app_profiler.enable()

    app = TrafficApp(args = args)

+   #disable profiler here.
    app_profiler.disable()
-   profile_name = str('profile_info-{}.prof'.format(args.jump))
+   profile_name = str('{}.prof'.format(os.path.basename(args.video)[0:-4]))
    app_profiler.dump_stats(profile_name)
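
The ISSUE comment in the loop above flags the real cost of this intermediate state: two fresh processes are spawned and joined for every pair of frames. One common alternative, sketched purely as an illustration (detect_worker, task_q, result_q and run_detection are hypothetical names, not part of trafficApp.py), is to start two long-lived workers once and feed them through a task queue:

import multiprocessing as mp

def run_detection(frame, frame_id):
    # placeholder for the real detector call (e.g. a YOLO forward pass)
    return (frame_id, "detections")

def detect_worker(task_q, result_q):
    # iter(get, None) keeps reading until the None sentinel arrives
    for frame, frame_id in iter(task_q.get, None):
        result_q.put(run_detection(frame, frame_id))

if __name__ == '__main__':
    task_q, result_q = mp.Queue(), mp.Queue()
    workers = [mp.Process(target=detect_worker, args=(task_q, result_q)) for _ in range(2)]
    for w in workers:
        w.start()
    for frame_id in range(10):          # stands in for the videoObj.read() loop
        task_q.put(("frame-bytes", frame_id))
    for _ in range(10):
        print(result_q.get())           # collect one result per submitted frame
    for _ in workers:
        task_q.put(None)                # one sentinel per worker
    for w in workers:
        w.join()

Each worker exits on the None sentinel, so the main process can join() them once the video is exhausted instead of killing freshly spawned processes on every iteration.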
