"""Module preamble for the BitSilica traffic-analysis app.

Reconstructed from a conflicted/duplicated diff: each import appeared twice
and a stray hunk header was embedded. Imports are deduplicated and grouped
stdlib / third-party / local per PEP 8. NOTE(review): the diff elides a few
more local imports after this region (e.g. `from src.trackingManager import
TrackerManager`) — those lines are outside this span and are kept as-is.
"""

import argparse
import ntpath
import os

import cv2 as cv
import numpy as np
from tqdm import tqdm

from config.config import PARAMS
from src.numberPlateRoiDetection import NumberPlateROIDetection
from src.objectDetection import ObjectDetection

# Point OpenCV GUI windows at the local X server.
# NOTE(review): assumes an X display exists at :0 — this will make imshow /
# namedWindow fail on headless machines; confirm deployment environment.
os.environ['DISPLAY'] = ':0'
class TrafficApp(object):
    """Top-level traffic-analysis pipeline: object detection plus
    number-plate ROI detection over an input video."""

    def __init__(self, args):
        """Store the CLI namespace and build the detection engines.

        Parameters
        ----------
        args : argparse.Namespace
            Parsed CLI options; this constructor reads ``args.debug`` and
            ``args.target`` and keeps the whole namespace on ``self.args``.
        """
        self.args = args

        # Bring the object detector up first, then the plate-ROI detector.
        self.objectDetection = ObjectDetection(debug=args.debug, target=args.target)
        self.numberPlateDetection = NumberPlateROIDetection(args=args, algoType='NumberPlate')
        # NOTE(review): the source diff elides the lines that follow this
        # point — __init__ may have additional setup not visible here;
        # confirm against the repository before relying on this block.
@@ -32,8 +31,6 @@ class TrafficApp(object): |
|
|
|
|
|
|
|
|
def runVideoFlow(self): |
|
|
def runVideoFlow(self): |
|
|
frame_count = 0 |
|
|
frame_count = 0 |
|
|
successfulDetections=0 |
|
|
|
|
|
|
|
|
|
|
|
if args.video is not None: |
|
|
if args.video is not None: |
|
|
try: |
|
|
try: |
|
|
videoObj = cv.VideoCapture(args.video) |
|
|
videoObj = cv.VideoCapture(args.video) |
|
@@ -70,11 +67,11 @@ class TrafficApp(object): |
|
|
print('Frames-{},Height-{}, Width-{}'.format(totalFrames,imgH,imgW)) |
|
|
print('Frames-{},Height-{}, Width-{}'.format(totalFrames,imgH,imgW)) |
|
|
|
|
|
|
|
|
if self.args.saveoutput and (imgH > 0 and imgW > 0): |
|
|
if self.args.saveoutput and (imgH > 0 and imgW > 0): |
|
|
outputfile = "output-{}-{}.mp4".format(ntpath.basename(args.video)[0:-4],args.jump) |
|
|
|
|
|
self.vid_writer = cv.VideoWriter(outputfile,cv.VideoWriter_fourcc(*"MJPG"), 30,(round(imgW),round(imgH))) |
|
|
|
|
|
|
|
|
self.vid_writer = cv.VideoWriter(self.args.outputfile, |
|
|
|
|
|
cv.VideoWriter_fourcc(*"MJPG"), 30, |
|
|
|
|
|
(round(imgW),round(imgH))) |
|
|
|
|
|
|
|
|
progress_bar=tqdm(total = totalFrames) |
|
|
progress_bar=tqdm(total = totalFrames) |
|
|
total_frame_processed = 0 |
|
|
|
|
|
# start reading frame |
|
|
# start reading frame |
|
|
while True: |
|
|
while True: |
|
|
grabbed, frame = videoObj.read() |
|
|
grabbed, frame = videoObj.read() |
|
@@ -83,10 +80,11 @@ class TrafficApp(object): |
|
|
if not grabbed: |
|
|
if not grabbed: |
|
|
break |
|
|
break |
|
|
frame_count +=1 |
|
|
frame_count +=1 |
|
|
|
|
|
|
|
|
#print('Frame_count-',frame_count) |
|
|
#print('Frame_count-',frame_count) |
|
|
#Use jump argument to skip frames. |
|
|
#Use jump argument to skip frames. |
|
|
if (frame_count % self.args.jump == 0): |
|
|
if (frame_count % self.args.jump == 0): |
|
|
total_frame_processed+=1 |
|
|
|
|
|
|
|
|
|
|
|
# get object detection on this frame |
|
|
# get object detection on this frame |
|
|
img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW) |
|
|
img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW) |
|
|
'''Assign Trcakers''' |
|
|
'''Assign Trcakers''' |
|
@@ -110,18 +108,11 @@ class TrafficApp(object): |
|
|
''' Get Number plate OCR ''' |
|
|
''' Get Number plate OCR ''' |
|
|
number_plate_ocr_dict = get_number_plate_ocr_from_rois(frame.copy(),detected_np_info, False) |
|
|
number_plate_ocr_dict = get_number_plate_ocr_from_rois(frame.copy(),detected_np_info, False) |
|
|
|
|
|
|
|
|
if any(x > PARAMS._YOLOV3_OD_CONFIDENCE for x in confidences): |
|
|
|
|
|
successfulDetections+=1 |
|
|
|
|
|
|
|
|
|
|
#Display frame |
|
|
#Display frame |
|
|
displayFrame = self.displayFrame(frame.copy(),detected_np_info,number_plate_ocr_dict,object_detect_info) |
|
|
displayFrame = self.displayFrame(frame.copy(),detected_np_info,number_plate_ocr_dict,object_detect_info) |
|
|
|
|
|
|
|
|
winName = 'YOLOV3 Object Detection' |
|
|
winName = 'YOLOV3 Object Detection' |
|
|
|
|
|
|
|
|
#cv.namedWindow(winName, cv.WINDOW_NORMAL) |
|
|
|
|
|
|
|
|
cv.namedWindow(winName, cv.WINDOW_NORMAL) |
|
|
#cv.imshow(winName, displayFrame) |
|
|
#cv.imshow(winName, displayFrame) |
|
|
#cv.resizeWindow('objectDetection',680,420) |
|
|
#cv.resizeWindow('objectDetection',680,420) |
|
|
if self.vid_writer: |
|
|
if self.vid_writer: |
|
@@ -132,19 +123,6 @@ class TrafficApp(object): |
|
|
videoObj.release() |
|
|
videoObj.release() |
|
|
break |
|
|
break |
|
|
progress_bar.close() |
|
|
progress_bar.close() |
|
|
|
|
|
|
|
with open("{}-{}-metric.txt".format(ntpath.basename(args.video)[0:-4],args.jump),"w") as f: |
|
|
|
|
|
line = "Successful detection = {} total_frame_processed = {}".format(successfulDetections,total_frame_processed) |
|
|
|
|
|
f.write(line) |
|
|
|
|
|
f.close() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parseObjDetectInfo(self,object_roi_info): |
|
|
def parseObjDetectInfo(self,object_roi_info): |
|
|
boxes, confidences, classids, idxs, status = object_roi_info |
|
|
boxes, confidences, classids, idxs, status = object_roi_info |
|
|
#[[list of bbox ][list of conf and labels]] |
|
|
#[[list of bbox ][list of conf and labels]] |
|
@@ -271,27 +249,19 @@ if __name__ == '__main__': |
|
|
|
|
|
|
|
|
import cProfile, pstats |
|
|
import cProfile, pstats |
|
|
app_profiler = cProfile.Profile() |
|
|
app_profiler = cProfile.Profile() |
|
|
<<<<<<< Updated upstream |
|
|
|
|
|
|
|
|
|
|
|
======= |
|
|
|
|
|
part_files = ["{}.{}".format(i,'mp4') for i in range(1,num_processes+1)] |
|
|
|
|
|
>>>>>>> Stashed changes |
|
|
|
|
|
parser = argparse.ArgumentParser(description='BitSilica Traffic Analysis Solution') |
|
|
parser = argparse.ArgumentParser(description='BitSilica Traffic Analysis Solution') |
|
|
parser.add_argument('--image', help=' Full Path to image file.') |
|
|
parser.add_argument('--image', help=' Full Path to image file.') |
|
|
parser.add_argument('--video', help='Full Path to video file.') |
|
|
parser.add_argument('--video', help='Full Path to video file.') |
|
|
parser.add_argument('--realtime', help='Camera Connected Input') |
|
|
parser.add_argument('--realtime', help='Camera Connected Input') |
|
|
parser.add_argument('--target', type=str,default = 'CPU',help='Target for CNN to run') |
|
|
parser.add_argument('--target', type=str,default = 'CPU',help='Target for CNN to run') |
|
|
parser.add_argument('--saveoutput',type=bool,default=True, help='save video or not') |
|
|
parser.add_argument('--saveoutput',type=bool,default=True, help='save video or not') |
|
|
|
|
|
parser.add_argument('--outputfile',type=str,default='./result.avi', help='save video path') |
|
|
parser.add_argument('--debug',type=bool,default=False, help='print time taken by function') |
|
|
parser.add_argument('--debug',type=bool,default=False, help='print time taken by function') |
|
|
parser.add_argument('--jump',type=int,default=1,help='integer value for jumping frames') |
|
|
parser.add_argument('--jump',type=int,default=1,help='integer value for jumping frames') |
|
|
parser.add_argument('--cores',type=int,default=1,help='enables usage of multiple cores.') |
|
|
|
|
|
args = parser.parse_args() |
|
|
args = parser.parse_args() |
|
|
app_profiler.enable() |
|
|
app_profiler.enable() |
|
|
app = TrafficApp(args = args) |
|
|
app = TrafficApp(args = args) |
|
|
app_profiler.disable() |
|
|
app_profiler.disable() |
|
|
<<<<<<< Updated upstream |
|
|
|
|
|
profile_name = str('{}-{}.prof'.format(ntpath.basename(args.video)[0:-4],args.jump)) |
|
|
|
|
|
======= |
|
|
|
|
|
profile_name = str('{}-{}-{}.prof'.format(ntpath.basename(args.video)[:-4],args.jump,rgs.cores)) |
|
|
|
|
|
>>>>>>> Stashed changes |
|
|
|
|
|
|
|
|
profile_name = str('profile_info-{}.prof'.format(args.jump)) |
|
|
app_profiler.dump_stats(profile_name) |
|
|
app_profiler.dump_stats(profile_name) |