ソースを参照

Sets up code for generating metrics and profiles for deciding sampling rate.

refactoring
Atmadeep Arya 3年前
コミット
457844fcdc
3個のファイルの変更154行の追加10行の削除
  1. +121
    -0
      .gitignore
  2. +11
    -0
      src/schedule_1.py
  3. +22
    -10
      src/trafficApp.py

+ 121
- 0
.gitignore ファイルの表示

@@ -0,0 +1,121 @@
# Django #
*.log
*.pot
*.pyc
__pycache__
db.sqlite3
media

# Backup files #
*.bak

# If you are using PyCharm #
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/gradle.xml
.idea/**/libraries
*.iws
/out/

# Python #
*.py[cod]
*$py.class

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
.pytest_cache/
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery
celerybeat-schedule.*

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# mkdocs documentation
/site

# mypy
.mypy_cache/

# Sublime Text #
*.tmlanguage.cache
*.tmPreferences.cache
*.stTheme.cache
*.sublime-workspace
*.sublime-project

# sftp configuration file
sftp-config.json

# Package control specific files
Package Control.last-run
Package Control.ca-list
Package Control.ca-bundle
Package Control.system-ca-bundle
GitHub.sublime-settings

# Visual Studio Code #
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
.history
.vscode
*.avi
*.mp4
*.xvid
*.divx

+ 11
- 0
src/schedule_1.py ファイルの表示

@@ -0,0 +1,11 @@
"""Job scheduler for the frame-sampling experiment.

Runs ``src/trafficApp.py`` once per sample video (sample_video_1.mp4 ..
sample_video_10.mp4), forwarding the requested frame-jump value so the
app can generate its per-run metric and profile files.
"""
import argparse
import subprocess


def main() -> None:
    """Parse ``--jump`` and launch trafficApp for each of the 10 sample videos."""
    parser = argparse.ArgumentParser(description="Job scheduler for sampling decision")
    parser.add_argument("--jump", type=int, default=1,
                        help="integer value for jumping frames")
    args = parser.parse_args()

    for i in range(1, 11):
        videoname = "sample_video_{}.mp4".format(i)
        # Use an argument list with subprocess.run instead of a shell string
        # via os.system: no shell interpolation, and each argument is passed
        # through verbatim. check=False matches os.system's behavior of not
        # raising when a run exits non-zero (one bad video shouldn't stop
        # the remaining jobs).
        subprocess.run(
            [
                "python", "src/trafficApp.py",
                "--video", "./resources/{}".format(videoname),
                "--saveoutput", "true",
                "--jump", str(args.jump),
            ],
            check=False,
        )


if __name__ == "__main__":
    # Guard so importing this module does not fire ten subprocesses.
    main()

+ 22
- 10
src/trafficApp.py ファイルの表示

@@ -1,11 +1,12 @@
import argparse
import ntpath
import cv2 as cv
import numpy as np
from tqdm import tqdm
import os
os.environ['DISPLAY'] = ':0'


from config.config import PARAMS
from src.numberPlateRoiDetection import NumberPlateROIDetection
from src.objectDetection import ObjectDetection
@@ -16,7 +17,7 @@ from src.trackingManager import TrackerManager
class TrafficApp(object):
def __init__(self,args):
self.args = args
#get Object Detection Up
self.objectDetection = ObjectDetection(debug=args.debug,target=args.target)
self.numberPlateDetection = NumberPlateROIDetection(args= args,algoType='NumberPlate')
@@ -31,6 +32,8 @@ class TrafficApp(object):

def runVideoFlow(self):
frame_count = 0
successfulDetections=0

if args.video is not None:
try:
videoObj = cv.VideoCapture(args.video)
@@ -67,11 +70,11 @@ class TrafficApp(object):
print('Frames-{},Height-{}, Width-{}'.format(totalFrames,imgH,imgW))

if self.args.saveoutput and (imgH > 0 and imgW > 0):
self.vid_writer = cv.VideoWriter(self.args.outputfile,
cv.VideoWriter_fourcc(*"MJPG"), 30,
(round(imgW),round(imgH)))
outputfile = "output-{}-{}.mp4".format(ntpath.basename(args.video)[0:-4],args.jump)
self.vid_writer = cv.VideoWriter(outputfile,cv.VideoWriter_fourcc(*"MJPG"), 30,(round(imgW),round(imgH)))
progress_bar=tqdm(total = totalFrames)
total_frame_processed = 0
# start reading frame
while True:
grabbed, frame = videoObj.read()
@@ -80,11 +83,10 @@ class TrafficApp(object):
if not grabbed:
break
frame_count +=1

#print('Frame_count-',frame_count)
#Use jump argument to skip frames.
if (frame_count % self.args.jump == 0):
total_frame_processed+=1
# get object detection on this frame
img_objectMarking, boxes, confidences, classids, idxs,status = self.objectDetection.run_object_detection(frame.copy(),imageH=imgH,imageW=imgW)
'''Assign Trcakers'''
@@ -108,11 +110,15 @@ class TrafficApp(object):
''' Get Number plate OCR '''
number_plate_ocr_dict = get_number_plate_ocr_from_rois(frame.copy(),detected_np_info, False)

if any(x > PARAMS._YOLOV3_OD_CONFIDENCE for x in confidences):
successfulDetections+=1

#Display frame
displayFrame = self.displayFrame(frame.copy(),detected_np_info,number_plate_ocr_dict,object_detect_info)

winName = 'YOLOV3 Object Detection'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
#cv.namedWindow(winName, cv.WINDOW_NORMAL)
#cv.imshow(winName, displayFrame)
#cv.resizeWindow('objectDetection',680,420)
if self.vid_writer:
@@ -123,6 +129,13 @@ class TrafficApp(object):
videoObj.release()
break
progress_bar.close()
with open("{}-{}-metric.txt".format(ntpath.basename(args.video)[0:-4],args.jump),"w") as f:
line = "Successful detection = {} total_frame_processed = {}".format(successfulDetections,total_frame_processed)
f.write(line)
f.close()


def parseObjDetectInfo(self,object_roi_info):
boxes, confidences, classids, idxs, status = object_roi_info
#[[list of bbox ][list of conf and labels]]
@@ -256,12 +269,11 @@ if __name__ == '__main__':
parser.add_argument('--realtime', help='Camera Connected Input')
parser.add_argument('--target', type=str,default = 'CPU',help='Target for CNN to run')
parser.add_argument('--saveoutput',type=bool,default=True, help='save video or not')
parser.add_argument('--outputfile',type=str,default='./result.avi', help='save video path')
parser.add_argument('--debug',type=bool,default=False, help='print time taken by function')
parser.add_argument('--jump',type=int,default=1,help='integer value for jumping frames')
args = parser.parse_args()
app_profiler.enable()
app = TrafficApp(args = args)
app_profiler.disable()
profile_name = str('profile_info-{}.prof'.format(args.jump))
profile_name = str('{}-{}.prof'.format(ntpath.basename(args.video)[0:-4],args.jump))
app_profiler.dump_stats(profile_name)

読み込み中…
キャンセル
保存