Result_yolo3_fault_4/.gitignore (new file, 1 line added)
@@ -0,0 +1 @@
result_otros/
35 binary image files changed (previous sizes 49 KiB to 114 KiB); width, height and file names are not preserved in this view.
BIN keras-yolo3-master/utils/__pycache__/__init__.cpython-36.pyc (Executable file → Normal file)
BIN keras-yolo3-master/utils/__pycache__/colors.cpython-36.pyc (Executable file → Normal file)
@@ -16,18 +16,19 @@ import numpy as np

def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    # Set some parameter
    ###############################
    net_h, net_w = 416, 416 # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45
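The script reads only three things from the JSON configuration: config['train']['saved_weights_name'], config['model']['anchors'] and config['model']['labels']. Below is a minimal sketch of a matching config, assuming the usual keras-yolo3 layout; the anchor values, label name and weights path are placeholders, not values taken from this repository.

# Hypothetical minimal config.json for the predict script above.
# Only the keys the script actually reads are included; all values are placeholders.
import json

config = {
    "model": {
        "anchors": [17,18, 28,24, 36,34, 42,44, 56,51, 72,66, 90,95, 92,154, 139,281],  # placeholder anchors
        "labels": ["raccoon"],  # placeholder class list
    },
    "train": {
        "saved_weights_name": "raccoon.h5",  # path to the trained Keras weights
    },
}

with open("config.json", "w") as config_buffer:
    json.dump(config, config_buffer, indent=4)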
@@ -38,7 +39,7 @@ def _main_(args):
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    # Predict bounding boxes
    ###############################
    if 'webcam' in input_path: # do detection on the first webcam
        video_reader = cv2.VideoCapture(0)
@@ -54,13 +55,13 @@ def _main_(args):
                batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)

                for i in range(len(images)):
                    draw_boxes(images[i], batch_boxes[i], config['model']['labels'], obj_thresh)
                    cv2.imshow('video with bboxes', images[i])
                images = []
            if cv2.waitKey(1) == 27:
                break # esc to quit
        cv2.destroyAllWindows()
    elif input_path[-4:] == '.mp4': # do detection on a video
        video_out = output_path + input_path.split('/')[-1]
        video_reader = cv2.VideoCapture(input_path)
@@ -69,8 +70,8 @@ def _main_(args):
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                               cv2.VideoWriter_fourcc(*'MPEG'),
                               50.0,
                               (frame_w, frame_h))
        # the main loop
        batch_size = 1
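The writer above hard-codes an 'MPEG' fourcc and a fixed 50.0 fps for every input. A small sketch of an alternative, not part of this diff: query the source frame rate from the reader and fall back to 50.0 when OpenCV cannot report it.

# Sketch only (not in this diff); video_reader, video_out, frame_w and frame_h
# are the names from the hunk above. cv2.CAP_PROP_FPS returns 0.0 for some
# containers and webcams, hence the fallback to the original 50.0.
fps = video_reader.get(cv2.CAP_PROP_FPS) or 50.0
video_writer = cv2.VideoWriter(video_out,
                               cv2.VideoWriter_fourcc(*'MPEG'),
                               fps,
                               (frame_w, frame_h))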
@@ -89,27 +90,27 @@ def _main_(args):

                    for i in range(len(images)):
                        # draw bounding boxes on the image using labels
                        draw_boxes(images[i], batch_boxes[i], config['model']['labels'], obj_thresh)

                        # show the video with detection bounding boxes
                        if show_window: cv2.imshow('video with bboxes', images[i])

                        # write result to the output video
                        video_writer.write(images[i])
                    images = []

                if show_window and cv2.waitKey(1) == 27: break # esc to quit

        if show_window: cv2.destroyAllWindows()
        video_reader.release()
        video_writer.release()
    else: # do detection on an image or a set of images
        image_paths = []

        if os.path.isdir(input_path):
            for inp_file in os.listdir(input_path):
                image_paths += [input_path + inp_file]
        else:
@@ -129,20 +130,20 @@ def _main_(args):
        print('Elapsed time = {}'.format(time.time() - start))
        times.append(time.time() - start)
        # draw bounding boxes on the image using labels
        draw_boxes(image, boxes, config['model']['labels'], obj_thresh)

        # write the image with bounding boxes to file
        cv2.imwrite(output_path + image_path.split('/')[-1], np.uint8(image))

    file = open(args.output + '/time.txt','w')
    file.write('Tiempo promedio:' + str(np.mean(times)))
    file.close()

if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Predict with a trained yolo model')
    argparser.add_argument('-c', '--conf', help='path to configuration file')
    argparser.add_argument('-i', '--input', help='path to an image, a directory of images, a video, or webcam')
    argparser.add_argument('-o', '--output', default='output/', help='path to output directory')

    args = argparser.parse_args()
    _main_(args)
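Based on the argument parser above, the script takes -c/--conf, -i/--input and -o/--output. Below is a hedged driver sketch; the module name predict and the concrete paths are assumptions, only the attribute names conf, input and output come from this diff. On the image branch a run also leaves a time.txt in the output directory whose 'Tiempo promedio:' line (Spanish for 'average time') holds the mean per-image inference time.

# Hypothetical programmatic invocation, equivalent to something like
#   python predict.py -c config.json -i images/ -o result_otros/
# ("predict" as the module name and all paths are placeholders.)
from argparse import Namespace
from predict import _main_  # assumed module name for the script shown in this diff

args = Namespace(conf="config.json", input="images/", output="result_otros/")
_main_(args)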