change panel disconnect
Result_yolo3_panel/.gitignore (vendored)
@@ -4,7 +4,8 @@ Result_Prueba/
Prueba/
Otros_2/
Otros/
Result_Prueba_disconnect
*.jpg
*.h5
*.pkl

BIN  Result_yolo3_panel/Mision 11_DJI_0058.jpg  (new file, 100 KiB)
BIN  Result_yolo3_panel/Mision 3_DJI_0045.jpg  (new file, 198 KiB)
BIN  __pycache__/panel_disconnect.cpython-37.pyc  (new file)

panel_disconnect.py
@@ -8,34 +8,107 @@ Created on Tue Mar 17 13:55:42 2020
import numpy as np


def disconnect(image, boxes, obj_thresh = 0.5, area_min = 400, merge = 0, z_thresh = 1.8):

    new_boxes = []
    for num, box in enumerate(boxes):

        xmin = box.xmin + merge
        xmax = box.xmax - merge
        ymin = box.ymin + merge
        ymax = box.ymax - merge

        if xmin > 0 and ymin > 0 and xmax < image.shape[1] and ymax < image.shape[0] and box.get_score() > obj_thresh:

            area = (ymax - ymin)*(xmax - xmin)
            z_score = np.sum(image[np.int(ymin):np.int(ymax), np.int(xmin):np.int(xmax)]) / area

            if area > area_min:

                box.z_score = z_score
                new_boxes.append(box)
                #boxes_area_score[str(num)] = {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'score' : score, 'area' : area}

    mean_score = np.mean([box.z_score for box in new_boxes])
    sd_score = np.std([box.z_score for box in new_boxes])

    new_boxes = [box for box in new_boxes if (box.z_score - mean_score)/sd_score > z_thresh]

    for box in new_boxes:

        z_score = (box.z_score - mean_score)/sd_score
        box.classes[0] = min((z_score-z_thresh)*0.5/(3-z_thresh)+ 0.5, 1)

    return new_boxes
        box.score = -1
    return new_boxes


def disconnect_plot(image, boxes, obj_thresh = 0.5, area_min = 400, merge = 0, z_thresh = 1.8):

    new_boxes = []
    for num, box in enumerate(boxes):

        xmin = box.xmin + merge
        xmax = box.xmax - merge
        ymin = box.ymin + merge
        ymax = box.ymax - merge

        if xmin > 0 and ymin > 0 and xmax < image.shape[1] and ymax < image.shape[0] and box.get_score() > obj_thresh:

            area = (ymax - ymin)*(xmax - xmin)
            z_score = np.sum(image[np.int(ymin):np.int(ymax), np.int(xmin):np.int(xmax)]) / area

            if area > area_min:

                box.z_score = z_score
                new_boxes.append(box)
                #boxes_area_score[str(num)] = {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'score' : score, 'area' : area}

    mean_score = np.mean([box.z_score for box in new_boxes])
    sd_score = np.std([box.z_score for box in new_boxes])

    normal_score = ([box.z_score for box in new_boxes] - mean_score)/sd_score
    # plt.figure()
    # _ = plt.hist(normal_score, bins='auto')  # arguments are passed to np.histogram
    # plt.title("Histogram with 'auto' bins")
    # plt.show()
    #
    # plt.figure()
    # mean = np.mean([boxes_area_score[i]['area'] for i in boxes_area_score])
    # sd = np.std([boxes_area_score[i]['area'] for i in boxes_area_score])
    # normal = ([boxes_area_score[i]['area'] for i in boxes_area_score] - mean)/sd
    # _ = plt.hist(normal, bins='auto')  # arguments are passed to np.histogram
    # plt.title("Histogram with 'auto' bins")
    # plt.show()

    new_boxes = [box for box in new_boxes if (box.z_score - mean_score)/sd_score > z_thresh]

    for box in new_boxes:

        z_score = (box.z_score - mean_score)/sd_score
        box.classes[0] = min((z_score-z_thresh)*0.5/(3-z_thresh)+ 0.5, 1)

    colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()
    plt.figure(figsize=(10,6))
    plt.imshow(I,cmap = 'gray')
    current_axis = plt.gca()

    for box in new_boxes:

        color = colors[2]

        #boxes_area_score[key]['score_norm'] = (boxes_area_score[key]['score'] - mean) / sd
        #z_score = (box.score - mean_score) / sd_score
        #z_score = (boxes_area_score[key]['area'] )

        ### Write the z-score
        #if z_score > 1:
        current_axis.text((box.xmin + box.xmax)/2,
                          (box.ymin+ box.ymax)/2,
                          '%.2f' % box.classes[0], size='x-large',
                          color='white', bbox={'facecolor':color, 'alpha':1.0})

    return new_boxes

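For reference, `disconnect` treats each detection's mean pixel intensity as a sample, standardizes it across all detections in the image, keeps only boxes whose z-score exceeds `z_thresh`, and rewrites `box.classes[0]` with a confidence derived from that z-score. Below is a minimal sketch of how it might be called, using a hypothetical `FakeBox` that mimics only the attributes `disconnect` actually touches (the script itself presumably passes the repo's `BoundBox` objects from `utils.bbox`):

import numpy as np

from panel_disconnect import disconnect  # module added in this commit


class FakeBox:
    """Hypothetical stand-in for BoundBox with only the fields disconnect() uses."""
    def __init__(self, xmin, ymin, xmax, ymax, score):
        self.xmin, self.ymin, self.xmax, self.ymax = xmin, ymin, xmax, ymax
        self.classes = np.array([score], dtype=float)

    def get_score(self):
        return float(self.classes[0])


# Synthetic "thermal" image: one hot 40x40 patch, the rest cold.
image = np.zeros((200, 200), dtype=float)
image[50:90, 50:90] = 10.0

boxes = [
    FakeBox(50, 50, 90, 90, 0.9),      # covers the hot patch -> high mean intensity
    FakeBox(120, 120, 160, 160, 0.9),  # covers a cold area   -> low mean intensity
]

# With only two boxes the z-scores are +/-1, so a low z_thresh is used here just to
# exercise the filter; the predict script calls disconnect(..., z_thresh=1.8).
kept = disconnect(image, boxes, obj_thresh=0.5, area_min=400, merge=0, z_thresh=0.5)
for b in kept:
    print(b.xmin, b.ymin, b.xmax, b.ymax, b.classes[0])  # only the hot box survives

Note that `np.int`, used inside `disconnect`, was removed in NumPy 1.24, so recent NumPy versions need plain `int` there.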
@@ -13,111 +13,9 @@ from utils.bbox import draw_boxes
from tensorflow.keras.models import load_model
from tqdm import tqdm
import numpy as np
from panel_disconnect import disconnect


def disconnect(image, boxes, obj_thresh = 0.5, area_min = 400, merge = 0, z_thresh = 1.8):

    new_boxes = []
    for num, box in enumerate(boxes):

        xmin = box.xmin + merge
        xmax = box.xmax - merge
        ymin = box.ymin + merge
        ymax = box.ymax - merge

        if xmin > 0 and ymin > 0 and xmax < image.shape[1] and ymax < image.shape[0] and box.get_score() > obj_thresh:

            area = (ymax - ymin)*(xmax - xmin)
            z_score = np.sum(image[np.int(ymin):np.int(ymax), np.int(xmin):np.int(xmax)]) / area

            if area > area_min:

                box.z_score = z_score
                new_boxes.append(box)
                #boxes_area_score[str(num)] = {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'score' : score, 'area' : area}

    mean_score = np.mean([box.z_score for box in new_boxes])
    sd_score = np.std([box.z_score for box in new_boxes])

    new_boxes = [box for box in new_boxes if (box.z_score - mean_score)/sd_score > z_thresh]

    for box in new_boxes:

        z_score = (box.z_score - mean_score)/sd_score
        box.classes[0] = min((z_score-z_thresh)*0.5/(3-z_thresh)+ 0.5, 1)

    return new_boxes


def disconnect_plot(image, boxes, obj_thresh = 0.5, area_min = 400, merge = 0, z_thresh = 1.8):

    new_boxes = []
    for num, box in enumerate(boxes):

        xmin = box.xmin + merge
        xmax = box.xmax - merge
        ymin = box.ymin + merge
        ymax = box.ymax - merge

        if xmin > 0 and ymin > 0 and xmax < image.shape[1] and ymax < image.shape[0] and box.get_score() > obj_thresh:

            area = (ymax - ymin)*(xmax - xmin)
            z_score = np.sum(image[np.int(ymin):np.int(ymax), np.int(xmin):np.int(xmax)]) / area

            if area > area_min:

                box.z_score = z_score
                new_boxes.append(box)
                #boxes_area_score[str(num)] = {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'score' : score, 'area' : area}

    mean_score = np.mean([box.z_score for box in new_boxes])
    sd_score = np.std([box.z_score for box in new_boxes])

    normal_score = ([box.z_score for box in new_boxes] - mean_score)/sd_score
    # plt.figure()
    # _ = plt.hist(normal_score, bins='auto')  # arguments are passed to np.histogram
    # plt.title("Histogram with 'auto' bins")
    # plt.show()
    #
    # plt.figure()
    # mean = np.mean([boxes_area_score[i]['area'] for i in boxes_area_score])
    # sd = np.std([boxes_area_score[i]['area'] for i in boxes_area_score])
    # normal = ([boxes_area_score[i]['area'] for i in boxes_area_score] - mean)/sd
    # _ = plt.hist(normal, bins='auto')  # arguments are passed to np.histogram
    # plt.title("Histogram with 'auto' bins")
    # plt.show()

    new_boxes = [box for box in new_boxes if (box.z_score - mean_score)/sd_score > z_thresh]

    for box in new_boxes:

        z_score = (box.z_score - mean_score)/sd_score
        box.classes[0] = min((z_score-z_thresh)*0.5/(3-z_thresh)+ 0.5, 1)

    colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()
    plt.figure(figsize=(10,6))
    plt.imshow(I,cmap = 'gray')
    current_axis = plt.gca()

    for box in new_boxes:

        color = colors[2]

        #boxes_area_score[key]['score_norm'] = (boxes_area_score[key]['score'] - mean) / sd
        #z_score = (box.score - mean_score) / sd_score
        #z_score = (boxes_area_score[key]['area'] )

        ### Write the z-score
        #if z_score > 1:
        current_axis.text((box.xmin + box.xmax)/2,
                          (box.ymin+ box.ymax)/2,
                          '%.2f' % box.classes[0], size='x-large',
                          color='white', bbox={'facecolor':color, 'alpha':1.0})

    return new_boxes


def _main_(args):

@@ -134,7 +32,7 @@ def _main_(args):
    # Set some parameter
    ###############################
    net_h, net_w = 416, 416 # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.8, 0.3
    obj_thresh, nms_thresh = 0.5, 0.3

    ###############################
    # Load the model
@@ -145,112 +43,46 @@ def _main_(args):
    ###############################
    # Predict bounding boxes
    ###############################
    if 'webcam' in input_path: # do detection on the first webcam
        video_reader = cv2.VideoCapture(0)

        # the main loop
        batch_size = 1
        images = []
        while True:
            ret_val, image = video_reader.read()
            if ret_val == True: images += [image]
    image_paths = []

            if (len(images)==batch_size) or (ret_val==False and len(images)>0):
                batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)
    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]

                for i in range(len(images)):
                    draw_boxes(images[i], batch_boxes[i], config['model']['labels'], obj_thresh)
                    cv2.imshow('video with bboxes', images[i])
                images = []
            if cv2.waitKey(1) == 27:
                break # esc to quit
        cv2.destroyAllWindows()
    elif input_path[-4:] == '.mp4': # do detection on a video
        video_out = output_path + input_path.split('/')[-1]
        video_reader = cv2.VideoCapture(input_path)
    image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    # the main loop
    times = []
    images = [cv2.imread(image_path) for image_path in image_paths]

        video_writer = cv2.VideoWriter(video_out,
                                       cv2.VideoWriter_fourcc(*'MPEG'),
                                       50.0,
                                       (frame_w, frame_h))
        # the main loop
        batch_size = 1
        images = []
        start_point = 0 #%
        show_window = False
        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()
    #print(images)
    start = time.time()
    # predict the bounding boxes
    boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)
    boxes = [[box for box in boxes_image if box.get_score() > obj_thresh] for boxes_image in boxes]

            if (float(i+1)/nb_frames) > start_point/100.:
                images += [image]
    print('Elapsed time = {}'.format(time.time() - start))
    times.append(time.time() - start)

                if (i%batch_size == 0) or (i == (nb_frames-1) and len(images) > 0):
                    # predict the bounding boxes
                    batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)
    boxes_disc = [disconnect(image, boxes_image, z_thresh = 1.8) for image, boxes_image in zip(images, boxes)]

                    for i in range(len(images)):
                        # draw bounding boxes on the image using labels
                        draw_boxes(images[i], batch_boxes[i], config['model']['labels'], obj_thresh)
    for image_path, image, boxes_image in zip(image_paths, images, boxes_disc):

                        # show the video with detection bounding boxes
                        if show_window: cv2.imshow('video with bboxes', images[i])
        #print(boxes_image[0].score)
        # draw bounding boxes on the image using labels

                        # write result to the output video
                        video_writer.write(images[i])
                    images = []
        draw_boxes(image, boxes_image, ["disconnect"], obj_thresh)
        #plt.figure(figsize = (10,12))
        #plt.imshow(I)
        # write the image with bounding boxes to file
        cv2.imwrite(output_path + image_path.split('/')[-1], np.uint8(image))

                if show_window and cv2.waitKey(1) == 27: break # esc to quit

        if show_window: cv2.destroyAllWindows()
        video_reader.release()
        video_writer.release()
    else: # do detection on an image or a set of images

        image_paths = []

        if os.path.isdir(input_path):
            for inp_file in os.listdir(input_path):
                image_paths += [input_path + inp_file]
        else:
            image_paths += [input_path]

        image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]

        # the main loop
        times = []
        images = [cv2.imread(image_path) for image_path in image_paths]

        print(images)
        start = time.time()
        # predict the bounding boxes
        boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)
        boxes = [[box for box in boxes_image if box.get_score() > obj_thresh] for boxes_image in boxes]

        print('Elapsed time = {}'.format(time.time() - start))
        times.append(time.time() - start)

        boxes_disc = [disconnect(image, boxes_image, z_thresh = 1.8) for image, boxes_image in zip(images, boxes)]

        for image, boxes_image in zip(images, boxes_disc):

            # draw bounding boxes on the image using labels
            I = image.copy()
            draw_boxes(I, boxes_image, config['model']['labels'], obj_thresh)
            plt.figure(figsize = (10,12))
            plt.imshow(I)
            # write the image with bounding boxes to file
            cv2.imwrite(output_path + image_path.split('/')[-1], np.uint8(image))

        file = open(args.output + '/time.txt','w')
        file.write('Tiempo promedio:' + str(np.mean(times)))
        file.close()
    file = open(args.output + '/time.txt','w')
    file.write('Tiempo promedio:' + str(np.mean(times)))
    file.close()


if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Predict with a trained yolo model')
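As a quick sanity check on the confidence written into the boxes above, the expression `min((z - z_thresh)*0.5/(3 - z_thresh) + 0.5, 1)` rescales the z-score linearly so that a box right at `z_thresh` gets 0.5 and a box at z = 3 gets 1.0, clamped beyond that. A small sketch with the script's default `z_thresh = 1.8`:

def z_to_confidence(z, z_thresh=1.8):
    # Same linear rescaling that disconnect() stores in box.classes[0]:
    # 0.5 at z == z_thresh, 1.0 at z == 3, clamped to 1 beyond that.
    return min((z - z_thresh) * 0.5 / (3 - z_thresh) + 0.5, 1)


for z in (1.8, 2.4, 3.0, 4.0):
    print(z, round(z_to_confidence(z), 3))  # -> 0.5, 0.75, 1.0, 1.0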