{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Cargar el modelo ssd7 \n", "(https://github.com/pierluigiferrari/ssd_keras#how-to-fine-tune-one-of-the-trained-models-on-your-own-dataset)\n", "\n", "Training del SSD7 (modelo reducido de SSD). Parámetros en config_7.json y descargar VGG_ILSVRC_16_layers_fc_reduced.h5\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using TensorFlow backend.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Training on: \t{'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8}\n", "\n", "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Colocations handled automatically by placer.\n", "OK create model\n", "\n", "Loading pretrained weights VGG.\n", "\n", "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use tf.cast instead.\n", "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use tf.cast instead.\n", "__________________________________________________________________________________________________\n", "Layer (type) Output Shape Param # Connected to \n", "==================================================================================================\n", "input_1 (InputLayer) (None, 400, 400, 3) 0 \n", "__________________________________________________________________________________________________\n", "identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n", "__________________________________________________________________________________________________\n", "input_mean_normalization (Lambd (None, 400, 400, 3) 0 identity_layer[0][0] \n", "__________________________________________________________________________________________________\n", "input_channel_swap (Lambda) (None, 400, 400, 3) 0 input_mean_normalization[0][0] \n", "__________________________________________________________________________________________________\n", "conv1_1 (Conv2D) (None, 400, 400, 64) 1792 input_channel_swap[0][0] \n", "__________________________________________________________________________________________________\n", "conv1_2 (Conv2D) (None, 400, 400, 64) 36928 conv1_1[0][0] \n", "__________________________________________________________________________________________________\n", "pool1 (MaxPooling2D) (None, 200, 200, 64) 0 conv1_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv2_1 (Conv2D) (None, 200, 200, 128 73856 pool1[0][0] \n", "__________________________________________________________________________________________________\n", "conv2_2 (Conv2D) (None, 200, 200, 128 147584 conv2_1[0][0] \n", "__________________________________________________________________________________________________\n", "pool2 (MaxPooling2D) (None, 100, 100, 128 0 conv2_2[0][0] \n", 
"__________________________________________________________________________________________________\n", "conv3_1 (Conv2D) (None, 100, 100, 256 295168 pool2[0][0] \n", "__________________________________________________________________________________________________\n", "conv3_2 (Conv2D) (None, 100, 100, 256 590080 conv3_1[0][0] \n", "__________________________________________________________________________________________________\n", "conv3_3 (Conv2D) (None, 100, 100, 256 590080 conv3_2[0][0] \n", "__________________________________________________________________________________________________\n", "pool3 (MaxPooling2D) (None, 50, 50, 256) 0 conv3_3[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_1 (Conv2D) (None, 50, 50, 512) 1180160 pool3[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_2 (Conv2D) (None, 50, 50, 512) 2359808 conv4_1[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3 (Conv2D) (None, 50, 50, 512) 2359808 conv4_2[0][0] \n", "__________________________________________________________________________________________________\n", "pool4 (MaxPooling2D) (None, 25, 25, 512) 0 conv4_3[0][0] \n", "__________________________________________________________________________________________________\n", "conv5_1 (Conv2D) (None, 25, 25, 512) 2359808 pool4[0][0] \n", "__________________________________________________________________________________________________\n", "conv5_2 (Conv2D) (None, 25, 25, 512) 2359808 conv5_1[0][0] \n", "__________________________________________________________________________________________________\n", "conv5_3 (Conv2D) (None, 25, 25, 512) 2359808 conv5_2[0][0] \n", "__________________________________________________________________________________________________\n", "pool5 (MaxPooling2D) (None, 25, 25, 512) 0 conv5_3[0][0] \n", "__________________________________________________________________________________________________\n", "fc6 (Conv2D) (None, 25, 25, 1024) 4719616 pool5[0][0] \n", "__________________________________________________________________________________________________\n", "fc7 (Conv2D) (None, 25, 25, 1024) 1049600 fc6[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_1 (Conv2D) (None, 25, 25, 256) 262400 fc7[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_padding (ZeroPadding2D) (None, 27, 27, 256) 0 conv6_1[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_2 (Conv2D) (None, 13, 13, 512) 1180160 conv6_padding[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_1 (Conv2D) (None, 13, 13, 128) 65664 conv6_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_padding (ZeroPadding2D) (None, 15, 15, 128) 0 conv7_1[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_2 (Conv2D) (None, 7, 7, 256) 295168 conv7_padding[0][0] \n", "__________________________________________________________________________________________________\n", "conv8_1 (Conv2D) (None, 7, 7, 128) 32896 conv7_2[0][0] \n", 
"__________________________________________________________________________________________________\n", "conv8_2 (Conv2D) (None, 5, 5, 256) 295168 conv8_1[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_1 (Conv2D) (None, 5, 5, 128) 32896 conv8_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3_norm (L2Normalization) (None, 50, 50, 512) 512 conv4_3[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_2 (Conv2D) (None, 3, 3, 256) 295168 conv9_1[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3_norm_mbox_conf (Conv2D) (None, 50, 50, 36) 165924 conv4_3_norm[0][0] \n", "__________________________________________________________________________________________________\n", "fc7_mbox_conf (Conv2D) (None, 25, 25, 54) 497718 fc7[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_2_mbox_conf (Conv2D) (None, 13, 13, 54) 248886 conv6_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_2_mbox_conf (Conv2D) (None, 7, 7, 54) 124470 conv7_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv8_2_mbox_conf (Conv2D) (None, 5, 5, 36) 82980 conv8_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_2_mbox_conf (Conv2D) (None, 3, 3, 36) 82980 conv9_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3_norm_mbox_loc (Conv2D) (None, 50, 50, 16) 73744 conv4_3_norm[0][0] \n", "__________________________________________________________________________________________________\n", "fc7_mbox_loc (Conv2D) (None, 25, 25, 24) 221208 fc7[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_2_mbox_loc (Conv2D) (None, 13, 13, 24) 110616 conv6_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_2_mbox_loc (Conv2D) (None, 7, 7, 24) 55320 conv7_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv8_2_mbox_loc (Conv2D) (None, 5, 5, 16) 36880 conv8_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_2_mbox_loc (Conv2D) (None, 3, 3, 16) 36880 conv9_2[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3_norm_mbox_conf_reshape (None, 10000, 9) 0 conv4_3_norm_mbox_conf[0][0] \n", "__________________________________________________________________________________________________\n", "fc7_mbox_conf_reshape (Reshape) (None, 3750, 9) 0 fc7_mbox_conf[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_2_mbox_conf_reshape (Resh (None, 1014, 9) 0 conv6_2_mbox_conf[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_2_mbox_conf_reshape (Resh (None, 294, 9) 0 conv7_2_mbox_conf[0][0] \n", 
"__________________________________________________________________________________________________\n", "conv8_2_mbox_conf_reshape (Resh (None, 100, 9) 0 conv8_2_mbox_conf[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_2_mbox_conf_reshape (Resh (None, 36, 9) 0 conv9_2_mbox_conf[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3_norm_mbox_priorbox (Anc (None, 50, 50, 4, 8) 0 conv4_3_norm_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "fc7_mbox_priorbox (AnchorBoxes) (None, 25, 25, 6, 8) 0 fc7_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_2_mbox_priorbox (AnchorBo (None, 13, 13, 6, 8) 0 conv6_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_2_mbox_priorbox (AnchorBo (None, 7, 7, 6, 8) 0 conv7_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv8_2_mbox_priorbox (AnchorBo (None, 5, 5, 4, 8) 0 conv8_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_2_mbox_priorbox (AnchorBo (None, 3, 3, 4, 8) 0 conv9_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "mbox_conf (Concatenate) (None, 15194, 9) 0 conv4_3_norm_mbox_conf_reshape[0]\n", " fc7_mbox_conf_reshape[0][0] \n", " conv6_2_mbox_conf_reshape[0][0] \n", " conv7_2_mbox_conf_reshape[0][0] \n", " conv8_2_mbox_conf_reshape[0][0] \n", " conv9_2_mbox_conf_reshape[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3_norm_mbox_loc_reshape ( (None, 10000, 4) 0 conv4_3_norm_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "fc7_mbox_loc_reshape (Reshape) (None, 3750, 4) 0 fc7_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_2_mbox_loc_reshape (Resha (None, 1014, 4) 0 conv6_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv7_2_mbox_loc_reshape (Resha (None, 294, 4) 0 conv7_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv8_2_mbox_loc_reshape (Resha (None, 100, 4) 0 conv8_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_2_mbox_loc_reshape (Resha (None, 36, 4) 0 conv9_2_mbox_loc[0][0] \n", "__________________________________________________________________________________________________\n", "conv4_3_norm_mbox_priorbox_resh (None, 10000, 8) 0 conv4_3_norm_mbox_priorbox[0][0] \n", "__________________________________________________________________________________________________\n", "fc7_mbox_priorbox_reshape (Resh (None, 3750, 8) 0 fc7_mbox_priorbox[0][0] \n", "__________________________________________________________________________________________________\n", "conv6_2_mbox_priorbox_reshape ( (None, 1014, 8) 0 conv6_2_mbox_priorbox[0][0] \n", 
"__________________________________________________________________________________________________\n", "conv7_2_mbox_priorbox_reshape ( (None, 294, 8) 0 conv7_2_mbox_priorbox[0][0] \n", "__________________________________________________________________________________________________\n", "conv8_2_mbox_priorbox_reshape ( (None, 100, 8) 0 conv8_2_mbox_priorbox[0][0] \n", "__________________________________________________________________________________________________\n", "conv9_2_mbox_priorbox_reshape ( (None, 36, 8) 0 conv9_2_mbox_priorbox[0][0] \n", "__________________________________________________________________________________________________\n", "mbox_conf_softmax (Activation) (None, 15194, 9) 0 mbox_conf[0][0] \n", "__________________________________________________________________________________________________\n", "mbox_loc (Concatenate) (None, 15194, 4) 0 conv4_3_norm_mbox_loc_reshape[0][\n", " fc7_mbox_loc_reshape[0][0] \n", " conv6_2_mbox_loc_reshape[0][0] \n", " conv7_2_mbox_loc_reshape[0][0] \n", " conv8_2_mbox_loc_reshape[0][0] \n", " conv9_2_mbox_loc_reshape[0][0] \n", "__________________________________________________________________________________________________\n", "mbox_priorbox (Concatenate) (None, 15194, 8) 0 conv4_3_norm_mbox_priorbox_reshap\n", " fc7_mbox_priorbox_reshape[0][0] \n", " conv6_2_mbox_priorbox_reshape[0][\n", " conv7_2_mbox_priorbox_reshape[0][\n", " conv8_2_mbox_priorbox_reshape[0][\n", " conv9_2_mbox_priorbox_reshape[0][\n", "__________________________________________________________________________________________________\n", "predictions (Concatenate) (None, 15194, 21) 0 mbox_conf_softmax[0][0] \n", " mbox_loc[0][0] \n", " mbox_priorbox[0][0] \n", "==================================================================================================\n", "Total params: 24,681,542\n", "Trainable params: 24,681,542\n", "Non-trainable params: 0\n", "__________________________________________________________________________________________________\n" ] } ], "source": [ "from keras.optimizers import Adam, SGD\n", "from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n", "from keras import backend as K\n", "from keras.models import load_model\n", "from math import ceil\n", "import numpy as np\n", "from matplotlib import pyplot as plt\n", "import os\n", "import json\n", "import xml.etree.cElementTree as ET\n", "\n", "import sys\n", "sys.path += [os.path.abspath('../ssd_keras-master')]\n", "\n", "from keras_loss_function.keras_ssd_loss import SSDLoss\n", "from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\n", "from keras_layers.keras_layer_DecodeDetections import DecodeDetections\n", "from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n", "from keras_layers.keras_layer_L2Normalization import L2Normalization\n", "from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n", "from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n", "from data_generator.object_detection_2d_data_generator import DataGenerator\n", "from data_generator.object_detection_2d_geometric_ops import Resize\n", "from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\n", "from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n", "from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n", "from eval_utils.average_precision_evaluator import Evaluator\n", "from 
data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\n",
 "from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\n",
 "\n",
 "\n",
 "def makedirs(path):\n",
 "    try:\n",
 "        os.makedirs(path)\n",
 "    except OSError:\n",
 "        if not os.path.isdir(path):\n",
 "            raise\n",
 "\n",
 "\n",
 "K.tensorflow_backend._get_available_gpus()\n",
 "\n",
 "\n",
 "def lr_schedule(epoch):\n",
 "    if epoch < 80:\n",
 "        return 0.001\n",
 "    elif epoch < 100:\n",
 "        return 0.0001\n",
 "    else:\n",
 "        return 0.00001\n",
 "\n",
 "config_path = 'config_300_fault_C.json'\n",
 "\n",
 "\n",
 "with open(config_path) as config_buffer:\n",
 "    config = json.loads(config_buffer.read())\n",
 "\n",
 "###############################\n",
 "# Parse the annotations\n",
 "###############################\n",
 "path_imgs_training = config['train']['train_image_folder']\n",
 "path_anns_training = config['train']['train_annot_folder']\n",
 "path_imgs_val = config['test']['test_image_folder']\n",
 "path_anns_val = config['test']['test_annot_folder']\n",
 "labels = config['model']['labels']\n",
 "categories = {}\n",
 "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4}  # category 0 is the background\n",
 "for i in range(len(labels)): categories[labels[i]] = i+1\n",
 "print('\\nTraining on: \\t' + str(categories) + '\\n')\n",
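 "\n",
 "# Note (added): with the eight labels from config_300_fault_C.json this builds\n",
 "# categories = {'1': 1, ..., '8': 8}, which is exactly the 'Training on:' line\n",
 "# printed in the output above; class ID 0 is reserved for the background class.\n",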
 "\n",
 "####################################\n",
 "# Parameters\n",
 "###################################\n",
 "#%%\n",
 "img_height = config['model']['input']  # Height of the model input images\n",
 "img_width = config['model']['input']  # Width of the model input images\n",
 "img_channels = 3  # Number of color channels of the model input images\n",
 "mean_color = [123, 117, 104]  # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\n",
 "swap_channels = [2, 1, 0]  # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\n",
 "n_classes = len(labels)  # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
 "scales_pascal = [0.01, 0.05, 0.1, 0.2, 0.37, 0.54, 0.71]  # Anchor box scaling factors; note these are smaller than the factors of the original SSD300 for the Pascal VOC datasets, which start at 0.1\n",
 "#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]  # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
 "scales = scales_pascal\n",
 "aspect_ratios = [[1.0, 2.0, 0.5],\n",
 "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
 "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
 "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
 "                 [1.0, 2.0, 0.5],\n",
 "                 [1.0, 2.0, 0.5]]  # The anchor box aspect ratios used in the original SSD300; the order matters\n",
 "two_boxes_for_ar1 = True\n",
 "steps = [8, 16, 32, 64, 100, 300]  # The space between two adjacent anchor box center points for each predictor layer.\n",
 "offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]  # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\n",
 "clip_boxes = False  # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\n",
 "variances = [0.1, 0.1, 0.2, 0.2]  # The variances by which the encoded target coordinates are divided as in the original implementation\n",
 "normalize_coords = True\n",
 "\n",
 "K.clear_session()  # Clear previous models from memory.\n",
 "\n",
 "\n",
 "model_path = config['train']['saved_weights_name']\n",
 "# If a model saved by a previous run exists under this path, it is loaded as-is\n",
 "# below; otherwise a new model is built and compiled.\n",
 "\n",
 "\n",
 "if config['model']['backend'] == 'ssd7':\n",
 "    #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
 "    scales = [0.08, 0.16, 0.32, 0.64, 0.96]  # An explicit list of anchor box scaling factors. 
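\n",
 "# Worked example (added): with img_height = img_width = 400 as in this run, these\n",
 "# factors correspond to base anchor sizes of roughly 0.08*400 = 32 px up to\n",
 "# 0.96*400 = 384 px across the ssd7 predictor layers.\n",
 "# 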
If this is passed, it will override `min_scale` and `max_scale`.\n", " aspect_ratios = [0.5 ,1.0, 2.0] # The list of aspect ratios for the anchor boxes\n", " two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n", " steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n", " offsets = None\n", "\n", "if os.path.exists(model_path):\n", " print(\"\\nLoading pretrained weights.\\n\")\n", " # We need to create an SSDLoss object in order to pass that to the model loader.\n", " ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", "\n", " K.clear_session() # Clear previous models from memory.\n", " model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", " 'L2Normalization': L2Normalization,\n", " 'compute_loss': ssd_loss.compute_loss})\n", "\n", "\n", "else:\n", " ####################################\n", " # Build the Keras model.\n", " ###################################\n", "\n", " if config['model']['backend'] == 'ssd300':\n", " #weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'\n", " from models.keras_ssd300 import ssd_300 as ssd\n", "\n", " model = ssd(image_size=(img_height, img_width, img_channels),\n", " n_classes=n_classes,\n", " mode='training',\n", " l2_regularization=0.0005,\n", " scales=scales,\n", " aspect_ratios_per_layer=aspect_ratios,\n", " two_boxes_for_ar1=two_boxes_for_ar1,\n", " steps=steps,\n", " offsets=offsets,\n", " clip_boxes=clip_boxes,\n", " variances=variances,\n", " normalize_coords=normalize_coords,\n", " subtract_mean=mean_color,\n", " swap_channels=swap_channels)\n", "\n", "\n", " elif config['model']['backend'] == 'ssd7':\n", " #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n", " from models.keras_ssd7 import build_model as ssd\n", " scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n", " aspect_ratios = [0.5 ,1.0, 2.0] # The list of aspect ratios for the anchor boxes\n", " two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n", " steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n", " offsets = None\n", " model = ssd(image_size=(img_height, img_width, img_channels),\n", " n_classes=n_classes,\n", " mode='training',\n", " l2_regularization=0.0005,\n", " scales=scales,\n", " aspect_ratios_global=aspect_ratios,\n", " aspect_ratios_per_layer=None,\n", " two_boxes_for_ar1=two_boxes_for_ar1,\n", " steps=steps,\n", " offsets=offsets,\n", " clip_boxes=clip_boxes,\n", " variances=variances,\n", " normalize_coords=normalize_coords,\n", " subtract_mean=None,\n", " divide_by_stddev=None)\n", "\n", " else :\n", " print('Wrong Backend')\n", "\n", "\n", "\n", " print('OK create model')\n", " #sgd = SGD(lr=config['train']['learning_rate'], momentum=0.9, decay=0.0, nesterov=False)\n", "\n", " # TODO: Set the path to the weights you want to load. 
(this applies only to ssd300 or ssd512).\n",
 "\n",
 "    weights_path = '../ssd_keras-master/VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
 "    print(\"\\nLoading pretrained weights VGG.\\n\")\n",
 "    model.load_weights(weights_path, by_name=True)\n",
 "\n",
 "    # 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
 "    # To follow the original Caffe implementation, use the commented-out SGD;\n",
 "    # otherwise the Adam optimizer used below is the recommended choice.\n",
 "\n",
 "\n",
 "    #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
 "    #sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\n",
 "    optimizer = Adam(lr=config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
 "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
 "    model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)\n",
 "\n",
 "    model.summary()\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [
 "Instantiate the data generators and train the model.\n",
 "\n",
 "*Change made so that both png and jpg images can be read: keras-ssd-master/data_generator/object_detection_2d_data_generator.py, function parse_xml.\n",
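 "\n",
 "A sketch of that kind of change (hypothetical helper, not the repo's exact code): resolve each image ID against every known extension instead of assuming `.jpg`:\n",
 "\n",
 "```python\n",
 "import os\n",
 "\n",
 "def resolve_image_path(images_dir, image_id, exts=('.jpg', '.png')):\n",
 "    # Return the first existing file for this image ID; parse_xml can call\n",
 "    # this when it builds its list of image filenames.\n",
 "    for ext in exts:\n",
 "        candidate = os.path.join(images_dir, image_id + ext)\n",
 "        if os.path.exists(candidate):\n",
 "            return candidate\n",
 "    raise FileNotFoundError('No image found for ' + image_id)\n",
 "```\n"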
 ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [
 "\n",
 "\n",
 "Processing image set 'train.txt': 0%| | 0/1337 [00:00<?, ?it/s]\n" ] }, { "ename": "OSError", "evalue": "cannot identify image file 'Train&Test_C/images/Mision 22_DJI_0067.jpg'", "output_type": "error", "traceback": [
 "OSError Traceback (most recent call last)\n",
 "<ipython-input> in <module>  --> 189: batch_images, batch_labels = next(train_generator)\n",
 "~/Desktop/Rentadrone/ssd_keras-master/data_generator/object_detection_2d_data_generator.py in generate(...)  --> 1021: with Image.open(filename) as image:\n",
 "~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/PIL/Image.py in open(fp, mode)  --> 2705: raise IOError(\"cannot identify image file %r\" % (filename if filename else fp))\n",
 "OSError: cannot identify image file 'Train&Test_C/images/Mision 22_DJI_0067.jpg'\n" ] } ], "source": [
 "# MODEL TRAINING\n",
 "#####################################################################\n",
 "# Instantiate two `DataGenerator` objects: One for training, one for validation.\n",
 "######################################################################\n",
 "# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n",
 "\n",
 "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
 "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
 "\n",
 "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n",
 "\n",
 "# The XML parser needs to know what object class names to look for and in which order to map them to integers.\n",
 "classes = ['background'] + labels\n",
 "\n",
 "train_dataset.parse_xml(images_dirs=[config['train']['train_image_folder']],\n",
 "                        image_set_filenames=[config['train']['train_image_set_filename']],\n",
 "                        annotations_dirs=[config['train']['train_annot_folder']],\n",
 "                        classes=classes,\n",
 "                        include_classes='all',\n",
 "                        #include_classes=[1],\n",
 "                        exclude_truncated=False,\n",
 "                        exclude_difficult=False,\n",
 "                        ret=False)\n",
 "\n",
 "val_dataset.parse_xml(images_dirs=[config['test']['test_image_folder']],\n",
 "                      image_set_filenames=[config['test']['test_image_set_filename']],\n",
 "                      annotations_dirs=[config['test']['test_annot_folder']],\n",
 "                      classes=classes,\n",
 "                      include_classes='all',\n",
 "                      #include_classes=[1],\n",
 "                      exclude_truncated=False,\n",
 "                      exclude_difficult=False,\n",
 "                      ret=False)\n",
 "\n",
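 "# Note (added): the failed run above shows this cell can crash mid-training on a\n",
 "# single unreadable image. A minimal pre-flight check (sketch; the `filenames`\n",
 "# lists are filled by parse_xml above) to spot corrupt files beforehand:\n",
 "#   from PIL import Image\n",
 "#   for fn in train_dataset.filenames + val_dataset.filenames:\n",
 "#       try:\n",
 "#           Image.open(fn).verify()\n",
 "#       except Exception as e:\n",
 "#           print(fn, e)\n",
 "\n",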
 "#########################\n",
 "# 3: Set the batch size.\n",
 "#########################\n",
 "batch_size = config['train']['batch_size']  # Change the batch size if you like, or if you run into GPU memory issues.\n",
 "\n",
 "##########################\n",
 "# 4: Set the image transformations for pre-processing and data augmentation options.\n",
 "##########################\n",
 "# For the training generator (the augmentation chain is instantiated further below):\n",
 "\n",
 "# For the validation generator:\n",
 "convert_to_3_channels = ConvertTo3Channels()\n",
 "resize = Resize(height=img_height, width=img_width)\n",
 "\n",
 "#######################################\n",
 "# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n",
 "#######################################\n",
 "# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\n",
 "if config['model']['backend'] == 'ssd300':\n",
 "    predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n",
 "                       model.get_layer('fc7_mbox_conf').output_shape[1:3],\n",
 "                       model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n",
 "                       model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n",
 "                       model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n",
 "                       model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n",
 "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
 "                                        img_width=img_width,\n",
 "                                        n_classes=n_classes,\n",
 "                                        predictor_sizes=predictor_sizes,\n",
 "                                        scales=scales,\n",
 "                                        aspect_ratios_per_layer=aspect_ratios,\n",
 "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n",
 "                                        steps=steps,\n",
 "                                        offsets=offsets,\n",
 "                                        clip_boxes=clip_boxes,\n",
 "                                        variances=variances,\n",
 "                                        matching_type='multi',\n",
 "                                        pos_iou_threshold=0.5,\n",
 "                                        neg_iou_limit=0.5,\n",
 "                                        normalize_coords=normalize_coords)\n",
 "\n",
 "elif config['model']['backend'] == 'ssd7':\n",
 "    predictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n",
 "                       model.get_layer('classes5').output_shape[1:3],\n",
 "                       model.get_layer('classes6').output_shape[1:3],\n",
 "                       model.get_layer('classes7').output_shape[1:3]]\n",
 "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
 "                                        img_width=img_width,\n",
 "                                        n_classes=n_classes,\n",
 "                                        predictor_sizes=predictor_sizes,\n",
 "                                        scales=scales,\n",
 "                                        aspect_ratios_global=aspect_ratios,\n",
 "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n",
 "                                        steps=steps,\n",
 "                                        offsets=offsets,\n",
 "                                        clip_boxes=clip_boxes,\n",
 "                                        variances=variances,\n",
 "                                        matching_type='multi',\n",
 "                                        pos_iou_threshold=0.5,\n",
 "                                        neg_iou_limit=0.3,\n",
 "                                        normalize_coords=normalize_coords)\n",
 "\n",
 "\n",
 "data_augmentation_chain = DataAugmentationVariableInputSize(resize_height=img_height,\n",
 "                                                            resize_width=img_width,\n",
 "                                                            random_brightness=(-48, 48, 0.5),\n",
 "                                                            random_contrast=(0.5, 1.8, 0.5),\n",
 "                                                            random_saturation=(0.5, 1.8, 0.5),\n",
 "                                                            random_hue=(18, 0.5),\n",
 "                                                            random_flip=0.5,\n",
 "                                                            n_trials_max=3,\n",
 "                                                            clip_boxes=True,\n",
 "                                                            overlap_criterion='area',\n",
 "                                                            bounds_box_filter=(0.3, 1.0),\n",
 "                                                            bounds_validator=(0.5, 1.0),\n",
 "                                                            n_boxes_min=1,\n",
 "                                                            background=(0, 0, 0))\n",
 "#######################\n",
 "# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n",
 "#######################\n",
 "\n",
 "train_generator = train_dataset.generate(batch_size=batch_size,\n",
 "                                         shuffle=True,\n",
 "                                         transformations=[data_augmentation_chain],\n",
 "                                         label_encoder=ssd_input_encoder,\n",
 "                                         returns={'processed_images',\n",
 "                                                  'encoded_labels'},\n",
 "                                         keep_images_without_gt=False)\n",
 "\n",
 "val_generator = val_dataset.generate(batch_size=batch_size,\n",
 "                                     shuffle=False,\n",
 "                                     transformations=[convert_to_3_channels,\n",
 "                                                      resize],\n",
 "                                     label_encoder=ssd_input_encoder,\n",
 "                                     returns={'processed_images',\n",
 "                                              'encoded_labels'},\n",
 "                                     keep_images_without_gt=False)\n",
 "\n",
 "# Summary of training instances per class\n",
 "category_train_list = []\n",
 "for image_label in train_dataset.labels:\n",
 "    category_train_list += [i[0] for i in image_label]\n",
 "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n",
 "for i in summary_category_training.keys():\n",
 "    print(i, ': {:.0f}'.format(summary_category_training[i]))\n",
 "\n",
 "\n",
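 "# Optional sanity check (sketch, added): draw one batch and confirm shapes before\n",
 "# committing to a long run; note that `next(train_generator)` is also called once\n",
 "# further below.\n",
 "#   imgs, labs = next(train_generator)\n",
 "#   print(imgs.shape, labs.shape)  # for this ssd300 run: (batch_size, 400, 400, 3) and (batch_size, 15194, 21)\n",
 "\n",
 "# Get the number of samples in the training and validation 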
datasets.\n", "train_dataset_size = train_dataset.get_dataset_size()\n", "val_dataset_size = val_dataset.get_dataset_size()\n", "\n", "print(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\n", "print(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n", "\n", "\n", "\n", "##########################\n", "# Define model callbacks.\n", "#########################\n", "\n", "# TODO: Set the filepath under which you want to save the model.\n", "model_checkpoint = ModelCheckpoint(filepath= config['train']['saved_weights_name'],\n", " monitor='val_loss',\n", " verbose=1,\n", " save_best_only=True,\n", " save_weights_only=False,\n", " mode='auto',\n", " period=1)\n", "#model_checkpoint.best =\n", "\n", "csv_logger = CSVLogger(filename='log.csv',\n", " separator=',',\n", " append=True)\n", "\n", "learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n", " verbose=1)\n", "\n", "terminate_on_nan = TerminateOnNaN()\n", "\n", "callbacks = [model_checkpoint,\n", " csv_logger,\n", " learning_rate_scheduler,\n", " terminate_on_nan]\n", "\n", "\n", "\n", "batch_images, batch_labels = next(train_generator)\n", "\n", "\n", "initial_epoch = 0\n", "final_epoch = 100 #config['train']['nb_epochs']\n", "steps_per_epoch = 200\n", "\n", "history = model.fit_generator(generator=train_generator,\n", " steps_per_epoch=steps_per_epoch,\n", " epochs=final_epoch,\n", " callbacks=callbacks,\n", " validation_data=val_generator,\n", " validation_steps=ceil(val_dataset_size/batch_size*10),\n", " initial_epoch=initial_epoch,\n", " verbose = 1 if config['train']['debug'] else 2)\n", "\n", "history_path = config['train']['saved_weights_name'].split('.')[0] + '_history'\n", "\n", "np.save(history_path, history.history)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['background', '1', '4']" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "classes" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "dict_keys(['val_loss', 'loss', 'lr'])\n" ] }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xd8lfXd//HXJ3sTSAh7hCFDRBBEFFQcWHFb66jVqrWlvWtbW1ur9r5tb3t3/rqsta3iqnshiAMXKgqKTJG9VxIgk0D2/P7++J4gmxCSHMj1fj4eeZBzznXO9b3OCdf7fOdlzjlERCS4IsJdABERCS8FgYhIwCkIREQCTkEgIhJwCgIRkYBTEIiIBJyCQOQQzOw/ZvabRm67yczOP9rXEWltCgIRkYBTEIiIBJyCQI57oSaZO81siZmVmdljZtbJzN4ysxIzm2Fm7ffY/jIzW25mxWY208wG7fHYcDNbFHrei0DcPvu6xMwWh577qZkNbWKZv2Nm68ysyMxeM7OuofvNzP5mZnlmtjN0TENCj11kZitCZcsxs5816Q0T2YeCQNqKq4DxwAnApcBbwC+AdPzf+Y8AzOwE4Hngx0BHYDrwupnFmFkM8CrwNNABeDn0uoSeewrwOPBdIA14GHjNzGKPpKBmdi7we+AaoAuwGXgh9PAFwFmh40gFrgUKQ489BnzXOZcMDAE+OJL9ihyMgkDain8453KdcznALGCuc+5z51wVMBUYHtruWuBN59x7zrka4M9APHAGMBqIBu53ztU45yYD8/fYx3eAh51zc51zdc65J4Gq0POOxDeAx51zi0Lluwc43cx6AzVAMjAQMOfcSufcttDzaoDBZpbinNvhnFt0hPsVOSAFgbQVuXv8XnGA20mh37viv4ED4JyrB7KAbqHHctzeKzFu3uP3XsBPQ81CxWZWDPQIPe9I7FuGUvy3/m7OuQ+AB4F/ArlmNsnMUkKbXgVcBGw2s4/M7PQj3K/IASkIJGi24k/ogG+Tx5/Mc4BtQLfQfQ167vF7FvBb51zqHj8Jzrnnj7IMifimphwA59wDzrkRwIn4JqI7Q/fPd85dDmTgm7BeOsL9ihyQgkCC5iXgYjM7z8yigZ/im3c+BeYAtcCPzCzKzL4KjNrjuY8A3zOz00KduolmdrGZJR9hGZ4DbjGzYaH+hd/hm7I2mdmpodePBsqASqAu1IfxDTNrF2rS2gXUHcX7ILKbgkACxTm3GrgB+AdQgO9YvtQ5V+2cqwa+CtwM7MD3J0zZ47kL8P0ED4YeXxfa9kjL8D5wL/AKvhbSF7gu9HAKPnB24JuPCvH9GAA3ApvMbBfwvdBxiBw104VpRESCTTUCEZGAa7EgMLPHQ5Nilu1xXwcze8/M1ob+bX+o1xARkZbXkjWC/wAX7nPf3cD7zrn+wPuh2yIiEkYt2kcQmiDzhnOuYYr8amCcc26bmXUBZjrnBrRYAURE5LCiWnl/nRpmSYbCIONgG5rZRGAiQGJi4oiBAwe2UhFFRNqGhQsXFjjnOh5uu9YOgkZzzk0CJgGMHDnSLViwIMwlEhE5vpjZ5sNv1fqjhnJDTUKE/s1r5f2LiMg+WjsIXgNuCv1+EzCtlfcvIiL7aMnho8/jp+wPMLNsM7sV+AMw3szW4pcM/kNL7V9ERBqnxfoInHNfP8hD5zXH69fU1JCdnU1lZWVzvNwxKy4uju7duxMdHR3uoohIG3XMdhYfTnZ2NsnJyfTu3Zu9F4tsO5xzFBYWkp2dTWZmZriLIyJt1HG7xERlZSVpaWltNgQAzIy0tLQ2X+sRkfA6boMAaNMh0CAIxygi4XVcB4GIiBw9BUETFRcX869//euIn3fRRRdRXFzcAiUSEWkaBUETHSwI6uoOfdGo6dOnk5qa2lLFEhE5YsftqKFwu/vuu1m/fj3Dhg0jOjqapKQkunTpwuLFi1mxYgVXXHEFWVlZVFZWcvvttzNx4kQAevfuzYIFCygtLWXChAmMHTuWTz/9lG7dujFt2jTi4+PDfGQiEjRtIgjue305K7buatbXHNw1hV9deuJBH//DH/7AsmXLWLx4MTNnzuTiiy9m2bJlu4d5Pv7443To0IGKigpOPfVUrrrqKtLS0vZ6jbVr1/L888/zyCOPcM011/DKK69www26+qCItK42EQTHglGjRu011v+BBx5g6tSpAGRlZbF27dr9giAzM5Nhw4YBMGLECDZt2tRq5RURadAmguBQ39xbS2Ji4u7fZ86cyYwZM5gzZw4JCQmMGzfugHMBYmNjd/8eGRlJRUVFq5RVRGRP6ixuouTkZEpKSg742M6dO2nfvj0JCQmsWrWKzz77rJVLJyLSeG2iRhAOaWlpjBkzhiFDhhAfH0+nTp12P3bhhRfy0EMPMXToUAYMGMDo0aPDWFIRkUNr0UtVNpcDXZhm5cqVDBo0KEwlal1BOlYRaT5mttA5N/Jw26lpSEQk4BQEIiIBpyAQEQk4BYGISMApCEREAk5BICIScAqCJmrqMtQA999/P+Xl5c1cIhGRplEQNJGCQETaCs0sbqI9l6EeP348GRkZvPTSS1RVVXHllVdy3333UVZWxjXXXEN2djZ1dXXce++95ObmsnXrVs455xzS09P58MMPw30oIhJwbSMI3robti9t3tfsfBJM+MNBH95zGep3332XyZMnM2/ePJxzXHbZZXz88cfk5+fTtWtX3nzzTcCvQdSuXTv++te/8uGHH5Kent68ZRYRaQI1DTWDd999l3fffZfhw4dzyimnsGrVKtauXctJJ53EjBkzuOuuu5g1axbt2rULd1FFRPbTNmoEh/jm3hqcc9xzzz1897vf3e+xhQsXMn36dO655x4uuOACfvnLX4ahhCIiB6caQRPtuQz1V77yFR5//HFKS0sByMnJIS8vj61bt5KQkMANN9zAz372MxYtWrTfc0VEwq1t1AjCYM9lqCdMmMD111/P6aefDkBSUhLPPPMM69at48477yQiIoLo6Gj+/e9/AzBx4kQmTJhAly5d1FksImGnZaiPA0E6VhFpPlqGWkREGkVBICIScMd1EBwPzVpHKwjHKCLhddwGQVxcHIWFhW36ROmco7CwkLi4uHAXRUTasON21FD37t3Jzs4mPz8/3EVpUXFxcXTv3j3cxRCRNuy4DYLo6GgyMzPDXQwRkePecds0JCIizSMsQWBmPzGz5Wa2zMyeNzM1gouIhEmrB4GZdQN+BIx0zg0BIoHrWrscIiLihatpKAqIN7MoIAHYGqZyiIgEXqsHgXMuB/gzsAXYBux0zr2773ZmNtHMFpjZgrY+MkhEJJzC0TTUHrgcyAS6AolmdsO+2znnJjnnRjrnRnbs2LG1iykiEhjhaBo6H9jonMt3ztUAU4AzwlAOEREhPEGwBRhtZglmZsB5wMowlENERAhPH8FcYDKwCFgaKsOk1i6HiIh4YZlZ7Jz7FfCrcOxbRET2ppnFIiIBpyAQEQk4BYGISMApCEREAk5BICIScAoCEZGAUxCIiAScgkBEJOAUBCIiAa
cgEBEJOAWBiEjAKQhERAJOQSAiEnAKAhGRgFMQiIgEnIJARCTgFAQiIgGnIBARCTgFgYhIwCkIREQCTkEgIhJwCgIRkYBTEIiIBJyCQEQk4BQEIiIBpyAQEQk4BYGISMApCEREAk5BICIScAoCEZGAUxCIiAScgkBEJOAUBCIiAacgEBEJuLAEgZmlmtlkM1tlZivN7PRwlENERCAqTPv9O/C2c+5rZhYDJISpHCIigdfqQWBmKcBZwM0AzrlqoLq1yyEiIl44mob6APnAE2b2uZk9amaJ+25kZhPNbIGZLcjPz2/9UoqIBEQ4giAKOAX4t3NuOFAG3L3vRs65Sc65kc65kR07dmztMoqIBEY4giAbyHbOzQ3dnowPBhERCYNWDwLn3HYgy8wGhO46D1jREvt6cf4W/vPJRkoqa1ri5UVE2oRwjRr6IfBsaMTQBuCWltjJh6vyeXv5dv70zmquGtGdCwZ3JirSiDCjW/t4uqXGt8RuRUSOK+acC3cZDmvkyJFuwYIFTXru4qxinpqziTe+2EZ1Xf3u+2MiI/jNlUO4ZmSPZiqliMixxcwWOudGHna7th4EDQpLq1idWwIO6pzj4Y82MHtdAbeOzeSeCQOJitQkaxFpWxobBOFqGmp1aUmxnJEUu/v26X3S+O30lTw2eyNrckt48PpTaBcf3ajXcs5hZi1VVBGRVhXYr8FRkRH86tIT+eNVJ/HZhkK++q9P2FxYdsjn1Nc7Hp21gaH3vcvrX2xtpZKKiLSswDQNHcpnGwr53jMLMeCPVw0lOjKCzYVlFJXXMKRrCiN7d6C2vp6fvvQFs9YWkBIXRU2dY9oPxnBCp+QWK5eIyNFQH8ER2lRQxreenM+G/APXCmKjfOXp3ksGM35wJy5+YDYpcVFM+8EYkuOi2VFWzbTFOZw/uBPd22vpJBEJPwVBE+yqrGH22gIykmPpmZZASlw0S3N2Mn9TEVsKy/nW2MzdNYC5Gwq5/tG5nDMgg54dEnh+3hYqauoY2DmZV28bQ1x0ZIuXV0TkUBQEreCRjzfw2+kriYowLhvWleE9Url32nJuHN2L/7tiyO7tKmvqMIPYqL3DoayqFoDE2MD02YtIK9KooVbw7TMz6dEhniHd2u1uDtpcWM6jszcypl8a5wzM4PHZm3jwg7XUOxjTL41xAzKorKnjg1V5zN9UhGGM7Z/OhUM6c97ADNL2GNl0JOrqHZERhxjJVF0Ok86GC34LJ1zQpH2ISNukIDgKZsaFQ7rsdd/PLxzIvE1F/HzyEtonxrC5sJzzB2XQpV08H6zKY8bKPABO6JTEt8ZmUlvneHvZdj5Y5e/vnZbA8J7t6ZWWQEVNHeVVdaQnxTLxrD7Exxy4uempOZv4/fRV/OaKIVw1ovuBC5u/CgrWwLr3FAQishcFQTOLiYrgH18fzqX/mE10ZARPfWsUZ53gV0/9tXNsKCgjLjpyr+Ut/ufiQSzN2cmn6wv5fMsOZq8rYOrnVcRGRZAQE8mO8hreXLqVB68/Zb9RSs/O3cwvpy0nJS6Kn778BRU1ddwwutd+5arPX00EUJb1BfH1johD1R5EJFAa1UdgZrcDTwAlwKPAcOBu59y7LVs8r8l9BJs/hYgo6DGq+Qt1GCWVNcRHRzZpxrJzjrp6t/u5H6/J546XFlNaVcvPLhjAaZlp9EpP4O2l2/n5K0s4d2AGf79uGLe/sJgPVuXx3xcN4taxmbtP9su37mTlMz/ja+UvsdMlcE7kk5zeN53xgzsxfnCnw/ZR1NU7pi3OIcKMK4Z3O/I3Q0TColk7i83sC+fcyWb2FeA24F7gCedcqywf3eQgePxCyJoLZ/4Mzv45RDZu5vCxKK+kcvc8hj2d2T+dR745krjoSKpr6/nxi58zfel2kuOiGNq9HelJsbz+xVYei72fc5gHwK/7v8wbmyLIK6kiLjqC8wd1Yki3diTHRZEUG0WnlDh6pyWSkRzLR2vz+cP0VX55DmDiWX24+8KBqlGIHAeaOwiWOOeGmtnfgZnOualm9nnowjItrslBULkL3roLvngOugyDrz4CHU9o/gK2Eucca3JL2VhQxubCMmrrHbeOzdxrqGptXT2vfbGVBZt3sCS7mA35ZVwxvBv/l3UzkdUlUJoLX3+R+v5fYcHmHbz2RQ7Tl26nqGz/q4XGREZQXVdPr7QE7vzKAOZtLOKpOZu5ZGgX/nz1yfsNkXXOUVVbv/t+5xxLc3YyeWE2n28pZsJJnbl+VE9SE2Ja9H0SEa+5g+AJoBuQCZwMROIDYcTRFrQxjnr46Ipp8PqPwdXDt9+H9H7NV7jjQW01/LYznHorzJsE594LZ/1s98POOSpq6iiprKWksoZtOyvZXFjO5sIyeqUlcs3IHsREReCcY9LHG/j9W6volZbAyd1T6ZeRRGxUBJ9vKWbhlh3kl1TRLj6aLu3iqK13rMsrJSYqggGdklmas5P46Ei+NqI7t5/fn/QmjJCqrKkjMsKI3qfJreHvWGtAiXypuYMgAhgGbHDOFZtZB6C7c27J0Rf18JplHkHRRnj0PIhLhW/PgIQOzVO440HeKvjXaXDlJPjwN9BtJFz9RJNf7u1l23lh/hbW5ZWSvaMCgJ4dEhjZqz290xPJL6lia3EFVbX1XDikM5ee3JV28dGs3LaLx2dv5NXFOSTFRvG/l53IZSd3bdTJO6uonMdmb+TF+VnERkdw4YmduWRoV2rq63ln2XbeW5FLvXOcO7AT4wdncErP9sRGRxIbFUFsVMR++5ixIpdXF+cwrEcq4wZ0pG/HJIWItDnNHQRjgMXOuTIzuwF/acm/O+c2H31RD6/ZJpRt+QyevBS6j4Ibp0JUQJooVkyDl74JE2fCR/8PCtfDD+Y1y0uXV9dSWVNPh8TGv5drcku4c/ISvsgq5uwTOpKZnsjOihpKq2oZ3CWFcQM6MrR7KoWlVcxck8/7K3N5b0UuEWZcdnJX6p3jvRW5lFXXAZAUG8U5AzOIijA+WJXHzoq9r0jXOSWOO8afsHto7d/eW8ODH64jOS6Kkko/qa9Hh3i+c2YfrhnZY78mr8qaOhZt3sHcjUWsyS1hXV4pW4rKGdsvndvP78/Q7qlH8xYeUF29o6y6lpS447dfS8Kv2fsI8E1CQ4GngceArzrnzj7agjZGs84sXvIyTPk2DPkaXPYAxCQ2z+seyz76k68J/GIrzP4bzPoL/GIbRMeFrUh19Y4nPtnI32esBaBdQjSxURFsKCjDOX9yLw3NvM5IjuXK4d24eUxvurTzw24ra+r4eE0+0VERnNE3bfes7Zq6euZvKmJ9XilVtfVU1dYzY2Uun28pZmDnZNKSYvhkXSHXjuzBfZefSEFpFR+vKWDKomwWbN5Bp5RYvnl6b+rqHdk7ytlUUM7irGKq6+qJMOiVlkjfjklkpMTy5pJt7Kyo4ZwBHTl3UCe6pcbRpV081bX15JVUkVdSyeAuKQzv2f6w70X2jnLW55eyPGcXCzbvYNGWHZRU1tItNZ4h3VI4tXcHbjqj935NYgdTVVu330x2CZ7mDoJFzrlTzOyXQI5z7
rGG+5qjsIfT7EtMzPoLvP9rSO0JF/8N+p/ffK99LHrl27429JNlsHwqvHwzTPwIug4Ld8n2U1RWzay1+Xy2oYju7eMZN6Ajg7ukHFWzjXOO6Uu384e3V7J9ZyX3XTaE60/rud82c9YXcv+MtczbVARAx+RYerSPZ0Sv9pzRN52RvduTvMc39JLKGp6as5lHZ21gR/nBr4s9YUhn7rpwIL3SEthYUMZnG4pYuW0XWTvKySoqJ6uoYq+r5w3olMyI3u3plhrPqu0lLMvZycaCMsb2S+ef39j/uhm1dfXMXJ3P9KXbWF9QRlZROUVl1Yzu04Hvj+vHmf3TMTOccxSX15ASH73fLPRlOTuJMGNw15Qmv89y7GnuIPgIeBv4FnAmkI9vKjrpaAvaGC2y1tDmT+H12/1s20GXwik3Q5+z/RDTmkrY+LGfjTvkq9AuNFu3rgY+fQA+ewh6jobTvge9zoCGk1Rt9bHZ3PTQmZDYEW6cAgVr4cGRcPm/YPg3wl2yVlVdW09xRTUZyQevCTnnyAt1eDd24cC6euf7RXZWsK24kpioCDqlxNI+IYapn+fw0Efrqa71zWd5JVUAJMdG0TMtgZ4d/E/fjCT6dkykX0byAS+Q9NKCLH4xZSmZ6Yk8fvOpxEVHsiS7mM82FPLq4q3kl1TRITGGE7um0KNDAu3io5m6KIftuyo5sWsKibFRrM0tYUd5ze5msKtH9CCnuJw/vr2a91bkAnBq7/bcMiaTzPREPl1fyJz1BZRW1TKyVwdGZXZgRK/2WhvrONLcQdAZuB6Y75ybZWY9gXHOuaeOvqiH12KLztVW+aaSOf+Eql0Q3wG6nOznHtSU+20iouHk62DARfDhbyF3GfQ+0/9bsQM6DvIn/+It/vYpN8HFf4XIA/xnqa/z38YtAi69H+IP3WTQLOrr4XddYeQtcOHvfRl+1w1Gfgsu/N3e25YVwnu/9CF3yo17P1aa59+vVF3j+Ujll1Txr5nrKCqr5rTMNEb36UBmeuIR13I+XVfA955ZSHl1HbX1/v9tVIRxzsAMrh7RnXMGZuzVdFRVW8fURTk8/dlmYqMiOKFTMr3SEnlvxXYWbSmmXXw0JZU1JMRE8b2z+xAfE8V/Pt1IVlHF7tfonZZAclw0K7btoq7ekRDjR301hMWhOOfYUV5D+4ToJtXoisurWZdXSu6uKvJLKhmVmaYayxFq9tVHzawTcGro5jznXN5RlO+ItPjqo7VVsO59WPZK6EQ/Fk6YAO17w9x/w6Knoa4KkrvARX+GQZf4RdyWvgxLXvJt7ak9/essfhYGXgJXPbZ/G/zsv8GM//VB0K4HXPPUl80zdTVgkRDRzBeN27EZ/j4ULrnfhwHApHMgNhlueu3L7TZ94puQSrZCYgbcsWLvCXiPng+7tsGPPj82az0BsS6vlKfnbKJHhwSGdk/d/W3/SDjnmL9pB0/N2USnlDi+P67v7sUO6+odM1fnUVxew+i+abuXQimtqmXR5h1MW7yV17/YSk19PaN6+0Dr0i6erqlx9EpLpHdaApERxquLt/LygixWbS8hPSlmd41iVGYHBnVJOfQCicAnodBr6MwH36H//h3jiIkK7IUVj1hz1wiuAf4EzAQM3zx0p3Nu8lGWs1HCvgx1yXbYOMsv1hbX7tDbzn0Y3vo59BoL1z0L8aERJVsX+5PpwIvh9NvgpZugvBD6j/ejeArXQYdMP5qp3UEWjmuKte/Bs1+DW97yzVgA034Aq6fDnev97Y//DDN/54Nv2PXwwW/guud8WQG2fg6TxvnfL3tw/9rCkaqvgwh1ZB6v8koqeeazLcxcncfW4koKSqsOuN3J3dsxfnAnNuSXMW9T0e6hxsmxUYzo3Z7Lh3VlwpAu+zXBvbwgi3umLKVvxyTunjCQzu3i2JBfxm3PLeJXlw7mljGZLX6MbUWzLzEBjG+oBZhZR2CGc+7koy5pI4Q9CI7U0skw9bt+zsK4u2HoNT4Eqkrgvz71cxjKCuC1H0HeCug4ENL6wqKn/GM3vfFlE0x9HdRVQ3T8ofd5MJ8+CO/+N9y5ARLT/H2fPQRv3wW3L/Gd5ssm+1FUl94PUfHwtxOh63C4/gW//bQf+NpSai+or4Hb5jX9RL7kZXjzDrjmSeh7btNe43BWTPO1l9Hfa5nXl71U1daxrbiSzUV+EuLO8hrGn9iJgZ33bsbZWlzB/E1FzNtYxKy1BWwpKqdDYgxXj+hOt/bxVFTXsbGgjBfmZ3Fmf98x3jB81jnHNx6dy6rtJXx057i9Ou3l4Jo7CJbu2TEcmmD2xXHdWdzStn4O794Lm2ZBTBJUl8KNr0Lfcw7+nOyF8PSVvhZx6f2w/gMfKuWFvp/ijB9Bev8DP2/2X32TU2wKJKbD6T+ApI7w2g9h5Rtw18Yvt984C568BFK6wa4cOO+XMPaOLzu9Z/wvfPKAbx6KioW/DPJh1mccTL4Frn4STrziyN+TjR/D01/1YdKuJ3x/DsQm+cfy18AL18Pgy2DcPU1fF2rDTL8PV3f491vCxjnHJ+sKefqzTaHJgP5+M7ju1J78+vIT9xsquyS7mMse/IQfntuPn14wIAylPv40dxD8CT+H4PnQXdcCS5xzdx1VKRvpuAwCAOd808zM3/smoHN+cfjnNIRB1U6/cmq/8ZCUAUte9H0QAy+GM34IPU7z2y/8j2+KimsHCWm+1lGa6/sgbpwCU//L/+/61ttf7qO8CP5fJkTGwpUP+ZFReypcD/84Bc77FUTFwTv3wHdnQacT4cFT/dyL7378ZXA0Rt5KeOwrkNIFxv8anrsGRt/mO6zLi/ys713boLYCuo2Aqx6FDn0a//oN5X7kXN+XU1ftw+C/5kDMUVxD2rkjO045YjvLa6ipryc+OpK46MhD9h/84LlFvL8yj4/uHEdGSvjmwRwvWqKz+CpgDL6P4GPn3NSjK2LjHbdB0FS5yyF7ge90bmjOKc2HuQ/B/EehstgvE5Haw88L6HueP3E2LJuRNR+eu9oHSU0FDLnKT57b0+fPQsYg6HaQqSBPXAQl23wHdnxoWQ7wzVev/RBueMWPnire4puJ9jxpOwdfPA+Ln4PoBD86atMsqK/1r5PaE978KSx43PddfPAbP1Lrpjd8DeX1H/uT+JjbYcTNPggPp3InPDoeyvLgOx/Czmxf6xlzuw+eplj4H3j//+DKh9v+XJPjxObCMs7/60dcdFIX7r92mJYFOQxds7itqi7zJ9g5/4QdG+Gsn/t+iH3b7PPXwDNfhZ1Z8JXf+Q7qI7H4eXg11MZ+5cO+aQr8XIm/n+yHytZWAqG/nxMuhDN/6r+Nv347rH8f0gf4pqXKYoiM8SOpGkZJVe6Cf57mm73qqvbeR/EWHxRr3/XDdwdfDnEpfs2kgjW+iWrPWeHlRfDijZD1me9szzzL3//aD33gTfzQDwturLoaePsemP+I339Cmm/GCtL6VMew+2es4f4Za/n22Ez+++JBCoNDaJYgMLMSdv9P3/shwDnnWmVQr4LgAOrr/Gimdoe4UMyubTDrz/56DCldDr7dgVSXwZ8H+Lb6
O1buPRR29du+JtK+F7TP9Cfuuf/24RAR7U/65/8vnPrtQw+HXTUdXvg6jP2J335fBWt9DWjxc755JmMwpHT1++40BL7+gu97ef46XwO4/F8w9Oovn1+xAx4c5UPk0r/7YcENnPNNbfsO8S3Jhcnfgs2zfZ/MiVfAYxfAiV+Fqx45svewpiL0frTQBCznYOsiyDgxrMuF7MU5H+6J6S24C8d9r6/gP59u4kfn9uMO9RcclGoEcvSWTvYn9cGXHX7bqlJY9KQ/eY/9iQ+Jxti1DZI7H7odvr7eP96wzZp3YPKtvu2/psLXOq59Fnqetv9zN3wEU7/n50ecMMEHxcZZsOZtP3Jr9H/BWXf6sFj9Nky7zYfLpQ/Aydf61/jw9/DRH+DaZ/ws9MbYsck3r8UkwbVPQ8c9Tlb1df6EeaCAKFgLK1+HVW9A0QboNcb3L/W/wIfgnu/MAMMTAAAQZUlEQVTJ9FDzWmwKDLrMd+hnnnV0fRrO+bk00Ql+JNuRqKv179/Sl+GW6X5iYgupr3f8YupSXpifxbUje3BGvzQGdE6mfUIMW4sr2FpcSVlVLR0SY0gPLRWS1oRlz493CgJp23JX+JpAXIqf85Da8+DbVpf7/pXZf/MzyGOSod+5vrN86Ut+Al3vsbB8iq9pXPWo7z9pUFfjO6F3bYVvvbP/9SzKi/wEvYaRTjtz4IkLffNXQz/NZQ/4k/SCJ/zJu6bcB+zJX/fNacunwrIpkLfcv0bX4X5Y8caPfb+JRfrQGnc3RCfCGz/2wTviFt8xvuI1qC6BE6/0cz0aRmM1VlmBH5Cw+DkfBLEpcPOb0GVo455fU+lrUqvf9OXLGOT7gxpCafsyP+Ks/wW+ppXc6cjKdwB19Y7/eXUZkxdmUVN36PNYZITx9VE9uP28E+iYfOBAqKqtY0N+GWtyS9hcWM5J3dpxet+0Ri81cixSEEjbd6SzscsKoXCtP8lGhU4GOQv9Veyy5/tRTOf/6svH9pS73DcR1ZT7ZqLTb/Pf3hc/40/WSZ1g+A1+VNeUib6J6aZp/iT/8i2+/yIiyneY9zvfh8/K13zto0GP0/xrD7rky0mFzvk1r+Y+BAuf9LWnLsNgzVu+ye/c//En25oK+OxfvuM9fYCfzNiYb/Tbl/p5JUtf8oHSbYSfUzLnQf/+3vrOlwMBijZAziL//kTF+36p+lr/vLkP+ffhoj/72sS07/s+oZO+5oN40tm+9ldT5muZI27xkxc7n3TUo7Kqa+vZUFDK6u0l7KqspVtqHF1T40mMiaKorJqC0ipmrs7n+XlbiI2K4KYzejMqswMDOieTEhfNjJW5vLZ4Kx+vzd8vUBJiIjmjbzqREZC7q4qC0ipS4qLp0SGenh0SGNu/I2f2Sz/opVuX5exk5bZdREdGEBlhDOqSTL+M5KM63iOhIBBprPp6P9ooufOhtyvJ9SfIBY9/eQJP7QUnXe2DYu07/ip40QlwwxTodbrfpq4GPrkfKor9KKiGuSDVZb6fpLzQB8jh1nHKmu8n421fAmff7WsH+55E13/ov5nX1/oaSFIGJHWGHqN8M1NUjG+aWv2WP3lvmuXLO+x636fTUBPKX+2v+R2bDBP+CJ8/A6ve5MBdhvhAvuLfvjmtvg4ePtuP5PrBfD95ceGT8M1X/bDmWX+BL17wI8OSu/qmr/hUH2Y1FX4IdNUuX6NKP8EPFuh7zoED+ghsLCjjT++sYvrS7V8W23zWdk6J4+KhXRjWI5UBnZPpmhrP/E1FzFiRy+x1BcRGRZCRHEd6Ugw7K2rI2lFBVlE5VbX1ZKYn8o3TenLuwAy6psbvXhDw/hlr+WDV3ivxxERF8J+bT+WMfl/2oazctou3lm0nLTGGTilxJMREsj6/lDW5JazJLWXSjSOa3KylIBBpKeVFvhkpfYA/uTbUSHZm++aVXmNarn28vs7XRDIGHnyb4i3wzi/8vIqS7VDhl9Umtp1fYXfrYti5xZ+UT/02jLjpwAsgZi/0F3KqKfMLMp56Kwy+woddbZWfGBgZ45vEEjP2HpCwYSY8dbmv/aybAWN+DOPv+/Lx0jw/KmzNO37bumo/ZyU63odPbIofFbZtsQ+U2BQY/X04+66jXo9rR1l16CRbQu6uKs7sn86pvTsc9Fv9wVTV1vH2su08NWczCzfv2H1/elIMBaXVpCZE850z+3DxSV1wQEV1HT95cTFZO8p5+tZRjOjVgSmLsrlnylKqauv3e/32CdGc0CmZP141lN6HWeDvYBQEIuJVl/mT7arpflhvWj8YNdGvqHu4EU1Z86FgtW+yOtKJec9e42tJXU+BW99t2mzx2mrY+JGfv7LyNV87uPLhpi+50kJWbd/F8pxd5BRXkLOjgt7pidwwuud+S2HklVRyzUNzKCyrZvzgTkxZlMPoPh144OvDMYzcXb6Tu0/HJNKTYo56aKyCQETCq3C9X9b8gv878lni+3LOz51593/8JMjrnm+WDudwyCmu4JqH5pBTXMGtYzO5Z8JAohp55bkjdcwHgZlFAgvwVzy75FDbKghEBPDrZk35jm+eGnixH3XVoY/v9M+a5/sYep3uZ72n9vTNSru2+k7+9BP8KLODacXlRHJ3VbKpoIzT+qS16H6OhyC4AxgJpCgIRKTR8lf7iYZLX/aTBhtEJ/oO5YY+kai40Oz3PaT29MutV5X4TvqK4i/7O5zzs8iTOvnaRqchfhRV55N8X8v2JX5obU1FaLRapO9bSe7i53jEpfrO+Ki4UN9J6CeunZ9gF4YZ0Md0EJhZd+BJ4LfAHQoCETlitdW+w7m8wK+9lTEIMD/cdtMsf1GmlC5+ld2oWL/wYe5yv+xKXKo/6cen+sciov2JurzQjw7ble23r6vee5+JGb5WUV/nRz2VF+09BPhgImP9KgAp3XxwJHf2gVGy1Q+rrSwOhUuUL0ddtQ+oumq4/qXGT9DcR2ODIFwXH70f+Dlw0AG1ZjYRmAjQs+chJguJSDBFxfg5F/vqNNj/7KvhQkuNVVvlawDbl/lv/J2HHrhfonKXb36qKvHrZtVU+hN4w0/FDh8+O3P85MCsub6GUVftax8pXfyoLFfvh/26ej8jPSHN1ygiWv403epBYGaXAHnOuYVmNu5g2znnJgGTwNcIWql4IiJeVKxvGuo24tDbxaUcuu/hQJzztYqWWofqCIXj4p9jgMvMbBPwAnCumT0ThnKIiISH2TETAhCGIHDO3eOc6+6c6w1cB3zgnLuhtcshIiJeOGoEIiJyDAlr3cQ5NxOYGc4yiIgEnWoEIiIBpyAQEQk4BYGISMApCEREAk5BICIScAoCEZGAUxCIiAScgkBEJOAUBCIiAacgEBEJOAWBiEjAKQhERAJOQSAiEnAKAhGRgFMQiIgEnIJARCTgFAQiIgGnIBARCTgFgYhIwCkIREQCTkEgIhJwCgIRkYBTEIiIBJyCQEQk4BQEIiIBpyAQEQk4BYGISMApCEREAk5BICIScAoCEZGAUxCIiAScgkBEJOAUBCIiAacgEBEJuFYPAjPrYWYfmtlKM1tuZre3dhlERORLUWH
YZy3wU+fcIjNLBhaa2XvOuRVhKIuISOC1eo3AObfNObco9HsJsBLo1trlEBERL6x9BGbWGxgOzD3AYxPNbIGZLcjPz2/toomIBEbYgsDMkoBXgB8753bt+7hzbpJzbqRzbmTHjh1bv4AiIgERliAws2h8CDzrnJsSjjKIiIgXjlFDBjwGrHTO/bW19y8iInsLR41gDHAjcK6ZLQ79XBSGcoiICGEYPuqcmw1Ya+9XREQOTDOLRUQCTkEgIhJwCgIRkYBTEIiIBJyCQEQk4BQEIiIBpyAQEQk4BYGISMApCEREAk5BICIScAoCEZGAUxCIiAScgkBEJOAUBCIiAacgEBEJOAWBiEjAKQhERAJOQSAiEnAKAhGRgFMQiIgEnIJARCTgFAQiIgGnIBARCTgFgYhIwCkIREQCTkEgIhJwCgIRkYBTEIiIBJyCQEQk4BQEIiIBpyAQEQk4BYGISMApCEREAk5BICIScAoCEZGAC0sQmNmFZrbazNaZ2d3hKIOIiHitHgRmFgn8E5gADAa+bmaDW7scIiLihaNGMApY55zb4JyrBl4ALg9DOUREBIgKwz67AVl73M4GTtt3IzObCEwM3Sw1s9VN3F86UNDE5x7PgnjcQTxmCOZx65gbp1djNgpHENgB7nP73eHcJGDSUe/MbIFzbuTRvs7xJojHHcRjhmAet465eYWjaSgb6LHH7e7A1jCUQ0RECE8QzAf6m1mmmcUA1wGvhaEcIiJCGJqGnHO1ZvYD4B0gEnjcObe8BXd51M1Lx6kgHncQjxmCedw65mZkzu3XPC8iIgGimcUiIgGnIBARCbg2HQRBWMrCzHqY2YdmttLMlpvZ7aH7O5jZe2a2NvRv+3CXtbmZWaSZfW5mb4RuZ5rZ3NAxvxgajNCmmFmqmU02s1Whz/z0tv5Zm9lPQn/by8zseTOLa4uftZk9bmZ5ZrZsj/sO+Nma90Do3LbEzE45mn232SAI0FIWtcBPnXODgNHAbaHjvBt43znXH3g/dLutuR1YucftPwJ/Cx3zDuDWsJSqZf0deNs5NxA4GX/8bfazNrNuwI+Akc65IfgBJtfRNj/r/wAX7nPfwT7bCUD/0M9E4N9Hs+M2GwQEZCkL59w259yi0O8l+BNDN/yxPhna7EngivCUsGWYWXfgYuDR0G0DzgUmhzZpi8ecApwFPAbgnKt2zhXTxj9r/OjGeDOLAhKAbbTBz9o59zFQtM/dB/tsLweect5nQKqZdWnqvttyEBxoKYtuYSpLqzCz3sBwYC7QyTm3DXxYABnhK1mLuB/4OVAfup0GFDvnakO32+Ln3QfIB54INYk9amaJtOHP2jmXA/wZ2IIPgJ3AQtr+Z93gYJ9ts57f2nIQNGopi7bCzJKAV4AfO+d2hbs8LcnMLgHynHML97z7AJu2tc87CjgF+LdzbjhQRhtqBjqQUJv45UAm0BVIxDeL7KutfdaH06x/7205CAKzlIWZReND4Fnn3JTQ3bkNVcXQv3nhKl8LGANcZmab8E1+5+JrCKmh5gNom593NpDtnJsbuj0ZHwxt+bM+H9jonMt3ztUAU4AzaPufdYODfbbNen5ry0EQiKUsQm3jjwErnXN/3eOh14CbQr/fBExr7bK1FOfcPc657s653vjP9QPn3DeAD4GvhTZrU8cM4JzbDmSZ2YDQXecBK2jDnzW+SWi0mSWE/tYbjrlNf9Z7ONhn+xrwzdDoodHAzoYmpCZxzrXZH+AiYA2wHvjvcJenhY5xLL5KuARYHPq5CN9m/j6wNvRvh3CXtYWOfxzwRuj3PsA8YB3wMhAb7vK1wPEOAxaEPu9XgfZt/bMG7gNWAcuAp4HYtvhZA8/j+0Fq8N/4bz3YZ4tvGvpn6Ny2FD+qqsn71hITIiIB15abhkREpBEUBCIiAacgEBEJOAWBiEjAKQhERAJOQSDSwsxsXMMKqSLHIgWBiEjAKQhEQszsBjObZ2aLzezh0PUOSs3sL2a2yMzeN7OOoW2HmdlnobXgp+6xTnw/M5thZl+EntM39PJJe1xH4NnQLFmRY4KCQAQws0HAtcAY59wwoA74Bn6Rs0XOuVOAj4BfhZ7yFHCXc24ofmZnw/3PAv90zp2MXxOnYdr/cODH+Gtj9MGvlyRyTIg6/CYigXAeMAKYH/qyHo9f4KseeDG0zTPAFDNrB6Q65z4K3f8k8LKZJQPdnHNTAZxzlQCh15vnnMsO3V4M9AZmt/xhiRyegkDEM+BJ59w9e91pdu8+2x1qTZZDNfdU7fF7Hfq/J8cQNQ2JeO8DXzOzDNh9rdhe+P8jDatcXg/Mds7tBHaY2Zmh+28EPnL+OhDZZnZF6DVizSyhVY9CpAn0rUQEcM6tMLP/Ad41swj8CpC34S/+cqKZLcRfHeva0FNuAh4Kneg3ALeE7r8ReNjMfh16jatb8TBEmkSrj4ocgpmVOueSwl0OkZakpiERkYBTjUBEJOBUIxARCTgFgYhIwCkIREQCTkEgIhJwCgIRkYD7/2LskS6qBNsaAAAAAElFTkSuQmCC\n", "text/plain": [ "
" ] }, "metadata": { "needs_background": "light" }, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "experimento_ssd300_fault_C_1.h5\n" ] } ], "source": [ "#Graficar aprendizaje\n", "\n", "history_path =config['train']['saved_weights_name'].split('.')[0] + '_history'\n", "\n", "hist_load = np.load(history_path + '.npy',allow_pickle=True).item()\n", "\n", "print(hist_load.keys())\n", "\n", "# summarize history for loss\n", "plt.plot(hist_load['loss'])\n", "plt.plot(hist_load['val_loss'])\n", "plt.title('model loss')\n", "plt.ylabel('loss')\n", "plt.xlabel('epoch')\n", "plt.legend(['train', 'test'], loc='upper left')\n", "plt.ylim((0, 10)) \n", "plt.show()\n", "\n", "print(config['train']['saved_weights_name'])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Evaluación del Modelo" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Processing image set 'train.txt': 0%| | 0/1366 [00:00\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[0mreturn_recalls\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0mreturn_average_precisions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 86\u001b[0;31m verbose=True)\n\u001b[0m\u001b[1;32m 87\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mmean_average_precision\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maverage_precisions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprecisions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecalls\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mresults\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/Desktop/Rentadrone/ssd_keras-master/eval_utils/average_precision_evaluator.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, img_height, img_width, batch_size, data_generator_mode, round_confidences, matching_iou_threshold, border_pixels, sorting_algorithm, average_precision_mode, num_recall_points, ignore_neutral_boxes, return_precisions, return_recalls, return_average_precisions, verbose, decoding_confidence_thresh, decoding_iou_threshold, decoding_top_k, decoding_pred_coords, decoding_normalize_coords)\u001b[0m\n\u001b[1;32m 199\u001b[0m \u001b[0mround_confidences\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mround_confidences\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0mverbose\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mverbose\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 201\u001b[0;31m ret=False)\n\u001b[0m\u001b[1;32m 202\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[0;31m#############################################################################################\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/Desktop/Rentadrone/ssd_keras-master/eval_utils/average_precision_evaluator.py\u001b[0m in \u001b[0;36mpredict_on_dataset\u001b[0;34m(self, img_height, img_width, batch_size, data_generator_mode, decoding_confidence_thresh, decoding_iou_threshold, decoding_top_k, decoding_pred_coords, decoding_normalize_coords, decoding_border_pixels, round_confidences, verbose, ret)\u001b[0m\n\u001b[1;32m 389\u001b[0m 
{ "cell_type": "markdown", "metadata": {}, "source": [ "Model Evaluation" ] },
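{ "cell_type": "markdown", "metadata": {}, "source": [ "The evaluator below is configured with `average_precision_mode='sample'` and `num_recall_points=11`, i.e. Pascal-VOC-style 11-point interpolated AP. A minimal sketch of that computation on hypothetical recall/precision arrays (not values produced by this notebook):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "# Hypothetical monotone recall and precision values for one class.\n", "recalls_c = np.array([0.0, 0.2, 0.4, 0.6, 0.8])\n", "precisions_c = np.array([1.0, 0.9, 0.75, 0.6, 0.5])\n", "\n", "# 11-point AP: average, over the recall thresholds 0.0, 0.1, ..., 1.0, of the\n", "# maximum precision achieved at recall >= threshold (0 if that recall is unreachable).\n", "ap = 0.0\n", "for r in np.linspace(0, 1, 11):\n", "    mask = recalls_c >= r\n", "    ap += (precisions_c[mask].max() if mask.any() else 0.0) / 11\n", "print('11-point AP:', round(ap, 4))" ] },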
{ "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Processing image set 'train.txt': 0%| | 0/1366 [00:00<?, ?it/s]" ] }, { "ename": "KeyboardInterrupt", "evalue": "", "output_type": "error", "traceback": [ "KeyboardInterrupt (ANSI-colored traceback trimmed). The run was interrupted inside Evaluator.__call__ -> predict_on_dataset (eval_utils/average_precision_evaluator.py) -> decode_detections -> _greedy_nms (ssd_encoder_decoder/ssd_output_decoder.py) -> iou -> intersection_area_ (bounding_box_utils/bounding_box_utils.py)." ] } ],
"source": [ "\n", "config_path = 'config_7_fault_1.json'\n", "\n", "with open(config_path) as config_buffer:\n", "    config = json.loads(config_buffer.read())\n", "\n", "model_mode = 'training'\n", "# Path to the `.h5` file of the model to be loaded.\n", "model_path = config['train']['saved_weights_name']\n", "\n", "# We need an SSDLoss object in order to pass it to the model loader.\n", "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", "\n", "K.clear_session() # Clear previous models from memory.\n", "\n", "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", "                                               'L2Normalization': L2Normalization,\n", "                                               'DecodeDetections': DecodeDetections,\n", "                                               'compute_loss': ssd_loss.compute_loss})\n", "\n", "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", "\n", "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n",
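"# (Note: parse_xml maps each class name to an integer ID by its position in the\n", "# `classes` list below, so index 0 is always the implicit 'background' class.)\n",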
"\n", "# The XML parser needs to know which object class names to look for and in which order to map them to integers.\n", "classes = ['background'] + labels\n", "\n", "train_dataset.parse_xml(images_dirs=[config['train']['train_image_folder']],\n", "                        image_set_filenames=[config['train']['train_image_set_filename']],\n", "                        annotations_dirs=[config['train']['train_annot_folder']],\n", "                        classes=classes,\n", "                        include_classes='all',\n", "                        exclude_truncated=False,\n", "                        exclude_difficult=False,\n", "                        ret=False)\n", "\n", "val_dataset.parse_xml(images_dirs=[config['test']['test_image_folder']],\n", "                      image_set_filenames=[config['test']['test_image_set_filename']],\n", "                      annotations_dirs=[config['test']['test_annot_folder']],\n", "                      classes=classes,\n", "                      include_classes='all',\n", "                      exclude_truncated=False,\n", "                      exclude_difficult=False,\n", "                      ret=False)\n", "\n", "#########################\n", "# 3: Set the batch size.\n", "#########################\n", "batch_size = config['train']['batch_size'] # Lower the batch size if you run into GPU memory issues.\n", "\n", "evaluator = Evaluator(model=model,\n", "                      n_classes=n_classes,\n", "                      data_generator=val_dataset,\n", "                      model_mode='training')\n", "\n", "results = evaluator(img_height=img_height,\n", "                    img_width=img_width,\n", "                    batch_size=4,\n", "                    data_generator_mode='resize',\n", "                    round_confidences=False,\n", "                    matching_iou_threshold=0.5,\n", "                    border_pixels='include',\n", "                    sorting_algorithm='quicksort',\n", "                    average_precision_mode='sample',\n", "                    num_recall_points=11,\n", "                    ignore_neutral_boxes=True,\n", "                    return_precisions=True,\n", "                    return_recalls=True,\n", "                    return_average_precisions=True,\n", "                    verbose=True)\n", "\n", "mean_average_precision, average_precisions, precisions, recalls = results\n", "total_instances = []\n", "precisions = [] # note: reused here to collect the per-class APs\n", "\n", "for i in range(1, len(average_precisions)):\n", "    print('{:.0f} instances of class'.format(len(recalls[i])),\n", "          classes[i], 'with average precision: {:.4f}'.format(average_precisions[i]))\n", "    total_instances.append(len(recalls[i]))\n", "    precisions.append(average_precisions[i])\n", "\n", "if sum(total_instances) == 0:\n", "    print('No test instances found.')\n", "else:\n", "    print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))\n", "    print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))\n", "\n", "    for i in range(1, len(average_precisions)):\n", "        print(\"{:<14}{:<6}{}\".format(classes[i], 'AP', round(average_precisions[i], 3)))\n", "    print()\n", "    print(\"{:<14}{:<6}{}\".format('', 'mAP', round(mean_average_precision, 3)))" ] },
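{ "cell_type": "markdown", "metadata": {}, "source": [ "A worked sketch of the two summary numbers printed above, using hypothetical per-class instance counts and APs (not results from this run):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Hypothetical values, for illustration only.\n", "total_instances = [10, 40]   # instances per class\n", "precisions = [0.80, 0.60]    # per-class average precision (AP)\n", "\n", "# Instance-weighted mAP: (10*0.80 + 40*0.60) / 50 = 0.64\n", "print(sum(a * b for a, b in zip(total_instances, precisions)) / sum(total_instances))\n", "\n", "# Unweighted mAP over classes with at least one instance: (0.80 + 0.60) / 2 = 0.70\n", "print(sum(precisions) / sum(x > 0 for x in total_instances))" ] },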
{ "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "24" ] }, "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ceil(val_dataset_size/batch_size)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Reload the model from the saved weights.\n", "Prediction" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Training on: \t{'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8}\n", "\n" ] } ], "source": [ "from imageio import imread\n", "from keras.preprocessing import image\n", "import time\n", "\n", "config_path = 'config_7_fault_1.json'\n", "input_path = ['fault_jpg/']\n", "output_path = 'result_ssd7_fault_1/'\n", "\n", "with open(config_path) as config_buffer:\n", "    config = json.loads(config_buffer.read())\n", "\n", "makedirs(output_path)\n", "\n", "###############################\n", "# Parse the annotations\n", "###############################\n", "score_threshold = 0.2\n", "score_threshold_iou = 0.2\n", "labels = config['model']['labels']\n", "categories = {}\n", "for i in range(len(labels)): categories[labels[i]] = i + 1 # category 0 is the background class\n", "print('\\nTraining on: \\t' + str(categories) + '\\n')\n", "\n", "img_height = config['model']['input'] # Height of the model input images\n", "img_width = config['model']['input'] # Width of the model input images\n", "img_channels = 3 # Number of color channels of the model input images\n", "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n", "classes = ['background'] + labels\n", "\n", "model_mode = 'training'\n", "# Path to the `.h5` file of the model to be loaded.\n", "model_path = config['train']['saved_weights_name']\n", "\n", "# We need an SSDLoss object in order to pass it to the model loader.\n", "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", "\n", "K.clear_session() # Clear previous models from memory.\n", "\n", "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", "                                               'L2Normalization': L2Normalization,\n", "                                               'DecodeDetections': DecodeDetections,\n", "                                               'compute_loss': ssd_loss.compute_loss})" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Total time: 0.629\n", "Average time per image: 0.035\n", "OK\n" ] } ], "source": [ "image_paths = []\n", "for inp in input_path:\n", "    if os.path.isdir(inp):\n", "        for inp_file in os.listdir(inp):\n", "            image_paths += [inp + inp_file]\n", "    else:\n", "        image_paths += [inp]\n", "\n", "# Keep only image files (case-insensitive extension check).\n", "image_paths = [inp_file for inp_file in image_paths if inp_file.lower().endswith(('.jpg', '.jpeg', '.png'))]\n", "times = []\n", "\n", "for img_path in image_paths:\n", "    orig_images = [] # Store the original images here.\n", "    input_images = [] # Store the resized versions of the images here.\n", "\n", "    # Preprocess the image for the network.\n", "    orig_images.append(imread(img_path))\n", "    img = image.load_img(img_path, target_size=(img_height, img_width))\n", "    img = image.img_to_array(img)\n", "    input_images.append(img)\n", "    input_images = np.array(input_images)\n", "\n", "    # Run the image through the network and decode the raw predictions.\n", "    start = time.time()\n", "    y_pred = model.predict(input_images)\n", "    y_pred_decoded = decode_detections(y_pred,\n", "                                       confidence_thresh=score_threshold,\n", "                                       iou_threshold=score_threshold_iou,\n", "                                       top_k=200,\n", "                                       normalize_coords=True,\n", "                                       img_height=img_height,\n", "                                       img_width=img_width)\n", "\n", "    times.append(time.time() - start)\n", "\n", "    # Visualize the detections: set the colors for the bounding boxes.\n", "    colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()\n",
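"    # Note: decode_detections returns boxes in model-input coordinates\n", "    # (img_width x img_height); the loop below rescales them to the size\n", "    # of the original image before drawing.\n",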
"\n", " plt.figure(figsize=(20,12))\n", " plt.imshow(orig_images[0],cmap = 'gray')\n", "\n", " current_axis = plt.gca()\n", " #print(y_pred)\n", " for box in y_pred_decoded[0]:\n", " # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.\n", "\n", " xmin = box[2] * orig_images[0].shape[1] / img_width\n", " ymin = box[3] * orig_images[0].shape[0] / img_height\n", " xmax = box[4] * orig_images[0].shape[1] / img_width\n", " ymax = box[5] * orig_images[0].shape[0] / img_height\n", "\n", " color = colors[int(box[0])]\n", " label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n", " current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n", " current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n", "\n", " #plt.figure(figsize=(15, 15))\n", " #plt.axis('off')\n", " save_path = output_path + img_path.split('/')[-1]\n", " plt.savefig(save_path)\n", " plt.close()\n", " \n", "file = open(output_path + 'time.txt','w')\n", "\n", "file.write('Tiempo promedio:' + str(np.mean(times)))\n", "\n", "file.close()\n", "print('Tiempo Total: {:.3f}'.format(np.sum(times)))\n", "print('Tiempo promedio por imagen: {:.3f}'.format(np.mean(times)))\n", "print('OK')" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "panel : 69\n", "cell : 423\n" ] } ], "source": [ "\n", "# Summary instance training\n", "category_train_list = []\n", "for image_label in train_dataset.labels:\n", " category_train_list += [i[0] for i in train_dataset.labels[0]]\n", "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n", "for i in summary_category_training.keys():\n", " print(i, ': {:.0f}'.format(summary_category_training[i]))\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1 : 6030\n" ] } ], "source": [ "for i in summary_category_training.keys():\n", " print(i, ': {:.0f}'.format(summary_category_training[i]))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.8" } }, "nbformat": 4, "nbformat_minor": 2 }