diff --git a/.ipynb_checkpoints/Panel_Detector_Fault_1-checkpoint.ipynb b/.ipynb_checkpoints/Panel_Detector_Fault_1-checkpoint.ipynb index 3dbf0a7..bb20408 100644 --- a/.ipynb_checkpoints/Panel_Detector_Fault_1-checkpoint.ipynb +++ b/.ipynb_checkpoints/Panel_Detector_Fault_1-checkpoint.ipynb @@ -14,7 +14,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [ { @@ -30,22 +30,19 @@ "text": [ "\n", "Training on: \t{'1': 1}\n", - "\n", - "\n", - "Loading pretrained weights.\n", - "\n", - "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "Colocations handled automatically by placer.\n", - "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "Use tf.cast instead.\n", - "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "Use tf.cast instead.\n", - "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "Deprecated in favor of operator or tf.math.divide.\n" + "\n" + ] + }, + { + "ename": "ValueError", + "evalue": "It must be either scales is None or len(scales) == 7, but len(scales) == 6.", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0mnormalize_coords\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnormalize_coords\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 142\u001b[0m \u001b[0msubtract_mean\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmean_color\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 143\u001b[0;31m swap_channels=swap_channels)\n\u001b[0m\u001b[1;32m 144\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 145\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Desktop/Rentadrone/ssd_keras-master/models/keras_ssd300.py\u001b[0m in \u001b[0;36mssd_300\u001b[0;34m(image_size, n_classes, mode, l2_regularization, min_scale, max_scale, scales, aspect_ratios_global, aspect_ratios_per_layer, two_boxes_for_ar1, steps, offsets, clip_boxes, variances, coords, normalize_coords, subtract_mean, divide_by_stddev, swap_channels, confidence_thresh, iou_threshold, top_k, nms_max_output_size, return_predictor_sizes)\u001b[0m\n\u001b[1;32m 191\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mscales\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 192\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscales\u001b[0m\u001b[0;34m)\u001b[0m 
\u001b[0;34m!=\u001b[0m \u001b[0mn_predictor_layers\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 193\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"It must be either scales is None or len(scales) == {}, but len(scales) == {}.\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn_predictor_layers\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscales\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 194\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# If no explicit list of scaling factors was passed, compute the list of scaling factors from `min_scale` and `max_scale`\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 195\u001b[0m \u001b[0mscales\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinspace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmin_scale\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_scale\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_predictor_layers\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mValueError\u001b[0m: It must be either scales is None or len(scales) == 7, but len(scales) == 6." ] } ], @@ -96,14 +93,14 @@ "\n", "\n", "def lr_schedule(epoch):\n", - " if epoch < 80:\n", + " if epoch < 100:\n", " return 0.001\n", - " elif epoch < 100:\n", + " elif epoch < 150:\n", " return 0.0001\n", " else:\n", " return 0.00001\n", "\n", - "config_path = 'config_7_fault_1.json'\n", + "config_path = 'config_300_fault_1.json'\n", "\n", "\n", "with open(config_path) as config_buffer:\n", @@ -132,9 +129,9 @@ "mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\n", "swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\n", "n_classes = len(labels) # Number of positive classes, e.g. 
20 for Pascal VOC, 80 for MS COCO\n",
- "scales_pascal = [0.01, 0.05, 0.1, 0.2, 0.37, 0.54, 0.71] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\n",
+ "#scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\n",
"#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
- "scales = scales_pascal\n",
+ "scales = [0.01, 0.05, 0.1, 0.2, 0.3, 0.37, 0.54]\n",
"aspect_ratios = [[1.0, 2.0, 0.5],\n", " [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n", " [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
@@ -142,7 +139,7 @@ " [1.0, 2.0, 0.5],\n", " [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\n",
"two_boxes_for_ar1 = True\n",
- "steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\n",
+ "steps = [2, 4, 8, 16, 32, 100] # The space between two adjacent anchor box center points for each predictor layer.\n",
"offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\n",
"clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\n",
"variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\n",
@@ -157,13 +154,7 @@ "# optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n", "\n", "\n",
- "if config['model']['backend'] == 'ssd7':\n", - " #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
- " scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. 
If this is passed, it will override `min_scale` and `max_scale`.\n", - " aspect_ratios = [0.5 ,1.0, 2.0] # The list of aspect ratios for the anchor boxes\n", - " two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n", - " steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n", - " offsets = None\n", + "\n", "\n", "if os.path.exists(model_path):\n", " print(\"\\nLoading pretrained weights.\\n\")\n", @@ -264,633 +255,9 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Processing image set 'train.txt': 100%|██████████| 783/783 [00:01<00:00, 511.30it/s]\n", - "Processing image set 'test.txt': 100%|██████████| 117/117 [00:00<00:00, 449.85it/s]\n", - "1 : 2246\n", - "Number of images in the training dataset:\t 783\n", - "Number of images in the validation dataset:\t 117\n", - "Epoch 1/100\n", - "\n", - "Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 471s 942ms/step - loss: 3.8465 - val_loss: 3.9360\n", - "\n", - "Epoch 00001: val_loss improved from inf to 3.93599, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 2/100\n", - "\n", - "Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 465s 931ms/step - loss: 3.8139 - val_loss: 3.8815\n", - "\n", - "Epoch 00002: val_loss improved from 3.93599 to 3.88150, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 3/100\n", - "\n", - "Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 460s 920ms/step - loss: 3.8367 - val_loss: 3.9229\n", - "\n", - "Epoch 00003: val_loss did not improve from 3.88150\n", - "Epoch 4/100\n", - "\n", - "Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 458s 917ms/step - loss: 3.8556 - val_loss: 3.8905\n", - "\n", - "Epoch 00004: val_loss did not improve from 3.88150\n", - "Epoch 5/100\n", - "\n", - "Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 453s 907ms/step - loss: 3.8205 - val_loss: 3.8369\n", - "\n", - "Epoch 00005: val_loss improved from 3.88150 to 3.83686, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 6/100\n", - "\n", - "Epoch 00006: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 900ms/step - loss: 3.8164 - val_loss: 3.9733\n", - "\n", - "Epoch 00006: val_loss did not improve from 3.83686\n", - "Epoch 7/100\n", - "\n", - "Epoch 00007: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 453s 905ms/step - loss: 3.7874 - val_loss: 3.8792\n", - "\n", - "Epoch 00007: val_loss did not improve from 3.83686\n", - "Epoch 8/100\n", - "\n", - "Epoch 00008: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.7895 - val_loss: 3.8497\n", - "\n", - "Epoch 00008: val_loss did not improve from 3.83686\n", - "Epoch 9/100\n", - "\n", - "Epoch 00009: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 454s 908ms/step - loss: 3.8056 - val_loss: 3.8965\n", - "\n", - "Epoch 00009: val_loss did not improve from 
3.83686\n", - "Epoch 10/100\n", - "\n", - "Epoch 00010: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 457s 914ms/step - loss: 3.7874 - val_loss: 3.8854\n", - "\n", - "Epoch 00010: val_loss did not improve from 3.83686\n", - "Epoch 11/100\n", - "\n", - "Epoch 00011: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 458s 917ms/step - loss: 3.7937 - val_loss: 3.9264\n", - "\n", - "Epoch 00011: val_loss did not improve from 3.83686\n", - "Epoch 12/100\n", - "\n", - "Epoch 00012: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 456s 913ms/step - loss: 3.8105 - val_loss: 3.8769\n", - "\n", - "Epoch 00012: val_loss did not improve from 3.83686\n", - "Epoch 13/100\n", - "\n", - "Epoch 00013: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 460s 921ms/step - loss: 3.8102 - val_loss: 3.9104\n", - "\n", - "Epoch 00013: val_loss did not improve from 3.83686\n", - "Epoch 14/100\n", - "\n", - "Epoch 00014: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 456s 912ms/step - loss: 3.8034 - val_loss: 3.8571\n", - "\n", - "Epoch 00014: val_loss did not improve from 3.83686\n", - "Epoch 15/100\n", - "\n", - "Epoch 00015: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 458s 917ms/step - loss: 3.7412 - val_loss: 3.8471\n", - "\n", - "Epoch 00015: val_loss did not improve from 3.83686\n", - "Epoch 16/100\n", - "\n", - "Epoch 00016: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.7816 - val_loss: 3.8868\n", - "\n", - "Epoch 00016: val_loss did not improve from 3.83686\n", - "Epoch 17/100\n", - "\n", - "Epoch 00017: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.7849 - val_loss: 3.9379\n", - "\n", - "Epoch 00017: val_loss did not improve from 3.83686\n", - "Epoch 18/100\n", - "\n", - "Epoch 00018: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.7739 - val_loss: 3.8811\n", - "\n", - "Epoch 00018: val_loss did not improve from 3.83686\n", - "Epoch 19/100\n", - "\n", - "Epoch 00019: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.7704 - val_loss: 3.8714\n", - "\n", - "Epoch 00019: val_loss did not improve from 3.83686\n", - "Epoch 20/100\n", - "\n", - "Epoch 00020: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.7367 - val_loss: 3.9438\n", - "\n", - "Epoch 00020: val_loss did not improve from 3.83686\n", - "Epoch 21/100\n", - "\n", - "Epoch 00021: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.7554 - val_loss: 3.9248\n", - "\n", - "Epoch 00021: val_loss did not improve from 3.83686\n", - "Epoch 22/100\n", - "\n", - "Epoch 00022: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.7682 - val_loss: 3.9140\n", - "\n", - "Epoch 00022: val_loss did not improve from 3.83686\n", - "Epoch 23/100\n", - "\n", - "Epoch 00023: 
LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.7867 - val_loss: 3.8202\n", - "\n", - "Epoch 00023: val_loss improved from 3.83686 to 3.82025, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 24/100\n", - "\n", - "Epoch 00024: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.7498 - val_loss: 3.8610\n", - "\n", - "Epoch 00024: val_loss did not improve from 3.82025\n", - "Epoch 25/100\n", - "\n", - "Epoch 00025: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.7391 - val_loss: 3.8886\n", - "\n", - "Epoch 00025: val_loss did not improve from 3.82025\n", - "Epoch 26/100\n", - "\n", - "Epoch 00026: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.7487 - val_loss: 3.8860\n", - "\n", - "Epoch 00026: val_loss did not improve from 3.82025\n", - "Epoch 27/100\n", - "\n", - "Epoch 00027: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.6680 - val_loss: 3.3866\n", - "\n", - "Epoch 00027: val_loss improved from 3.82025 to 3.38664, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 28/100\n", - "\n", - "Epoch 00028: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.6011 - val_loss: 3.3802\n", - "\n", - "Epoch 00028: val_loss improved from 3.38664 to 3.38020, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 29/100\n", - "\n", - "Epoch 00029: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.5266 - val_loss: 3.3741\n", - "\n", - "Epoch 00029: val_loss improved from 3.38020 to 3.37413, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 30/100\n", - "\n", - "Epoch 00030: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.5440 - val_loss: 3.3658\n", - "\n", - "Epoch 00030: val_loss improved from 3.37413 to 3.36583, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 31/100\n", - "\n", - "Epoch 00031: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.5227 - val_loss: 3.2712\n", - "\n", - "Epoch 00031: val_loss improved from 3.36583 to 3.27118, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 32/100\n", - "\n", - "Epoch 00032: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.5003 - val_loss: 3.2636\n", - "\n", - "Epoch 00032: val_loss improved from 3.27118 to 3.26357, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 33/100\n", - "\n", - "Epoch 00033: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.5573 - val_loss: 3.2981\n", - "\n", - "Epoch 00033: val_loss did not improve from 3.26357\n", - "Epoch 34/100\n", - "\n", - "Epoch 00034: LearningRateScheduler setting learning rate to 0.001.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "500/500 [==============================] - 451s 902ms/step - loss: 3.5104 - val_loss: 3.3216\n", - "\n", - "Epoch 
00034: val_loss did not improve from 3.26357\n", - "Epoch 35/100\n", - "\n", - "Epoch 00035: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4535 - val_loss: 3.2405\n", - "\n", - "Epoch 00035: val_loss improved from 3.26357 to 3.24054, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 36/100\n", - "\n", - "Epoch 00036: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.4667 - val_loss: 3.2127\n", - "\n", - "Epoch 00036: val_loss improved from 3.24054 to 3.21267, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 37/100\n", - "\n", - "Epoch 00037: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4625 - val_loss: 3.2967\n", - "\n", - "Epoch 00037: val_loss did not improve from 3.21267\n", - "Epoch 38/100\n", - "\n", - "Epoch 00038: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.4349 - val_loss: 3.2318\n", - "\n", - "Epoch 00038: val_loss did not improve from 3.21267\n", - "Epoch 39/100\n", - "\n", - "Epoch 00039: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4616 - val_loss: 3.2234\n", - "\n", - "Epoch 00039: val_loss did not improve from 3.21267\n", - "Epoch 40/100\n", - "\n", - "Epoch 00040: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4579 - val_loss: 3.2443\n", - "\n", - "Epoch 00040: val_loss did not improve from 3.21267\n", - "Epoch 41/100\n", - "\n", - "Epoch 00041: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.4741 - val_loss: 3.1831\n", - "\n", - "Epoch 00041: val_loss improved from 3.21267 to 3.18308, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 42/100\n", - "\n", - "Epoch 00042: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4098 - val_loss: 3.1778\n", - "\n", - "Epoch 00042: val_loss improved from 3.18308 to 3.17781, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 43/100\n", - "\n", - "Epoch 00043: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.4194 - val_loss: 3.3141\n", - "\n", - "Epoch 00043: val_loss did not improve from 3.17781\n", - "Epoch 44/100\n", - "\n", - "Epoch 00044: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4294 - val_loss: 3.1888\n", - "\n", - "Epoch 00044: val_loss did not improve from 3.17781\n", - "Epoch 45/100\n", - "\n", - "Epoch 00045: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.4157 - val_loss: 3.2060\n", - "\n", - "Epoch 00045: val_loss did not improve from 3.17781\n", - "Epoch 46/100\n", - "\n", - "Epoch 00046: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.4147 - val_loss: 3.1829\n", - "\n", - "Epoch 00046: val_loss did not improve from 3.17781\n", - "Epoch 47/100\n", - "\n", - "Epoch 00047: LearningRateScheduler setting learning 
rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4061 - val_loss: 3.2214\n", - "\n", - "Epoch 00047: val_loss did not improve from 3.17781\n", - "Epoch 48/100\n", - "\n", - "Epoch 00048: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.4013 - val_loss: 3.1010\n", - "\n", - "Epoch 00048: val_loss improved from 3.17781 to 3.10097, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 49/100\n", - "\n", - "Epoch 00049: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4092 - val_loss: 3.1462\n", - "\n", - "Epoch 00049: val_loss did not improve from 3.10097\n", - "Epoch 50/100\n", - "\n", - "Epoch 00050: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3759 - val_loss: 3.1760\n", - "\n", - "Epoch 00050: val_loss did not improve from 3.10097\n", - "Epoch 51/100\n", - "\n", - "Epoch 00051: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.4261 - val_loss: 3.1638\n", - "\n", - "Epoch 00051: val_loss did not improve from 3.10097\n", - "Epoch 52/100\n", - "\n", - "Epoch 00052: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3900 - val_loss: 3.2724\n", - "\n", - "Epoch 00052: val_loss did not improve from 3.10097\n", - "Epoch 53/100\n", - "\n", - "Epoch 00053: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3771 - val_loss: 3.1456\n", - "\n", - "Epoch 00053: val_loss did not improve from 3.10097\n", - "Epoch 54/100\n", - "\n", - "Epoch 00054: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3997 - val_loss: 3.2297\n", - "\n", - "Epoch 00054: val_loss did not improve from 3.10097\n", - "Epoch 55/100\n", - "\n", - "Epoch 00055: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3632 - val_loss: 3.1960\n", - "\n", - "Epoch 00055: val_loss did not improve from 3.10097\n", - "Epoch 56/100\n", - "\n", - "Epoch 00056: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3829 - val_loss: 3.1371\n", - "\n", - "Epoch 00056: val_loss did not improve from 3.10097\n", - "Epoch 57/100\n", - "\n", - "Epoch 00057: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3724 - val_loss: 3.1169\n", - "\n", - "Epoch 00057: val_loss did not improve from 3.10097\n", - "Epoch 58/100\n", - "\n", - "Epoch 00058: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3625 - val_loss: 3.2694\n", - "\n", - "Epoch 00058: val_loss did not improve from 3.10097\n", - "Epoch 59/100\n", - "\n", - "Epoch 00059: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3691 - val_loss: 3.1037\n", - "\n", - "Epoch 00059: val_loss did not improve from 3.10097\n", - "Epoch 60/100\n", - "\n", - "Epoch 00060: LearningRateScheduler setting learning rate to 0.001.\n", 
- "500/500 [==============================] - 450s 901ms/step - loss: 3.3976 - val_loss: 3.1110\n", - "\n", - "Epoch 00060: val_loss did not improve from 3.10097\n", - "Epoch 61/100\n", - "\n", - "Epoch 00061: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.3747 - val_loss: 3.1192\n", - "\n", - "Epoch 00061: val_loss did not improve from 3.10097\n", - "Epoch 62/100\n", - "\n", - "Epoch 00062: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 452s 903ms/step - loss: 3.3526 - val_loss: 3.1612\n", - "\n", - "Epoch 00062: val_loss did not improve from 3.10097\n", - "Epoch 63/100\n", - "\n", - "Epoch 00063: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 452s 903ms/step - loss: 3.4435 - val_loss: 3.1396\n", - "\n", - "Epoch 00063: val_loss did not improve from 3.10097\n", - "Epoch 64/100\n", - "\n", - "Epoch 00064: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 452s 903ms/step - loss: 3.3811 - val_loss: 3.1575\n", - "\n", - "Epoch 00064: val_loss did not improve from 3.10097\n", - "Epoch 65/100\n", - "\n", - "Epoch 00065: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3493 - val_loss: 3.2337\n", - "\n", - "Epoch 00065: val_loss did not improve from 3.10097\n", - "Epoch 66/100\n", - "\n", - "Epoch 00066: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 903ms/step - loss: 3.3510 - val_loss: 3.1230\n", - "\n", - "Epoch 00066: val_loss did not improve from 3.10097\n", - "Epoch 67/100\n", - "\n", - "Epoch 00067: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3569 - val_loss: 3.1419\n", - "\n", - "Epoch 00067: val_loss did not improve from 3.10097\n", - "Epoch 68/100\n", - "\n", - "Epoch 00068: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.3372 - val_loss: 3.0772\n", - "\n", - "Epoch 00068: val_loss improved from 3.10097 to 3.07717, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 69/100\n", - "\n", - "Epoch 00069: LearningRateScheduler setting learning rate to 0.001.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "500/500 [==============================] - 449s 898ms/step - loss: 3.3732 - val_loss: 3.1180\n", - "\n", - "Epoch 00069: val_loss did not improve from 3.07717\n", - "Epoch 70/100\n", - "\n", - "Epoch 00070: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.3438 - val_loss: 3.1318\n", - "\n", - "Epoch 00070: val_loss did not improve from 3.07717\n", - "Epoch 71/100\n", - "\n", - "Epoch 00071: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3451 - val_loss: 3.1492\n", - "\n", - "Epoch 00071: val_loss did not improve from 3.07717\n", - "Epoch 72/100\n", - "\n", - "Epoch 00072: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.3055 - val_loss: 3.1092\n", - "\n", - "Epoch 00072: val_loss did not improve from 3.07717\n", - "Epoch 73/100\n", - "\n", - "Epoch 00073: 
LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3299 - val_loss: 3.2583\n", - "\n", - "Epoch 00073: val_loss did not improve from 3.07717\n", - "Epoch 74/100\n", - "\n", - "Epoch 00074: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3542 - val_loss: 3.1427\n", - "\n", - "Epoch 00074: val_loss did not improve from 3.07717\n", - "Epoch 75/100\n", - "\n", - "Epoch 00075: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3353 - val_loss: 3.1750\n", - "\n", - "Epoch 00075: val_loss did not improve from 3.07717\n", - "Epoch 76/100\n", - "\n", - "Epoch 00076: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3239 - val_loss: 3.1659\n", - "\n", - "Epoch 00076: val_loss did not improve from 3.07717\n", - "Epoch 77/100\n", - "\n", - "Epoch 00077: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3305 - val_loss: 3.0835\n", - "\n", - "Epoch 00077: val_loss did not improve from 3.07717\n", - "Epoch 78/100\n", - "\n", - "Epoch 00078: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.3211 - val_loss: 3.1030\n", - "\n", - "Epoch 00078: val_loss did not improve from 3.07717\n", - "Epoch 79/100\n", - "\n", - "Epoch 00079: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3223 - val_loss: 3.1195\n", - "\n", - "Epoch 00079: val_loss did not improve from 3.07717\n", - "Epoch 80/100\n", - "\n", - "Epoch 00080: LearningRateScheduler setting learning rate to 0.001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.3430 - val_loss: 3.2754\n", - "\n", - "Epoch 00080: val_loss did not improve from 3.07717\n", - "Epoch 81/100\n", - "\n", - "Epoch 00081: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.1907 - val_loss: 2.9731\n", - "\n", - "Epoch 00081: val_loss improved from 3.07717 to 2.97306, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 82/100\n", - "\n", - "Epoch 00082: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.1456 - val_loss: 2.9711\n", - "\n", - "Epoch 00082: val_loss improved from 2.97306 to 2.97114, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 83/100\n", - "\n", - "Epoch 00083: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.1248 - val_loss: 2.9670\n", - "\n", - "Epoch 00083: val_loss improved from 2.97114 to 2.96699, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 84/100\n", - "\n", - "Epoch 00084: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.1167 - val_loss: 2.9557\n", - "\n", - "Epoch 00084: val_loss improved from 2.96699 to 2.95567, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 85/100\n", - "\n", - "Epoch 00085: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 
3.1287 - val_loss: 2.9472\n", - "\n", - "Epoch 00085: val_loss improved from 2.95567 to 2.94721, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 86/100\n", - "\n", - "Epoch 00086: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.1195 - val_loss: 2.9572\n", - "\n", - "Epoch 00086: val_loss did not improve from 2.94721\n", - "Epoch 87/100\n", - "\n", - "Epoch 00087: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.0942 - val_loss: 2.9739\n", - "\n", - "Epoch 00087: val_loss did not improve from 2.94721\n", - "Epoch 88/100\n", - "\n", - "Epoch 00088: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.0693 - val_loss: 2.9428\n", - "\n", - "Epoch 00088: val_loss improved from 2.94721 to 2.94277, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 89/100\n", - "\n", - "Epoch 00089: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.0901 - val_loss: 2.9392\n", - "\n", - "Epoch 00089: val_loss improved from 2.94277 to 2.93917, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 90/100\n", - "\n", - "Epoch 00090: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.0916 - val_loss: 2.9386\n", - "\n", - "Epoch 00090: val_loss improved from 2.93917 to 2.93864, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 91/100\n", - "\n", - "Epoch 00091: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.0853 - val_loss: 2.9484\n", - "\n", - "Epoch 00091: val_loss did not improve from 2.93864\n", - "Epoch 92/100\n", - "\n", - "Epoch 00092: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.0696 - val_loss: 2.9277\n", - "\n", - "Epoch 00092: val_loss improved from 2.93864 to 2.92770, saving model to experimento_ssd300_fault_1.h5\n", - "Epoch 93/100\n", - "\n", - "Epoch 00093: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 901ms/step - loss: 3.0827 - val_loss: 2.9312\n", - "\n", - "Epoch 00093: val_loss did not improve from 2.92770\n", - "Epoch 94/100\n", - "\n", - "Epoch 00094: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.0772 - val_loss: 2.9390\n", - "\n", - "Epoch 00094: val_loss did not improve from 2.92770\n", - "Epoch 95/100\n", - "\n", - "Epoch 00095: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.0609 - val_loss: 2.9373\n", - "\n", - "Epoch 00095: val_loss did not improve from 2.92770\n", - "Epoch 96/100\n", - "\n", - "Epoch 00096: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 450s 901ms/step - loss: 3.0587 - val_loss: 2.9377\n", - "\n", - "Epoch 00096: val_loss did not improve from 2.92770\n", - "Epoch 97/100\n", - "\n", - "Epoch 00097: LearningRateScheduler setting learning rate to 0.0001.\n", - "500/500 [==============================] - 451s 902ms/step - loss: 3.0529 - val_loss: 2.9221\n", - "\n", - "Epoch 00097: val_loss improved 
from 2.92770 to 2.92209, saving model to experimento_ssd300_fault_1.h5\n",
- "Epoch 98/100\n", - "\n", - "Epoch 00098: LearningRateScheduler setting learning rate to 0.0001.\n",
- "500/500 [==============================] - 450s 901ms/step - loss: 3.0698 - val_loss: 2.9095\n", - "\n",
- "Epoch 00098: val_loss improved from 2.92209 to 2.90946, saving model to experimento_ssd300_fault_1.h5\n",
- "Epoch 99/100\n", - "\n", - "Epoch 00099: LearningRateScheduler setting learning rate to 0.0001.\n",
- "500/500 [==============================] - 450s 901ms/step - loss: 3.0672 - val_loss: 2.9138\n", - "\n",
- "Epoch 00099: val_loss did not improve from 2.90946\n",
- "Epoch 100/100\n", - "\n", - "Epoch 00100: LearningRateScheduler setting learning rate to 0.0001.\n",
- "500/500 [==============================] - 451s 902ms/step - loss: 3.0530 - val_loss: 2.9209\n", - "\n",
- "Epoch 00100: val_loss did not improve from 2.90946\n" - ] - } - ],
+ "outputs": [], "source": [ "#MODEL TRAINING\n", "#####################################################################\n", "\n",
@@ -1084,8 +451,8 @@ "\n", "\n", "initial_epoch = 0\n",
- "final_epoch = 100 #config['train']['nb_epochs']\n", - "steps_per_epoch = 500\n",
+ "final_epoch = 500 #config['train']['nb_epochs']\n", + "steps_per_epoch = 100\n",
"\n", "history = model.fit_generator(generator=train_generator,\n", " steps_per_epoch=steps_per_epoch,\n",
@@ -1119,36 +486,9 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {},
- "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "dict_keys(['val_loss', 'loss', 'lr'])\n" - ] - }, - { - "data": {
- "image/png": "[386x278 px PNG (matplotlib 3.0.3): training and validation loss curves; base64 data omitted; see the plotting sketch after the diffs]\n",
- "text/plain": [ - "<Figure size 432x288 with 1 Axes>
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "experimento_ssd300_fault_1.h5\n" - ] - } - ], + "outputs": [], "source": [ "#Graficar aprendizaje\n", "\n", @@ -1180,21 +520,9 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'json' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_path\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mconfig_buffer\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mconfig\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mjson\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloads\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_buffer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mNameError\u001b[0m: name 'json' is not defined" - ] - } - ], + "outputs": [], "source": [ "\n", "config_path = 'config_300_fault_1.json'\n", @@ -1311,20 +639,9 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "24" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "ceil(val_dataset_size/batch_size)" ] @@ -1339,19 +656,9 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Training on: \t{'1': 1}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from imageio import imread\n", "from keras.preprocessing import image\n", @@ -1412,19 +719,9 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tiempo Total: 27.768\n", - "Tiempo promedio por imagen: 1.111\n", - "OK\n" - ] - } - ], + "outputs": [], "source": [ "image_paths = []\n", "for inp in input_path:\n", @@ -1505,18 +802,9 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "panel : 69\n", - "cell : 423\n" - ] - } - ], + "outputs": [], "source": [ "\n", "# Summary instance training\n", @@ -1530,17 +818,9 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1 : 6030\n" - ] - } - ], + "outputs": [], "source": [ "for i in summary_category_training.keys():\n", " print(i, ': {:.0f}'.format(summary_category_training[i]))" diff --git a/Panel_Detector_Fault_1.ipynb b/Panel_Detector_Fault_1.ipynb index 5f5786d..ede2053 100644 
--- a/Panel_Detector_Fault_1.ipynb +++ b/Panel_Detector_Fault_1.ipynb
@@ -228,9 +228,9 @@ "\n", "\n", "def lr_schedule(epoch):\n",
- " if epoch < 80:\n", + " if epoch < 100:\n",
" return 0.001\n",
- " elif epoch < 100:\n", + " elif epoch < 150:\n",
" return 0.0001\n", " else:\n", " return 0.00001\n",
@@ -266,7 +266,7 @@ "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
"scales_pascal = [0.01, 0.05, 0.1, 0.2, 0.37, 0.54, 0.71] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\n",
"#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
- "scales = scales_pascal\n",
+ "scales = scales_pascal #[0.01, 0.05, 0.1, 0.2, 0.3, 0.37, 0.54]\n",
"aspect_ratios = [[1.0, 2.0, 0.5],\n", " [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n", " [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
@@ -289,13 +289,7 @@ "# optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n", "\n", "\n",
- "if config['model']['backend'] == 'ssd7':\n", - " #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
- " scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
- " aspect_ratios = [0.5 ,1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
- " two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
- " steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
- " offsets = None\n", + "\n", "\n",
"if os.path.exists(model_path):\n", " print(\"\\nLoading pretrained weights.\\n\")\n",
@@ -336,7 +330,7 @@ " elif config['model']['backend'] == 'ssd7':\n", " #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
" from models.keras_ssd7 import build_model as ssd\n",
- " scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
+ " scales = [0.01, 0.08, 0.16, 0.32, 0.64] # An explicit list of anchor box scaling factors. 
If this is passed, it will override `min_scale` and `max_scale`.\n", " aspect_ratios = [0.5 ,1.0, 2.0] # The list of aspect ratios for the anchor boxes\n", " two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n", " steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n", @@ -396,91 +390,1869 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 101.41it/s]\n", - "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 61.30it/s]\n", + "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 110.18it/s]\n", + "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 68.64it/s]\n", "1 : 444\n", "Number of images in the training dataset:\t 33\n", "Number of images in the validation dataset:\t 2\n", "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Deprecated in favor of operator or tf.math.divide.\n", - "Epoch 1/500\n", + "Epoch 1/300\n", "\n", "Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 25s 246ms/step - loss: 11.5508 - val_loss: 6.3620\n", + "100/100 [==============================] - 25s 254ms/step - loss: 10.6549 - val_loss: 7.1380\n", "\n", - "Epoch 00001: val_loss improved from inf to 6.36203, saving model to experimento_ssd7_fault_1.h5\n", - "Epoch 2/500\n", + "Epoch 00001: val_loss improved from inf to 7.13801, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 2/300\n", "\n", "Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 22s 225ms/step - loss: 7.4845 - val_loss: 12.4694\n", + "100/100 [==============================] - 22s 224ms/step - loss: 7.9023 - val_loss: 8.1389\n", "\n", - "Epoch 00002: val_loss did not improve from 6.36203\n", - "Epoch 3/500\n", + "Epoch 00002: val_loss did not improve from 7.13801\n", + "Epoch 3/300\n", "\n", "Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 24s 237ms/step - loss: 7.0083 - val_loss: 5.9608\n", + "100/100 [==============================] - 23s 235ms/step - loss: 6.9971 - val_loss: 6.5428\n", "\n", - "Epoch 00003: val_loss improved from 6.36203 to 5.96082, saving model to experimento_ssd7_fault_1.h5\n", - "Epoch 4/500\n", + "Epoch 00003: val_loss improved from 7.13801 to 6.54282, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 4/300\n", "\n", "Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 23s 232ms/step - loss: 6.3241 - val_loss: 7.0951\n", + "100/100 [==============================] - 23s 230ms/step - loss: 6.9129 - val_loss: 5.2096\n", "\n", - "Epoch 00004: val_loss did not improve from 5.96082\n", - "Epoch 5/500\n", + "Epoch 00004: val_loss improved from 6.54282 to 5.20957, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 5/300\n", "\n", "Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 24s 239ms/step - loss: 5.9832 - val_loss: 5.5583\n", + 
"100/100 [==============================] - 23s 227ms/step - loss: 6.6221 - val_loss: 5.7554\n", "\n", - "Epoch 00005: val_loss improved from 5.96082 to 5.55828, saving model to experimento_ssd7_fault_1.h5\n", - "Epoch 6/500\n", + "Epoch 00005: val_loss did not improve from 5.20957\n", + "Epoch 6/300\n", "\n", "Epoch 00006: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 25s 248ms/step - loss: 6.0359 - val_loss: 10.5573\n", + "100/100 [==============================] - 23s 227ms/step - loss: 6.2810 - val_loss: 7.4999\n", "\n", - "Epoch 00006: val_loss did not improve from 5.55828\n", - "Epoch 7/500\n", + "Epoch 00006: val_loss did not improve from 5.20957\n", + "Epoch 7/300\n", "\n", "Epoch 00007: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 23s 232ms/step - loss: 5.9338 - val_loss: 12.5439\n", + "100/100 [==============================] - 24s 236ms/step - loss: 6.2034 - val_loss: 8.1129\n", "\n", - "Epoch 00007: val_loss did not improve from 5.55828\n", - "Epoch 8/500\n", + "Epoch 00007: val_loss did not improve from 5.20957\n", + "Epoch 8/300\n", "\n", "Epoch 00008: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 23s 228ms/step - loss: 6.3084 - val_loss: 8.1511\n", + "100/100 [==============================] - 24s 239ms/step - loss: 5.8430 - val_loss: 5.0259\n", "\n", - "Epoch 00008: val_loss did not improve from 5.55828\n", - "Epoch 9/500\n", + "Epoch 00008: val_loss improved from 5.20957 to 5.02593, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 9/300\n", "\n", "Epoch 00009: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 22s 222ms/step - loss: 5.8168 - val_loss: 10.5703\n", + "100/100 [==============================] - 24s 241ms/step - loss: 5.8222 - val_loss: 4.7504\n", "\n", - "Epoch 00009: val_loss did not improve from 5.55828\n", - "Epoch 10/500\n", + "Epoch 00009: val_loss improved from 5.02593 to 4.75040, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 10/300\n", "\n", "Epoch 00010: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 23s 235ms/step - loss: 5.4740 - val_loss: 5.8349\n", + "100/100 [==============================] - 24s 236ms/step - loss: 5.7965 - val_loss: 5.2418\n", "\n", - "Epoch 00010: val_loss did not improve from 5.55828\n", - "Epoch 11/500\n", + "Epoch 00010: val_loss did not improve from 4.75040\n", + "Epoch 11/300\n", "\n", "Epoch 00011: LearningRateScheduler setting learning rate to 0.001.\n", - "100/100 [==============================] - 23s 227ms/step - loss: 5.4750 - val_loss: 4.4782\n", + "100/100 [==============================] - 24s 238ms/step - loss: 5.7147 - val_loss: 5.0379\n", "\n", - "Epoch 00011: val_loss improved from 5.55828 to 4.47816, saving model to experimento_ssd7_fault_1.h5\n", - "Epoch 12/500\n", + "Epoch 00011: val_loss did not improve from 4.75040\n", + "Epoch 12/300\n", "\n", "Epoch 00012: LearningRateScheduler setting learning rate to 0.001.\n", - " 88/100 [=========================>....] 
- ETA: 2s - loss: 5.5271" + "100/100 [==============================] - 24s 239ms/step - loss: 6.0124 - val_loss: 6.9272\n", + "\n", + "Epoch 00012: val_loss did not improve from 4.75040\n", + "Epoch 13/300\n", + "\n", + "Epoch 00013: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 235ms/step - loss: 5.8754 - val_loss: 6.6574\n", + "\n", + "Epoch 00013: val_loss did not improve from 4.75040\n", + "Epoch 14/300\n", + "\n", + "Epoch 00014: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 241ms/step - loss: 5.9797 - val_loss: 4.7180\n", + "\n", + "Epoch 00014: val_loss improved from 4.75040 to 4.71805, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 15/300\n", + "\n", + "Epoch 00015: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 240ms/step - loss: 5.4824 - val_loss: 4.7904\n", + "\n", + "Epoch 00015: val_loss did not improve from 4.71805\n", + "Epoch 16/300\n", + "\n", + "Epoch 00016: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 238ms/step - loss: 5.8061 - val_loss: 6.7183\n", + "\n", + "Epoch 00016: val_loss did not improve from 4.71805\n", + "Epoch 17/300\n", + "\n", + "Epoch 00017: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 239ms/step - loss: 5.8219 - val_loss: 5.2346\n", + "\n", + "Epoch 00017: val_loss did not improve from 4.71805\n", + "Epoch 18/300\n", + "\n", + "Epoch 00018: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 241ms/step - loss: 5.3119 - val_loss: 4.6287\n", + "\n", + "Epoch 00018: val_loss improved from 4.71805 to 4.62874, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 19/300\n", + "\n", + "Epoch 00019: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 240ms/step - loss: 5.5968 - val_loss: 7.2247\n", + "\n", + "Epoch 00019: val_loss did not improve from 4.62874\n", + "Epoch 20/300\n", + "\n", + "Epoch 00020: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 237ms/step - loss: 5.1902 - val_loss: 4.9278\n", + "\n", + "Epoch 00020: val_loss did not improve from 4.62874\n", + "Epoch 21/300\n", + "\n", + "Epoch 00021: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 5.4595 - val_loss: 4.8812\n", + "\n", + "Epoch 00021: val_loss did not improve from 4.62874\n", + "Epoch 22/300\n", + "\n", + "Epoch 00022: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 5.3020 - val_loss: 5.8746\n", + "\n", + "Epoch 00022: val_loss did not improve from 4.62874\n", + "Epoch 23/300\n", + "\n", + "Epoch 00023: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 5.1747 - val_loss: 6.2522\n", + "\n", + "Epoch 00023: val_loss did not improve from 4.62874\n", + "Epoch 24/300\n", + "\n", + "Epoch 00024: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 5.2842 - val_loss: 7.1665\n", + "\n", + "Epoch 00024: val_loss did not improve from 4.62874\n", + "Epoch 25/300\n", + "\n", + "Epoch 00025: 
LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 5.2352 - val_loss: 6.2755\n", + "\n", + "Epoch 00025: val_loss did not improve from 4.62874\n", + "Epoch 26/300\n", + "\n", + "Epoch 00026: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 5.3286 - val_loss: 6.8056\n", + "\n", + "Epoch 00026: val_loss did not improve from 4.62874\n", + "Epoch 27/300\n", + "\n", + "Epoch 00027: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 5.1623 - val_loss: 5.1693\n", + "\n", + "Epoch 00027: val_loss did not improve from 4.62874\n", + "Epoch 28/300\n", + "\n", + "Epoch 00028: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 5.1827 - val_loss: 6.2402\n", + "\n", + "Epoch 00028: val_loss did not improve from 4.62874\n", + "Epoch 29/300\n", + "\n", + "Epoch 00029: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 5.0811 - val_loss: 5.0592\n", + "\n", + "Epoch 00029: val_loss did not improve from 4.62874\n", + "Epoch 30/300\n", + "\n", + "Epoch 00030: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 4.9780 - val_loss: 4.0382\n", + "\n", + "Epoch 00030: val_loss improved from 4.62874 to 4.03825, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 31/300\n", + "\n", + "Epoch 00031: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 5.0581 - val_loss: 5.0438\n", + "\n", + "Epoch 00031: val_loss did not improve from 4.03825\n", + "Epoch 32/300\n", + "\n", + "Epoch 00032: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 5.5756 - val_loss: 4.3488\n", + "\n", + "Epoch 00032: val_loss did not improve from 4.03825\n", + "Epoch 33/300\n", + "\n", + "Epoch 00033: LearningRateScheduler setting learning rate to 0.001.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 23s 229ms/step - loss: 5.1219 - val_loss: 4.1021\n", + "\n", + "Epoch 00033: val_loss did not improve from 4.03825\n", + "Epoch 34/300\n", + "\n", + "Epoch 00034: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 5.2336 - val_loss: 6.5314\n", + "\n", + "Epoch 00034: val_loss did not improve from 4.03825\n", + "Epoch 35/300\n", + "\n", + "Epoch 00035: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 5.0825 - val_loss: 5.4485\n", + "\n", + "Epoch 00035: val_loss did not improve from 4.03825\n", + "Epoch 36/300\n", + "\n", + "Epoch 00036: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 4.9638 - val_loss: 4.1527\n", + "\n", + "Epoch 00036: val_loss did not improve from 4.03825\n", + "Epoch 37/300\n", + "\n", + "Epoch 00037: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 5.0223 - val_loss: 4.2804\n", + "\n", + "Epoch 00037: val_loss did not improve from 4.03825\n", + 
"Epoch 38/300\n", + "\n", + "Epoch 00038: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 4.9524 - val_loss: 4.6403\n", + "\n", + "Epoch 00038: val_loss did not improve from 4.03825\n", + "Epoch 39/300\n", + "\n", + "Epoch 00039: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 4.8690 - val_loss: 5.0897\n", + "\n", + "Epoch 00039: val_loss did not improve from 4.03825\n", + "Epoch 40/300\n", + "\n", + "Epoch 00040: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 4.9967 - val_loss: 6.5734\n", + "\n", + "Epoch 00040: val_loss did not improve from 4.03825\n", + "Epoch 41/300\n", + "\n", + "Epoch 00041: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 4.6935 - val_loss: 4.8306\n", + "\n", + "Epoch 00041: val_loss did not improve from 4.03825\n", + "Epoch 42/300\n", + "\n", + "Epoch 00042: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 4.9150 - val_loss: 5.5249\n", + "\n", + "Epoch 00042: val_loss did not improve from 4.03825\n", + "Epoch 43/300\n", + "\n", + "Epoch 00043: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 4.9159 - val_loss: 4.5594\n", + "\n", + "Epoch 00043: val_loss did not improve from 4.03825\n", + "Epoch 44/300\n", + "\n", + "Epoch 00044: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 4.9590 - val_loss: 5.4138\n", + "\n", + "Epoch 00044: val_loss did not improve from 4.03825\n", + "Epoch 45/300\n", + "\n", + "Epoch 00045: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.9129 - val_loss: 5.2372\n", + "\n", + "Epoch 00045: val_loss did not improve from 4.03825\n", + "Epoch 46/300\n", + "\n", + "Epoch 00046: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 4.9230 - val_loss: 4.2868\n", + "\n", + "Epoch 00046: val_loss did not improve from 4.03825\n", + "Epoch 47/300\n", + "\n", + "Epoch 00047: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 4.7760 - val_loss: 3.9728\n", + "\n", + "Epoch 00047: val_loss improved from 4.03825 to 3.97277, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 48/300\n", + "\n", + "Epoch 00048: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 4.7739 - val_loss: 4.5149\n", + "\n", + "Epoch 00048: val_loss did not improve from 3.97277\n", + "Epoch 49/300\n", + "\n", + "Epoch 00049: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.8919 - val_loss: 4.3892\n", + "\n", + "Epoch 00049: val_loss did not improve from 3.97277\n", + "Epoch 50/300\n", + "\n", + "Epoch 00050: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 4.5565 - val_loss: 4.6482\n", + "\n", + "Epoch 00050: val_loss did not improve from 3.97277\n", + "Epoch 51/300\n", + "\n", + "Epoch 
00051: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.4726 - val_loss: 4.4884\n", + "\n", + "Epoch 00051: val_loss did not improve from 3.97277\n", + "Epoch 52/300\n", + "\n", + "Epoch 00052: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 4.8452 - val_loss: 5.0178\n", + "\n", + "Epoch 00052: val_loss did not improve from 3.97277\n", + "Epoch 53/300\n", + "\n", + "Epoch 00053: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 4.6518 - val_loss: 4.1244\n", + "\n", + "Epoch 00053: val_loss did not improve from 3.97277\n", + "Epoch 54/300\n", + "\n", + "Epoch 00054: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 5.0259 - val_loss: 5.0076\n", + "\n", + "Epoch 00054: val_loss did not improve from 3.97277\n", + "Epoch 55/300\n", + "\n", + "Epoch 00055: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 4.8086 - val_loss: 4.0930\n", + "\n", + "Epoch 00055: val_loss did not improve from 3.97277\n", + "Epoch 56/300\n", + "\n", + "Epoch 00056: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.5952 - val_loss: 5.0663\n", + "\n", + "Epoch 00056: val_loss did not improve from 3.97277\n", + "Epoch 57/300\n", + "\n", + "Epoch 00057: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 4.7587 - val_loss: 4.4776\n", + "\n", + "Epoch 00057: val_loss did not improve from 3.97277\n", + "Epoch 58/300\n", + "\n", + "Epoch 00058: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 4.6116 - val_loss: 3.9222\n", + "\n", + "Epoch 00058: val_loss improved from 3.97277 to 3.92225, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 59/300\n", + "\n", + "Epoch 00059: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 4.6267 - val_loss: 4.5797\n", + "\n", + "Epoch 00059: val_loss did not improve from 3.92225\n", + "Epoch 60/300\n", + "\n", + "Epoch 00060: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.7478 - val_loss: 3.8782\n", + "\n", + "Epoch 00060: val_loss improved from 3.92225 to 3.87817, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 61/300\n", + "\n", + "Epoch 00061: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.5312 - val_loss: 4.4061\n", + "\n", + "Epoch 00061: val_loss did not improve from 3.87817\n", + "Epoch 62/300\n", + "\n", + "Epoch 00062: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.4557 - val_loss: 3.9146\n", + "\n", + "Epoch 00062: val_loss did not improve from 3.87817\n", + "Epoch 63/300\n", + "\n", + "Epoch 00063: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 4.6314 - val_loss: 4.5734\n", + "\n", + "Epoch 00063: val_loss did not improve from 3.87817\n", + "Epoch 64/300\n", + 
"\n", + "Epoch 00064: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 4.5498 - val_loss: 5.9857\n", + "\n", + "Epoch 00064: val_loss did not improve from 3.87817\n", + "Epoch 65/300\n", + "\n", + "Epoch 00065: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 4.5426 - val_loss: 6.1037\n", + "\n", + "Epoch 00065: val_loss did not improve from 3.87817\n", + "Epoch 66/300\n", + "\n", + "Epoch 00066: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 4.7414 - val_loss: 4.6923\n", + "\n", + "Epoch 00066: val_loss did not improve from 3.87817\n", + "Epoch 67/300\n", + "\n", + "Epoch 00067: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 4.5896 - val_loss: 3.7859\n", + "\n", + "Epoch 00067: val_loss improved from 3.87817 to 3.78585, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 68/300\n", + "\n", + "Epoch 00068: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 4.6831 - val_loss: 3.9670\n", + "\n", + "Epoch 00068: val_loss did not improve from 3.78585\n", + "Epoch 69/300\n", + "\n", + "Epoch 00069: LearningRateScheduler setting learning rate to 0.001.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 23s 231ms/step - loss: 4.8604 - val_loss: 5.3849\n", + "\n", + "Epoch 00069: val_loss did not improve from 3.78585\n", + "Epoch 70/300\n", + "\n", + "Epoch 00070: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 4.5709 - val_loss: 4.4797\n", + "\n", + "Epoch 00070: val_loss did not improve from 3.78585\n", + "Epoch 71/300\n", + "\n", + "Epoch 00071: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 4.4958 - val_loss: 4.6507\n", + "\n", + "Epoch 00071: val_loss did not improve from 3.78585\n", + "Epoch 72/300\n", + "\n", + "Epoch 00072: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 4.4013 - val_loss: 4.1583\n", + "\n", + "Epoch 00072: val_loss did not improve from 3.78585\n", + "Epoch 73/300\n", + "\n", + "Epoch 00073: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.4782 - val_loss: 3.9448\n", + "\n", + "Epoch 00073: val_loss did not improve from 3.78585\n", + "Epoch 74/300\n", + "\n", + "Epoch 00074: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 22s 223ms/step - loss: 4.5436 - val_loss: 4.9104\n", + "\n", + "Epoch 00074: val_loss did not improve from 3.78585\n", + "Epoch 75/300\n", + "\n", + "Epoch 00075: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 4.4747 - val_loss: 5.1044\n", + "\n", + "Epoch 00075: val_loss did not improve from 3.78585\n", + "Epoch 76/300\n", + "\n", + "Epoch 00076: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 4.4400 - val_loss: 3.9510\n", + "\n", + "Epoch 00076: val_loss did not improve 
from 3.78585\n", + "Epoch 77/300\n", + "\n", + "Epoch 00077: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 4.8100 - val_loss: 3.7751\n", + "\n", + "Epoch 00077: val_loss improved from 3.78585 to 3.77514, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 78/300\n", + "\n", + "Epoch 00078: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 4.3564 - val_loss: 5.0605\n", + "\n", + "Epoch 00078: val_loss did not improve from 3.77514\n", + "Epoch 79/300\n", + "\n", + "Epoch 00079: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 4.4826 - val_loss: 4.5409\n", + "\n", + "Epoch 00079: val_loss did not improve from 3.77514\n", + "Epoch 80/300\n", + "\n", + "Epoch 00080: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 4.4823 - val_loss: 4.0823\n", + "\n", + "Epoch 00080: val_loss did not improve from 3.77514\n", + "Epoch 81/300\n", + "\n", + "Epoch 00081: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 4.3587 - val_loss: 4.4663\n", + "\n", + "Epoch 00081: val_loss did not improve from 3.77514\n", + "Epoch 82/300\n", + "\n", + "Epoch 00082: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 4.4684 - val_loss: 3.8928\n", + "\n", + "Epoch 00082: val_loss did not improve from 3.77514\n", + "Epoch 83/300\n", + "\n", + "Epoch 00083: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 4.5686 - val_loss: 3.7042\n", + "\n", + "Epoch 00083: val_loss improved from 3.77514 to 3.70417, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 84/300\n", + "\n", + "Epoch 00084: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 238ms/step - loss: 4.5652 - val_loss: 6.2923\n", + "\n", + "Epoch 00084: val_loss did not improve from 3.70417\n", + "Epoch 85/300\n", + "\n", + "Epoch 00085: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 239ms/step - loss: 4.4278 - val_loss: 4.2425\n", + "\n", + "Epoch 00085: val_loss did not improve from 3.70417\n", + "Epoch 86/300\n", + "\n", + "Epoch 00086: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 236ms/step - loss: 4.5282 - val_loss: 4.0433\n", + "\n", + "Epoch 00086: val_loss did not improve from 3.70417\n", + "Epoch 87/300\n", + "\n", + "Epoch 00087: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 236ms/step - loss: 4.3153 - val_loss: 4.0100\n", + "\n", + "Epoch 00087: val_loss did not improve from 3.70417\n", + "Epoch 88/300\n", + "\n", + "Epoch 00088: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 239ms/step - loss: 4.3400 - val_loss: 3.8899\n", + "\n", + "Epoch 00088: val_loss did not improve from 3.70417\n", + "Epoch 89/300\n", + "\n", + "Epoch 00089: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 237ms/step - loss: 4.2937 - val_loss: 4.2786\n", + "\n", + "Epoch 00089: val_loss 
did not improve from 3.70417\n", + "Epoch 90/300\n", + "\n", + "Epoch 00090: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 238ms/step - loss: 4.4161 - val_loss: 3.8800\n", + "\n", + "Epoch 00090: val_loss did not improve from 3.70417\n", + "Epoch 91/300\n", + "\n", + "Epoch 00091: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 240ms/step - loss: 4.3962 - val_loss: 3.9083\n", + "\n", + "Epoch 00091: val_loss did not improve from 3.70417\n", + "Epoch 92/300\n", + "\n", + "Epoch 00092: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 238ms/step - loss: 4.1707 - val_loss: 3.7263\n", + "\n", + "Epoch 00092: val_loss did not improve from 3.70417\n", + "Epoch 93/300\n", + "\n", + "Epoch 00093: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 4.0858 - val_loss: 3.7792\n", + "\n", + "Epoch 00093: val_loss did not improve from 3.70417\n", + "Epoch 94/300\n", + "\n", + "Epoch 00094: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 240ms/step - loss: 4.3901 - val_loss: 5.2404\n", + "\n", + "Epoch 00094: val_loss did not improve from 3.70417\n", + "Epoch 95/300\n", + "\n", + "Epoch 00095: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 237ms/step - loss: 4.2682 - val_loss: 3.7054\n", + "\n", + "Epoch 00095: val_loss did not improve from 3.70417\n", + "Epoch 96/300\n", + "\n", + "Epoch 00096: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 237ms/step - loss: 4.4278 - val_loss: 5.4239\n", + "\n", + "Epoch 00096: val_loss did not improve from 3.70417\n", + "Epoch 97/300\n", + "\n", + "Epoch 00097: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 239ms/step - loss: 4.3972 - val_loss: 3.6471\n", + "\n", + "Epoch 00097: val_loss improved from 3.70417 to 3.64705, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 98/300\n", + "\n", + "Epoch 00098: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 4.1795 - val_loss: 3.6605\n", + "\n", + "Epoch 00098: val_loss did not improve from 3.64705\n", + "Epoch 99/300\n", + "\n", + "Epoch 00099: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 243ms/step - loss: 4.2485 - val_loss: 3.6667\n", + "\n", + "Epoch 00099: val_loss did not improve from 3.64705\n", + "Epoch 100/300\n", + "\n", + "Epoch 00100: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 24s 237ms/step - loss: 4.1815 - val_loss: 3.8390\n", + "\n", + "Epoch 00100: val_loss did not improve from 3.64705\n", + "Epoch 101/300\n", + "\n", + "Epoch 00101: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 239ms/step - loss: 4.0925 - val_loss: 3.5656\n", + "\n", + "Epoch 00101: val_loss improved from 3.64705 to 3.56559, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 102/300\n", + "\n", + "Epoch 00102: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 240ms/step - loss: 4.0260 - val_loss: 3.5725\n", + "\n", + 
"Epoch 00102: val_loss did not improve from 3.56559\n", + "Epoch 103/300\n", + "\n", + "Epoch 00103: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 236ms/step - loss: 4.0330 - val_loss: 3.5701\n", + "\n", + "Epoch 00103: val_loss did not improve from 3.56559\n", + "Epoch 104/300\n", + "\n", + "Epoch 00104: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 240ms/step - loss: 4.0368 - val_loss: 3.5767\n", + "\n", + "Epoch 00104: val_loss did not improve from 3.56559\n", + "Epoch 105/300\n", + "\n", + "Epoch 00105: LearningRateScheduler setting learning rate to 0.0001.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 24s 239ms/step - loss: 4.0011 - val_loss: 3.5793\n", + "\n", + "Epoch 00105: val_loss did not improve from 3.56559\n", + "Epoch 106/300\n", + "\n", + "Epoch 00106: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 235ms/step - loss: 3.9883 - val_loss: 3.5621\n", + "\n", + "Epoch 00106: val_loss improved from 3.56559 to 3.56213, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 107/300\n", + "\n", + "Epoch 00107: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 237ms/step - loss: 4.0447 - val_loss: 3.6073\n", + "\n", + "Epoch 00107: val_loss did not improve from 3.56213\n", + "Epoch 108/300\n", + "\n", + "Epoch 00108: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 236ms/step - loss: 4.0080 - val_loss: 3.4952\n", + "\n", + "Epoch 00108: val_loss improved from 3.56213 to 3.49518, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 109/300\n", + "\n", + "Epoch 00109: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 239ms/step - loss: 3.9847 - val_loss: 3.6319\n", + "\n", + "Epoch 00109: val_loss did not improve from 3.49518\n", + "Epoch 110/300\n", + "\n", + "Epoch 00110: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 237ms/step - loss: 3.9790 - val_loss: 3.5412\n", + "\n", + "Epoch 00110: val_loss did not improve from 3.49518\n", + "Epoch 111/300\n", + "\n", + "Epoch 00111: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 241ms/step - loss: 3.9774 - val_loss: 3.5972\n", + "\n", + "Epoch 00111: val_loss did not improve from 3.49518\n", + "Epoch 112/300\n", + "\n", + "Epoch 00112: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 239ms/step - loss: 3.9309 - val_loss: 3.4730\n", + "\n", + "Epoch 00112: val_loss improved from 3.49518 to 3.47297, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 113/300\n", + "\n", + "Epoch 00113: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 242ms/step - loss: 3.9758 - val_loss: 3.4766\n", + "\n", + "Epoch 00113: val_loss did not improve from 3.47297\n", + "Epoch 114/300\n", + "\n", + "Epoch 00114: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 241ms/step - loss: 3.9842 - val_loss: 3.5218\n", + "\n", + "Epoch 00114: val_loss did not improve from 3.47297\n", + "Epoch 115/300\n", + "\n", + "Epoch 00115: 
LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 238ms/step - loss: 3.9845 - val_loss: 4.4679\n", + "\n", + "Epoch 00115: val_loss did not improve from 3.47297\n", + "Epoch 116/300\n", + "\n", + "Epoch 00116: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 24s 238ms/step - loss: 3.9049 - val_loss: 3.4957\n", + "\n", + "Epoch 00116: val_loss did not improve from 3.47297\n", + "Epoch 117/300\n", + "\n", + "Epoch 00117: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 234ms/step - loss: 3.8415 - val_loss: 3.6990\n", + "\n", + "Epoch 00117: val_loss did not improve from 3.47297\n", + "Epoch 118/300\n", + "\n", + "Epoch 00118: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.9522 - val_loss: 3.5176\n", + "\n", + "Epoch 00118: val_loss did not improve from 3.47297\n", + "Epoch 119/300\n", + "\n", + "Epoch 00119: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.9326 - val_loss: 3.5038\n", + "\n", + "Epoch 00119: val_loss did not improve from 3.47297\n", + "Epoch 120/300\n", + "\n", + "Epoch 00120: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8707 - val_loss: 3.5074\n", + "\n", + "Epoch 00120: val_loss did not improve from 3.47297\n", + "Epoch 121/300\n", + "\n", + "Epoch 00121: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8843 - val_loss: 3.5110\n", + "\n", + "Epoch 00121: val_loss did not improve from 3.47297\n", + "Epoch 122/300\n", + "\n", + "Epoch 00122: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 235ms/step - loss: 4.0774 - val_loss: 3.5721\n", + "\n", + "Epoch 00122: val_loss did not improve from 3.47297\n", + "Epoch 123/300\n", + "\n", + "Epoch 00123: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8747 - val_loss: 3.5390\n", + "\n", + "Epoch 00123: val_loss did not improve from 3.47297\n", + "Epoch 124/300\n", + "\n", + "Epoch 00124: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 235ms/step - loss: 3.9969 - val_loss: 3.5060\n", + "\n", + "Epoch 00124: val_loss did not improve from 3.47297\n", + "Epoch 125/300\n", + "\n", + "Epoch 00125: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.9292 - val_loss: 3.4959\n", + "\n", + "Epoch 00125: val_loss did not improve from 3.47297\n", + "Epoch 126/300\n", + "\n", + "Epoch 00126: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8092 - val_loss: 3.4940\n", + "\n", + "Epoch 00126: val_loss did not improve from 3.47297\n", + "Epoch 127/300\n", + "\n", + "Epoch 00127: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.9727 - val_loss: 3.6423\n", + "\n", + "Epoch 00127: val_loss did not improve from 3.47297\n", + "Epoch 128/300\n", + "\n", + "Epoch 00128: LearningRateScheduler setting learning rate to 
0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.9268 - val_loss: 3.5594\n", + "\n", + "Epoch 00128: val_loss did not improve from 3.47297\n", + "Epoch 129/300\n", + "\n", + "Epoch 00129: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.9828 - val_loss: 3.5784\n", + "\n", + "Epoch 00129: val_loss did not improve from 3.47297\n", + "Epoch 130/300\n", + "\n", + "Epoch 00130: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.9173 - val_loss: 3.4558\n", + "\n", + "Epoch 00130: val_loss improved from 3.47297 to 3.45582, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 131/300\n", + "\n", + "Epoch 00131: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8509 - val_loss: 3.4421\n", + "\n", + "Epoch 00131: val_loss improved from 3.45582 to 3.44215, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 132/300\n", + "\n", + "Epoch 00132: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.9329 - val_loss: 3.4858\n", + "\n", + "Epoch 00132: val_loss did not improve from 3.44215\n", + "Epoch 133/300\n", + "\n", + "Epoch 00133: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8279 - val_loss: 3.4839\n", + "\n", + "Epoch 00133: val_loss did not improve from 3.44215\n", + "Epoch 134/300\n", + "\n", + "Epoch 00134: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8744 - val_loss: 3.4289\n", + "\n", + "Epoch 00134: val_loss improved from 3.44215 to 3.42894, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 135/300\n", + "\n", + "Epoch 00135: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.9094 - val_loss: 3.5704\n", + "\n", + "Epoch 00135: val_loss did not improve from 3.42894\n", + "Epoch 136/300\n", + "\n", + "Epoch 00136: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 235ms/step - loss: 3.9023 - val_loss: 3.6963\n", + "\n", + "Epoch 00136: val_loss did not improve from 3.42894\n", + "Epoch 137/300\n", + "\n", + "Epoch 00137: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.9174 - val_loss: 3.4380\n", + "\n", + "Epoch 00137: val_loss did not improve from 3.42894\n", + "Epoch 138/300\n", + "\n", + "Epoch 00138: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.9170 - val_loss: 3.4615\n", + "\n", + "Epoch 00138: val_loss did not improve from 3.42894\n", + "Epoch 139/300\n", + "\n", + "Epoch 00139: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8904 - val_loss: 3.4831\n", + "\n", + "Epoch 00139: val_loss did not improve from 3.42894\n", + "Epoch 140/300\n", + "\n", + "Epoch 00140: LearningRateScheduler setting learning rate to 0.0001.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 23s 231ms/step - loss: 3.9020 - val_loss: 
3.4521\n", + "\n", + "Epoch 00140: val_loss did not improve from 3.42894\n", + "Epoch 141/300\n", + "\n", + "Epoch 00141: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.9449 - val_loss: 3.4298\n", + "\n", + "Epoch 00141: val_loss did not improve from 3.42894\n", + "Epoch 142/300\n", + "\n", + "Epoch 00142: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 234ms/step - loss: 3.9587 - val_loss: 3.5832\n", + "\n", + "Epoch 00142: val_loss did not improve from 3.42894\n", + "Epoch 143/300\n", + "\n", + "Epoch 00143: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8442 - val_loss: 3.4396\n", + "\n", + "Epoch 00143: val_loss did not improve from 3.42894\n", + "Epoch 144/300\n", + "\n", + "Epoch 00144: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7764 - val_loss: 3.5899\n", + "\n", + "Epoch 00144: val_loss did not improve from 3.42894\n", + "Epoch 145/300\n", + "\n", + "Epoch 00145: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8358 - val_loss: 3.6356\n", + "\n", + "Epoch 00145: val_loss did not improve from 3.42894\n", + "Epoch 146/300\n", + "\n", + "Epoch 00146: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8529 - val_loss: 3.5144\n", + "\n", + "Epoch 00146: val_loss did not improve from 3.42894\n", + "Epoch 147/300\n", + "\n", + "Epoch 00147: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8630 - val_loss: 3.5363\n", + "\n", + "Epoch 00147: val_loss did not improve from 3.42894\n", + "Epoch 148/300\n", + "\n", + "Epoch 00148: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8609 - val_loss: 3.5237\n", + "\n", + "Epoch 00148: val_loss did not improve from 3.42894\n", + "Epoch 149/300\n", + "\n", + "Epoch 00149: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.8884 - val_loss: 3.3907\n", + "\n", + "Epoch 00149: val_loss improved from 3.42894 to 3.39067, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 150/300\n", + "\n", + "Epoch 00150: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8277 - val_loss: 3.5137\n", + "\n", + "Epoch 00150: val_loss did not improve from 3.39067\n", + "Epoch 151/300\n", + "\n", + "Epoch 00151: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7854 - val_loss: 3.4483\n", + "\n", + "Epoch 00151: val_loss did not improve from 3.39067\n", + "Epoch 152/300\n", + "\n", + "Epoch 00152: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.7544 - val_loss: 3.4099\n", + "\n", + "Epoch 00152: val_loss did not improve from 3.39067\n", + "Epoch 153/300\n", + "\n", + "Epoch 00153: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8393 - val_loss: 3.3969\n", 
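The learning-rate transitions visible in this log (0.001 through epoch 100, 0.0001 through epoch 150, 1e-05 from epoch 151 onward) follow exactly the updated lr_schedule introduced by this diff. A minimal sketch of how such a step schedule is attached to training with Keras's LearningRateScheduler callback; verbose=1 is what produces the "setting learning rate to ..." lines, and the fit call in the trailing comment is an assumption.

from keras.callbacks import LearningRateScheduler

def lr_schedule(epoch):
    # Step schedule from this diff. Keras passes the 0-based epoch index,
    # so the drop to 0.0001 lands on displayed epoch 101 and the drop to
    # 1e-05 on displayed epoch 151, as seen in the log.
    if epoch < 100:
        return 0.001
    elif epoch < 150:
        return 0.0001
    else:
        return 0.00001

lr_callback = LearningRateScheduler(lr_schedule, verbose=1)
# model.fit_generator(..., callbacks=[lr_callback, ...])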
+ "\n", + "Epoch 00153: val_loss did not improve from 3.39067\n", + "Epoch 154/300\n", + "\n", + "Epoch 00154: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.8679 - val_loss: 3.4144\n", + "\n", + "Epoch 00154: val_loss did not improve from 3.39067\n", + "Epoch 155/300\n", + "\n", + "Epoch 00155: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8372 - val_loss: 3.4349\n", + "\n", + "Epoch 00155: val_loss did not improve from 3.39067\n", + "Epoch 156/300\n", + "\n", + "Epoch 00156: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8084 - val_loss: 3.4527\n", + "\n", + "Epoch 00156: val_loss did not improve from 3.39067\n", + "Epoch 157/300\n", + "\n", + "Epoch 00157: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.9643 - val_loss: 3.4152\n", + "\n", + "Epoch 00157: val_loss did not improve from 3.39067\n", + "Epoch 158/300\n", + "\n", + "Epoch 00158: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8958 - val_loss: 3.4274\n", + "\n", + "Epoch 00158: val_loss did not improve from 3.39067\n", + "Epoch 159/300\n", + "\n", + "Epoch 00159: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7517 - val_loss: 3.4122\n", + "\n", + "Epoch 00159: val_loss did not improve from 3.39067\n", + "Epoch 160/300\n", + "\n", + "Epoch 00160: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8706 - val_loss: 3.4046\n", + "\n", + "Epoch 00160: val_loss did not improve from 3.39067\n", + "Epoch 161/300\n", + "\n", + "Epoch 00161: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8000 - val_loss: 3.4109\n", + "\n", + "Epoch 00161: val_loss did not improve from 3.39067\n", + "Epoch 162/300\n", + "\n", + "Epoch 00162: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7853 - val_loss: 3.4250\n", + "\n", + "Epoch 00162: val_loss did not improve from 3.39067\n", + "Epoch 163/300\n", + "\n", + "Epoch 00163: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8154 - val_loss: 3.4399\n", + "\n", + "Epoch 00163: val_loss did not improve from 3.39067\n", + "Epoch 164/300\n", + "\n", + "Epoch 00164: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8560 - val_loss: 3.4332\n", + "\n", + "Epoch 00164: val_loss did not improve from 3.39067\n", + "Epoch 165/300\n", + "\n", + "Epoch 00165: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7278 - val_loss: 3.4132\n", + "\n", + "Epoch 00165: val_loss did not improve from 3.39067\n", + "Epoch 166/300\n", + "\n", + "Epoch 00166: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7803 - val_loss: 3.3863\n", + "\n", + "Epoch 00166: val_loss improved from 3.39067 to 3.38626, 
saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 167/300\n", + "\n", + "Epoch 00167: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.8497 - val_loss: 3.4022\n", + "\n", + "Epoch 00167: val_loss did not improve from 3.38626\n", + "Epoch 168/300\n", + "\n", + "Epoch 00168: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.8377 - val_loss: 3.4278\n", + "\n", + "Epoch 00168: val_loss did not improve from 3.38626\n", + "Epoch 169/300\n", + "\n", + "Epoch 00169: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8392 - val_loss: 3.4152\n", + "\n", + "Epoch 00169: val_loss did not improve from 3.38626\n", + "Epoch 170/300\n", + "\n", + "Epoch 00170: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7774 - val_loss: 3.3980\n", + "\n", + "Epoch 00170: val_loss did not improve from 3.38626\n", + "Epoch 171/300\n", + "\n", + "Epoch 00171: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8115 - val_loss: 3.4006\n", + "\n", + "Epoch 00171: val_loss did not improve from 3.38626\n", + "Epoch 172/300\n", + "\n", + "Epoch 00172: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.9257 - val_loss: 3.4249\n", + "\n", + "Epoch 00172: val_loss did not improve from 3.38626\n", + "Epoch 173/300\n", + "\n", + "Epoch 00173: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7990 - val_loss: 3.4052\n", + "\n", + "Epoch 00173: val_loss did not improve from 3.38626\n", + "Epoch 174/300\n", + "\n", + "Epoch 00174: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8021 - val_loss: 3.4211\n", + "\n", + "Epoch 00174: val_loss did not improve from 3.38626\n", + "Epoch 175/300\n", + "\n", + "Epoch 00175: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8549 - val_loss: 3.4450\n", + "\n", + "Epoch 00175: val_loss did not improve from 3.38626\n", + "Epoch 176/300\n", + "\n", + "Epoch 00176: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 23s 234ms/step - loss: 3.8271 - val_loss: 3.3880\n", + "\n", + "Epoch 00176: val_loss did not improve from 3.38626\n", + "Epoch 177/300\n", + "\n", + "Epoch 00177: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7684 - val_loss: 3.3919\n", + "\n", + "Epoch 00177: val_loss did not improve from 3.38626\n", + "Epoch 178/300\n", + "\n", + "Epoch 00178: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7729 - val_loss: 3.4049\n", + "\n", + "Epoch 00178: val_loss did not improve from 3.38626\n", + "Epoch 179/300\n", + "\n", + "Epoch 00179: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8642 - val_loss: 3.4236\n", + "\n", + "Epoch 
00179: val_loss did not improve from 3.38626\n", + "Epoch 180/300\n", + "\n", + "Epoch 00180: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8538 - val_loss: 3.4058\n", + "\n", + "Epoch 00180: val_loss did not improve from 3.38626\n", + "Epoch 181/300\n", + "\n", + "Epoch 00181: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7679 - val_loss: 3.4263\n", + "\n", + "Epoch 00181: val_loss did not improve from 3.38626\n", + "Epoch 182/300\n", + "\n", + "Epoch 00182: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7583 - val_loss: 3.4005\n", + "\n", + "Epoch 00182: val_loss did not improve from 3.38626\n", + "Epoch 183/300\n", + "\n", + "Epoch 00183: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 234ms/step - loss: 3.8434 - val_loss: 3.3924\n", + "\n", + "Epoch 00183: val_loss did not improve from 3.38626\n", + "Epoch 184/300\n", + "\n", + "Epoch 00184: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7685 - val_loss: 3.4177\n", + "\n", + "Epoch 00184: val_loss did not improve from 3.38626\n", + "Epoch 185/300\n", + "\n", + "Epoch 00185: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8087 - val_loss: 3.3950\n", + "\n", + "Epoch 00185: val_loss did not improve from 3.38626\n", + "Epoch 186/300\n", + "\n", + "Epoch 00186: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.7872 - val_loss: 3.4330\n", + "\n", + "Epoch 00186: val_loss did not improve from 3.38626\n", + "Epoch 187/300\n", + "\n", + "Epoch 00187: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.7791 - val_loss: 3.4135\n", + "\n", + "Epoch 00187: val_loss did not improve from 3.38626\n", + "Epoch 188/300\n", + "\n", + "Epoch 00188: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8809 - val_loss: 3.4036\n", + "\n", + "Epoch 00188: val_loss did not improve from 3.38626\n", + "Epoch 189/300\n", + "\n", + "Epoch 00189: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8102 - val_loss: 3.3998\n", + "\n", + "Epoch 00189: val_loss did not improve from 3.38626\n", + "Epoch 190/300\n", + "\n", + "Epoch 00190: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7757 - val_loss: 3.3909\n", + "\n", + "Epoch 00190: val_loss did not improve from 3.38626\n", + "Epoch 191/300\n", + "\n", + "Epoch 00191: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.7651 - val_loss: 3.4117\n", + "\n", + "Epoch 00191: val_loss did not improve from 3.38626\n", + "Epoch 192/300\n", + "\n", + "Epoch 00192: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8146 - val_loss: 3.4259\n", + "\n", + "Epoch 00192: val_loss did not improve from 3.38626\n", + "Epoch 193/300\n", 
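The alternating "val_loss improved ..., saving model to experimento_ssd7_fault_1.h5" and "val_loss did not improve" messages in this log are characteristic of a Keras ModelCheckpoint callback monitoring validation loss with save_best_only enabled. A sketch of such a callback under that assumption; the notebook's exact arguments are not shown in this diff.

from keras.callbacks import ModelCheckpoint

# Overwrites experimento_ssd7_fault_1.h5 only when val_loss reaches a new
# minimum; verbose=1 prints the improved / did-not-improve lines above.
checkpoint = ModelCheckpoint(filepath='experimento_ssd7_fault_1.h5',
                             monitor='val_loss',
                             save_best_only=True,
                             verbose=1)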
+ "\n", + "Epoch 00193: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8433 - val_loss: 3.4080\n", + "\n", + "Epoch 00193: val_loss did not improve from 3.38626\n", + "Epoch 194/300\n", + "\n", + "Epoch 00194: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8861 - val_loss: 3.4040\n", + "\n", + "Epoch 00194: val_loss did not improve from 3.38626\n", + "Epoch 195/300\n", + "\n", + "Epoch 00195: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7946 - val_loss: 3.4245\n", + "\n", + "Epoch 00195: val_loss did not improve from 3.38626\n", + "Epoch 196/300\n", + "\n", + "Epoch 00196: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.8797 - val_loss: 3.4409\n", + "\n", + "Epoch 00196: val_loss did not improve from 3.38626\n", + "Epoch 197/300\n", + "\n", + "Epoch 00197: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7990 - val_loss: 3.3932\n", + "\n", + "Epoch 00197: val_loss did not improve from 3.38626\n", + "Epoch 198/300\n", + "\n", + "Epoch 00198: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8696 - val_loss: 3.3906\n", + "\n", + "Epoch 00198: val_loss did not improve from 3.38626\n", + "Epoch 199/300\n", + "\n", + "Epoch 00199: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 234ms/step - loss: 3.7860 - val_loss: 3.4140\n", + "\n", + "Epoch 00199: val_loss did not improve from 3.38626\n", + "Epoch 200/300\n", + "\n", + "Epoch 00200: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.7718 - val_loss: 3.4198\n", + "\n", + "Epoch 00200: val_loss did not improve from 3.38626\n", + "Epoch 201/300\n", + "\n", + "Epoch 00201: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7662 - val_loss: 3.4044\n", + "\n", + "Epoch 00201: val_loss did not improve from 3.38626\n", + "Epoch 202/300\n", + "\n", + "Epoch 00202: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7226 - val_loss: 3.3981\n", + "\n", + "Epoch 00202: val_loss did not improve from 3.38626\n", + "Epoch 203/300\n", + "\n", + "Epoch 00203: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8132 - val_loss: 3.4034\n", + "\n", + "Epoch 00203: val_loss did not improve from 3.38626\n", + "Epoch 204/300\n", + "\n", + "Epoch 00204: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8098 - val_loss: 3.4484\n", + "\n", + "Epoch 00204: val_loss did not improve from 3.38626\n", + "Epoch 205/300\n", + "\n", + "Epoch 00205: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8758 - val_loss: 3.4032\n", + "\n", + "Epoch 00205: val_loss did not improve from 3.38626\n", + "Epoch 206/300\n", + "\n", + "Epoch 00206: LearningRateScheduler setting learning rate to 
1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8907 - val_loss: 3.4192\n", + "\n", + "Epoch 00206: val_loss did not improve from 3.38626\n", + "Epoch 207/300\n", + "\n", + "Epoch 00207: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.9101 - val_loss: 3.3906\n", + "\n", + "Epoch 00207: val_loss did not improve from 3.38626\n", + "Epoch 208/300\n", + "\n", + "Epoch 00208: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8165 - val_loss: 3.4298\n", + "\n", + "Epoch 00208: val_loss did not improve from 3.38626\n", + "Epoch 209/300\n", + "\n", + "Epoch 00209: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 3.8523 - val_loss: 3.3792\n", + "\n", + "Epoch 00209: val_loss improved from 3.38626 to 3.37916, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 210/300\n", + "\n", + "Epoch 00210: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8496 - val_loss: 3.3872\n", + "\n", + "Epoch 00210: val_loss did not improve from 3.37916\n", + "Epoch 211/300\n", + "\n", + "Epoch 00211: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7777 - val_loss: 3.3874\n", + "\n", + "Epoch 00211: val_loss did not improve from 3.37916\n", + "Epoch 212/300\n", + "\n", + "Epoch 00212: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 23s 234ms/step - loss: 3.7629 - val_loss: 3.3957\n", + "\n", + "Epoch 00212: val_loss did not improve from 3.37916\n", + "Epoch 213/300\n", + "\n", + "Epoch 00213: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8097 - val_loss: 3.3919\n", + "\n", + "Epoch 00213: val_loss did not improve from 3.37916\n", + "Epoch 214/300\n", + "\n", + "Epoch 00214: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8068 - val_loss: 3.4060\n", + "\n", + "Epoch 00214: val_loss did not improve from 3.37916\n", + "Epoch 215/300\n", + "\n", + "Epoch 00215: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7517 - val_loss: 3.4154\n", + "\n", + "Epoch 00215: val_loss did not improve from 3.37916\n", + "Epoch 216/300\n", + "\n", + "Epoch 00216: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 3.7739 - val_loss: 3.4419\n", + "\n", + "Epoch 00216: val_loss did not improve from 3.37916\n", + "Epoch 217/300\n", + "\n", + "Epoch 00217: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8603 - val_loss: 3.4097\n", + "\n", + "Epoch 00217: val_loss did not improve from 3.37916\n", + "Epoch 218/300\n", + "\n", + "Epoch 00218: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8465 - val_loss: 3.4165\n", + "\n", + "Epoch 00218: val_loss did not improve from 3.37916\n", + "Epoch 219/300\n", + "\n", + "Epoch 
00219: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8130 - val_loss: 3.4307\n", + "\n", + "Epoch 00219: val_loss did not improve from 3.37916\n", + "Epoch 220/300\n", + "\n", + "Epoch 00220: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8353 - val_loss: 3.4286\n", + "\n", + "Epoch 00220: val_loss did not improve from 3.37916\n", + "Epoch 221/300\n", + "\n", + "Epoch 00221: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.7896 - val_loss: 3.3915\n", + "\n", + "Epoch 00221: val_loss did not improve from 3.37916\n", + "Epoch 222/300\n", + "\n", + "Epoch 00222: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.7998 - val_loss: 3.3835\n", + "\n", + "Epoch 00222: val_loss did not improve from 3.37916\n", + "Epoch 223/300\n", + "\n", + "Epoch 00223: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8375 - val_loss: 3.3960\n", + "\n", + "Epoch 00223: val_loss did not improve from 3.37916\n", + "Epoch 224/300\n", + "\n", + "Epoch 00224: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7733 - val_loss: 3.4285\n", + "\n", + "Epoch 00224: val_loss did not improve from 3.37916\n", + "Epoch 225/300\n", + "\n", + "Epoch 00225: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8395 - val_loss: 3.3897\n", + "\n", + "Epoch 00225: val_loss did not improve from 3.37916\n", + "Epoch 226/300\n", + "\n", + "Epoch 00226: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 3.7564 - val_loss: 3.3955\n", + "\n", + "Epoch 00226: val_loss did not improve from 3.37916\n", + "Epoch 227/300\n", + "\n", + "Epoch 00227: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7657 - val_loss: 3.4127\n", + "\n", + "Epoch 00227: val_loss did not improve from 3.37916\n", + "Epoch 228/300\n", + "\n", + "Epoch 00228: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8107 - val_loss: 3.4006\n", + "\n", + "Epoch 00228: val_loss did not improve from 3.37916\n", + "Epoch 229/300\n", + "\n", + "Epoch 00229: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.8435 - val_loss: 3.3819\n", + "\n", + "Epoch 00229: val_loss did not improve from 3.37916\n", + "Epoch 230/300\n", + "\n", + "Epoch 00230: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.7888 - val_loss: 3.3923\n", + "\n", + "Epoch 00230: val_loss did not improve from 3.37916\n", + "Epoch 231/300\n", + "\n", + "Epoch 00231: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.7604 - val_loss: 3.3797\n", + "\n", + "Epoch 00231: val_loss did not improve from 3.37916\n", + "Epoch 232/300\n", + "\n", + "Epoch 00232: LearningRateScheduler setting learning rate to 1e-05.\n", + 
"100/100 [==============================] - 23s 231ms/step - loss: 3.7711 - val_loss: 3.4117\n", + "\n", + "Epoch 00232: val_loss did not improve from 3.37916\n", + "Epoch 233/300\n", + "\n", + "Epoch 00233: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.9035 - val_loss: 3.3950\n", + "\n", + "Epoch 00233: val_loss did not improve from 3.37916\n", + "Epoch 234/300\n", + "\n", + "Epoch 00234: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.7080 - val_loss: 3.3883\n", + "\n", + "Epoch 00234: val_loss did not improve from 3.37916\n", + "Epoch 235/300\n", + "\n", + "Epoch 00235: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.7244 - val_loss: 3.3910\n", + "\n", + "Epoch 00235: val_loss did not improve from 3.37916\n", + "Epoch 236/300\n", + "\n", + "Epoch 00236: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7635 - val_loss: 3.3841\n", + "\n", + "Epoch 00236: val_loss did not improve from 3.37916\n", + "Epoch 237/300\n", + "\n", + "Epoch 00237: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8134 - val_loss: 3.3761\n", + "\n", + "Epoch 00237: val_loss improved from 3.37916 to 3.37612, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 238/300\n", + "\n", + "Epoch 00238: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8370 - val_loss: 3.3885\n", + "\n", + "Epoch 00238: val_loss did not improve from 3.37612\n", + "Epoch 239/300\n", + "\n", + "Epoch 00239: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 3.8334 - val_loss: 3.4031\n", + "\n", + "Epoch 00239: val_loss did not improve from 3.37612\n", + "Epoch 240/300\n", + "\n", + "Epoch 00240: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7503 - val_loss: 3.3764\n", + "\n", + "Epoch 00240: val_loss did not improve from 3.37612\n", + "Epoch 241/300\n", + "\n", + "Epoch 00241: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.8170 - val_loss: 3.4142\n", + "\n", + "Epoch 00241: val_loss did not improve from 3.37612\n", + "Epoch 242/300\n", + "\n", + "Epoch 00242: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 234ms/step - loss: 3.8331 - val_loss: 3.3930\n", + "\n", + "Epoch 00242: val_loss did not improve from 3.37612\n", + "Epoch 243/300\n", + "\n", + "Epoch 00243: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7810 - val_loss: 3.3990\n", + "\n", + "Epoch 00243: val_loss did not improve from 3.37612\n", + "Epoch 244/300\n", + "\n", + "Epoch 00244: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.7762 - val_loss: 3.3967\n", + "\n", + "Epoch 00244: val_loss did not improve from 3.37612\n", + "Epoch 245/300\n", + "\n", + "Epoch 00245: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 23s 233ms/step - loss: 3.7134 - val_loss: 3.3882\n", + "\n", + "Epoch 00245: val_loss did not improve from 3.37612\n", + "Epoch 246/300\n", + "\n", + "Epoch 00246: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7542 - val_loss: 3.3917\n", + "\n", + "Epoch 00246: val_loss did not improve from 3.37612\n", + "Epoch 247/300\n", + "\n", + "Epoch 00247: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8824 - val_loss: 3.3967\n", + "\n", + "Epoch 00247: val_loss did not improve from 3.37612\n", + "Epoch 248/300\n", + "\n", + "Epoch 00248: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 23s 228ms/step - loss: 3.8210 - val_loss: 3.3955\n", + "\n", + "Epoch 00248: val_loss did not improve from 3.37612\n", + "Epoch 249/300\n", + "\n", + "Epoch 00249: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7440 - val_loss: 3.3825\n", + "\n", + "Epoch 00249: val_loss did not improve from 3.37612\n", + "Epoch 250/300\n", + "\n", + "Epoch 00250: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7831 - val_loss: 3.3681\n", + "\n", + "Epoch 00250: val_loss improved from 3.37612 to 3.36807, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 251/300\n", + "\n", + "Epoch 00251: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7880 - val_loss: 3.4132\n", + "\n", + "Epoch 00251: val_loss did not improve from 3.36807\n", + "Epoch 252/300\n", + "\n", + "Epoch 00252: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8631 - val_loss: 3.4097\n", + "\n", + "Epoch 00252: val_loss did not improve from 3.36807\n", + "Epoch 253/300\n", + "\n", + "Epoch 00253: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8706 - val_loss: 3.4364\n", + "\n", + "Epoch 00253: val_loss did not improve from 3.36807\n", + "Epoch 254/300\n", + "\n", + "Epoch 00254: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7304 - val_loss: 3.3731\n", + "\n", + "Epoch 00254: val_loss did not improve from 3.36807\n", + "Epoch 255/300\n", + "\n", + "Epoch 00255: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7871 - val_loss: 3.3941\n", + "\n", + "Epoch 00255: val_loss did not improve from 3.36807\n", + "Epoch 256/300\n", + "\n", + "Epoch 00256: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7739 - val_loss: 3.3988\n", + "\n", + "Epoch 00256: val_loss did not improve from 3.36807\n", + "Epoch 257/300\n", + "\n", + "Epoch 00257: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7946 - val_loss: 3.3718\n", + "\n", + "Epoch 00257: val_loss did not improve from 3.36807\n", + "Epoch 258/300\n", + "\n", + "Epoch 00258: 
LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7762 - val_loss: 3.3714\n", + "\n", + "Epoch 00258: val_loss did not improve from 3.36807\n", + "Epoch 259/300\n", + "\n", + "Epoch 00259: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8246 - val_loss: 3.4150\n", + "\n", + "Epoch 00259: val_loss did not improve from 3.36807\n", + "Epoch 260/300\n", + "\n", + "Epoch 00260: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8236 - val_loss: 3.3795\n", + "\n", + "Epoch 00260: val_loss did not improve from 3.36807\n", + "Epoch 261/300\n", + "\n", + "Epoch 00261: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 226ms/step - loss: 3.7450 - val_loss: 3.3996\n", + "\n", + "Epoch 00261: val_loss did not improve from 3.36807\n", + "Epoch 262/300\n", + "\n", + "Epoch 00262: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8179 - val_loss: 3.4078\n", + "\n", + "Epoch 00262: val_loss did not improve from 3.36807\n", + "Epoch 263/300\n", + "\n", + "Epoch 00263: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.7841 - val_loss: 3.3827\n", + "\n", + "Epoch 00263: val_loss did not improve from 3.36807\n", + "Epoch 264/300\n", + "\n", + "Epoch 00264: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.7825 - val_loss: 3.3754\n", + "\n", + "Epoch 00264: val_loss did not improve from 3.36807\n", + "Epoch 265/300\n", + "\n", + "Epoch 00265: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8466 - val_loss: 3.3887\n", + "\n", + "Epoch 00265: val_loss did not improve from 3.36807\n", + "Epoch 266/300\n", + "\n", + "Epoch 00266: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7828 - val_loss: 3.3693\n", + "\n", + "Epoch 00266: val_loss did not improve from 3.36807\n", + "Epoch 267/300\n", + "\n", + "Epoch 00267: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8028 - val_loss: 3.3789\n", + "\n", + "Epoch 00267: val_loss did not improve from 3.36807\n", + "Epoch 268/300\n", + "\n", + "Epoch 00268: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8489 - val_loss: 3.3661\n", + "\n", + "Epoch 00268: val_loss improved from 3.36807 to 3.36611, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 269/300\n", + "\n", + "Epoch 00269: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.7001 - val_loss: 3.3673\n", + "\n", + "Epoch 00269: val_loss did not improve from 3.36611\n", + "Epoch 270/300\n", + "\n", + "Epoch 00270: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.8006 - val_loss: 3.3849\n", + "\n", + "Epoch 00270: val_loss did not improve from 3.36611\n", + "Epoch 271/300\n", + "\n", + "Epoch 00271: LearningRateScheduler 
setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 234ms/step - loss: 3.7827 - val_loss: 3.3762\n", + "\n", + "Epoch 00271: val_loss did not improve from 3.36611\n", + "Epoch 272/300\n", + "\n", + "Epoch 00272: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7435 - val_loss: 3.4069\n", + "\n", + "Epoch 00272: val_loss did not improve from 3.36611\n", + "Epoch 273/300\n", + "\n", + "Epoch 00273: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.6934 - val_loss: 3.3726\n", + "\n", + "Epoch 00273: val_loss did not improve from 3.36611\n", + "Epoch 274/300\n", + "\n", + "Epoch 00274: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8024 - val_loss: 3.3854\n", + "\n", + "Epoch 00274: val_loss did not improve from 3.36611\n", + "Epoch 275/300\n", + "\n", + "Epoch 00275: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.7820 - val_loss: 3.3936\n", + "\n", + "Epoch 00275: val_loss did not improve from 3.36611\n", + "Epoch 276/300\n", + "\n", + "Epoch 00276: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.8680 - val_loss: 3.4046\n", + "\n", + "Epoch 00276: val_loss did not improve from 3.36611\n", + "Epoch 277/300\n", + "\n", + "Epoch 00277: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7550 - val_loss: 3.3647\n", + "\n", + "Epoch 00277: val_loss improved from 3.36611 to 3.36472, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 278/300\n", + "\n", + "Epoch 00278: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.8021 - val_loss: 3.3870\n", + "\n", + "Epoch 00278: val_loss did not improve from 3.36472\n", + "Epoch 279/300\n", + "\n", + "Epoch 00279: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.8422 - val_loss: 3.3635\n", + "\n", + "Epoch 00279: val_loss improved from 3.36472 to 3.36354, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 280/300\n", + "\n", + "Epoch 00280: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7865 - val_loss: 3.3686\n", + "\n", + "Epoch 00280: val_loss did not improve from 3.36354\n", + "Epoch 281/300\n", + "\n", + "Epoch 00281: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7568 - val_loss: 3.3644\n", + "\n", + "Epoch 00281: val_loss did not improve from 3.36354\n", + "Epoch 282/300\n", + "\n", + "Epoch 00282: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7820 - val_loss: 3.3626\n", + "\n", + "Epoch 00282: val_loss improved from 3.36354 to 3.36264, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 283/300\n", + "\n", + "Epoch 00283: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 23s 228ms/step - loss: 3.6678 - 
val_loss: 3.3912\n", + "\n", + "Epoch 00283: val_loss did not improve from 3.36264\n", + "Epoch 284/300\n", + "\n", + "Epoch 00284: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7719 - val_loss: 3.3567\n", + "\n", + "Epoch 00284: val_loss improved from 3.36264 to 3.35674, saving model to experimento_ssd7_fault_1.h5\n", + "Epoch 285/300\n", + "\n", + "Epoch 00285: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 228ms/step - loss: 3.7915 - val_loss: 3.3633\n", + "\n", + "Epoch 00285: val_loss did not improve from 3.35674\n", + "Epoch 286/300\n", + "\n", + "Epoch 00286: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.8256 - val_loss: 3.3701\n", + "\n", + "Epoch 00286: val_loss did not improve from 3.35674\n", + "Epoch 287/300\n", + "\n", + "Epoch 00287: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7223 - val_loss: 3.3879\n", + "\n", + "Epoch 00287: val_loss did not improve from 3.35674\n", + "Epoch 288/300\n", + "\n", + "Epoch 00288: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.8087 - val_loss: 3.3747\n", + "\n", + "Epoch 00288: val_loss did not improve from 3.35674\n", + "Epoch 289/300\n", + "\n", + "Epoch 00289: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 24s 235ms/step - loss: 3.8417 - val_loss: 3.3716\n", + "\n", + "Epoch 00289: val_loss did not improve from 3.35674\n", + "Epoch 290/300\n", + "\n", + "Epoch 00290: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7632 - val_loss: 3.3679\n", + "\n", + "Epoch 00290: val_loss did not improve from 3.35674\n", + "Epoch 291/300\n", + "\n", + "Epoch 00291: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.7730 - val_loss: 3.3928\n", + "\n", + "Epoch 00291: val_loss did not improve from 3.35674\n", + "Epoch 292/300\n", + "\n", + "Epoch 00292: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 229ms/step - loss: 3.7766 - val_loss: 3.3722\n", + "\n", + "Epoch 00292: val_loss did not improve from 3.35674\n", + "Epoch 293/300\n", + "\n", + "Epoch 00293: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7631 - val_loss: 3.3627\n", + "\n", + "Epoch 00293: val_loss did not improve from 3.35674\n", + "Epoch 294/300\n", + "\n", + "Epoch 00294: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 227ms/step - loss: 3.6896 - val_loss: 3.3722\n", + "\n", + "Epoch 00294: val_loss did not improve from 3.35674\n", + "Epoch 295/300\n", + "\n", + "Epoch 00295: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.7635 - val_loss: 3.3677\n", + "\n", + "Epoch 00295: val_loss did not improve from 3.35674\n", + "Epoch 296/300\n", + "\n", + "Epoch 00296: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7644 - val_loss: 3.3910\n", 
+ "\n", + "Epoch 00296: val_loss did not improve from 3.35674\n", + "Epoch 297/300\n", + "\n", + "Epoch 00297: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 232ms/step - loss: 3.7975 - val_loss: 3.3956\n", + "\n", + "Epoch 00297: val_loss did not improve from 3.35674\n", + "Epoch 298/300\n", + "\n", + "Epoch 00298: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 233ms/step - loss: 3.8009 - val_loss: 3.4185\n", + "\n", + "Epoch 00298: val_loss did not improve from 3.35674\n", + "Epoch 299/300\n", + "\n", + "Epoch 00299: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 230ms/step - loss: 3.7140 - val_loss: 3.3914\n", + "\n", + "Epoch 00299: val_loss did not improve from 3.35674\n", + "Epoch 300/300\n", + "\n", + "Epoch 00300: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 23s 231ms/step - loss: 3.7595 - val_loss: 3.3685\n", + "\n", + "Epoch 00300: val_loss did not improve from 3.35674\n" ] } ], @@ -677,7 +2449,7 @@ "\n", "\n", "initial_epoch = 0\n", - "final_epoch = 500 #config['train']['nb_epochs']\n", + "final_epoch = 300 #config['train']['nb_epochs']\n", "steps_per_epoch = 100\n", "\n", "history = model.fit_generator(generator=train_generator,\n", @@ -696,9 +2468,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "['background', '1']" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "classes" ] @@ -712,9 +2495,36 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dict_keys(['val_loss', 'loss', 'lr'])\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJztnXd4XNW1t981VV2yZLnjDgYbGxsMmBoIoSfUBAiBACGQHuALJHDTcxPCvTchCSSBOMEJCSUU0wKEGroxYBtj3HuRiyTL6tL0/f2xz2hGoxlZttVnvc+jZ86cuo+OtH9nrbXX2mKMQVEURcleXH3dAEVRFKVvUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFKUTRORvIvLzLu67WUQ+daDnUZTeRoVAURQly1EhUBRFyXJUCJQBj+OSuUVElolIs4jcJyLDReTfItIoIq+IyJCk/c8TkRUiUicir4vIYUnbZonIEue4R4CclGt9WkSWOscuEJEZ+9nm60RkvYjsEZFnRGSUs15E5DciUiUi9c49He5sO0dEVjpt2y4iN+/XL0xRUlAhUAYLFwOnA4cAnwH+DfwXMBT7d/5tABE5BHgYuBEoB54H/iUiPhHxAU8B/wBKgcec8+IceyQwD/gKUAb8CXhGRPz70lAR+STwS+ASYCSwBfins/kM4GTnPkqAS4EaZ9t9wFeMMYXA4cB/9uW6ipIJFQJlsHC3MabSGLMdeAt4zxjzoTEmCDwJzHL2uxR4zhjzsjEmDPwKyAWOB+YAXuC3xpiwMeZx4IOka1wH/MkY854xJmqMuR8IOsftC18A5hljljjtuw04TkTGA2GgEDgUEGPMKmPMTue4MDBVRIqMMbXGmCX7eF1FSYsKgTJYqExabk3zvcBZHoV9AwfAGBMDtgGjnW3bTftKjFuSlscB33HcQnUiUgcc5By3L6S2oQn71j/aGPMf4PfAH4BKEZkrIkXOrhcD5wBbROQNETluH6+rKGlRIVCyjR3YDh2wPnlsZ74d2AmMdtbFGZu0vA34hTGmJOknzxjz8AG2IR/ratoOYIy5yxhzFDAN6yK6xVn/gTHmfGAY1oX16D5eV1HSokKgZBuPAueKyGki4gW+g3XvLADeBSLAt0XEIyIXAcckHftn4KsicqwT1M0XkXNFpHAf2/AQcI2IzHTiC7djXVmbReRo5/xeoBkIAFEnhvEFESl2XFoNQPQAfg+K0oYKgZJVGGPWAFcAdwO7sYHlzxhjQsaYEHARcDVQi40nPJF07CJsnOD3zvb1zr772oZXgR8C87FWyCTgMmdzEVZwarHuoxpsHAPgSmCziDQAX3XuQ1EOGNGJaRRFUbIbtQgURVGynB4TAhGZ5yTFLE9aVyoiL4vIOudzSGfnUBRFUXqenrQI/gaclbLuVuBVY8zBwKvOd0VRFKUP6dEYgZMg86wxJp4ivwY4xRizU0RGAq8bY6b0WAMURVGUveLp5esNj2dJOmIwLNOOInI9cD1Afn7+UYceeug+X6yyIUBVY5Dpo4v3t72KoigDlsWLF+82xpTvbb/eFoIuY4yZC8wFmD17tlm0aNE+n+OuV9dx58tree/2c3C7ZO8HKIqiDCJEZMve9+r9UUOVjksI57OqJy8W7/zD0VhPXkZRFGVA09tC8AxwlbN8FfB0T17M67ZCEI1proSiKEomenL46MPYlP0pIlIhItcCdwCni8g6bMngO3rq+gBul729SFSFQFEUJRMDIrM4XYwgHA5TUVFBIBDIeFxTMEJdS5iRxTkDNkaQk5PDmDFj8Hq9fd0URVEGGCKy2Bgze2/79dtg8d6oqKigsLCQ8ePH075YZIKapiDb61qZMrIIr3vgJVEbY6ipqaGiooIJEyb0dXMURRmkDLze0SEQCFBWVpZRBADimwaA0ZMWEaGsrKxTq0dRFOVAGbBCAHQqAgCC3W4YoErA3u9RURTlQBnQQrA3BrpFoCiK0hsMaiHoSerq6vjjH/+4z8edc8451NXV9UCLFEVR9o9BLQRxt0pPjIzKJATRaOeTRj3//POUlJR0e3sURVH2lwE7aqgrxL3rPeEZuvXWW9mwYQMzZ87E6/VSUFDAyJEjWbp0KStXruSCCy5g27ZtBAIBbrjhBq6//noAxo8fz6JFi2hqauLss8/mxBNPZMGCBYwePZqnn36a3NzcHmitoihKZgaFEPz0XytYuaOhw/pozBAIR8n1uXHtY9B16qgifvyZaRm333HHHSxfvpylS5fy+uuvc+6557J8+fK2YZ7z5s2jtLSU1tZWjj76aC6++GLKysranWPdunU8/PDD/PnPf+aSSy5h/vz5XHGFzj6oKErvMiiEoD9wzDHHtBvrf9ddd/Hkk08CsG3bNtatW9dBCCZMmMDMmTMBOOqoo9i8eXOvtVdRFCXOoBCCTG/uTcEIG6ubmDg0n4Kcns3Mzc/Pb1t+/fXXeeWVV3j33XfJy8vjlFNOSZsL4Pf725bdbjetra092kZFUZR0DO5gsfPZEzGCwsJCGhsb026rr69nyJAh5OXlsXr1ahYuXNgDLVAURekeBoVFkImezCMoKyvjhBNO4PDDDyc3N5fhw4e3bTvrrLO49957mTFjBlOmTGHOnDnd3wBFUZRuYsAWnVu1ahWHHXZYp8e1hiKsq2piXFk+xbkDt2hbV+5VURQlla4WnRvUriFNLVYURdk7g1oIejJGoCiKMlhQIVAURclyBrcQqGdIURRlrwxqIWAQlKFWFEXpaQa1EIj6hhRFUfbK4BYC57MndGB/y1AD/Pa3v6WlpaWbW6QoirJ/DG4h6MEYgQqBoiiDhcGdWdyDMYLkMtSnn346w4YN49FHHyUYDHLhhRfy05/+lObmZi655BIqKiqIRqP88Ic/pLKykh07dnDqqacydOhQXnvttW5vm6Ioyr4wOITg37fCro87rBYME4NRfB4XuPfR+BkxHc6+I+Pm5DLUL730Eo8//jjvv/8+xhjOO+883nzzTaqrqxk1ahTPPfccYGsQFRcXc+edd/Laa68xdOjQfWuToihKDzCoXUO9xUsvvcRLL73ErFmzOPLII1m9ejXr1q1j+vTpvPLKK3zve9/jrbfeori4uK+bqiiK0oHBYRFkeHMXYFNFHeWFOYwozumxyxtjuO222/jKV77SYdvixYt5/vnnue222zjjjDP40Y9+1GPtUBRF2R8Gv0Ug0iMxguQy1GeeeSbz5s2jqakJgO3bt1NVVcWOHTvIy8vjiiuu4Oabb2bJkiUdjlUURelrBodF0AlCz5ehPvvss7n88ss57rjjACgoKOCBBx5g/fr13HLLLbhcLrxeL/fccw8A119/PWeffTYjR47UYLGiKH3OoC5DDbBiRz1D8nyMKhm4k8JrGWpFUfYHLUPtIAgDQewURVH6ikEvBIhWmFAURemMAS0EXXnT76kYQW+h1oyiKD3NgBWCnJwcampq9tpRthWeG4AYY6ipqSEnp+eGviqKogzYUUNjxoyhoqKC6urqTverbAjgdb
toqvT1Usu6l5ycHMaMGdPXzVAUZRAzYIXA6/UyYcKEve737TvfYPKwAu654oheaJWiKMrAY8AKQVe5JPQk46sbgHmJlc010LIbyqf0WbsURVH6C30SIxCRm0RkhYgsF5GHRaTHnOBHRT9iakv7HAR+fxT84ZieuqSiKMqAoteFQERGA98GZhtjDgfcwGU9db1c04rHhNuvbK3tqcspiqIMOPpq1JAHyBURD5AH7OipC+USwBML9dTpFUVRBjy9LgTGmO3Ar4CtwE6g3hjzUup+InK9iCwSkUV7GxnUGbkmgDfVIlAURVHa6AvX0BDgfGACMArIF5ErUvczxsw1xsw2xswuLy/f7+vlmFa8qEWgKIqSib5wDX0K2GSMqTbGhIEngON76mI5JtA+RhCL9dSlFEVRBiR9IQRbgTkikiciApwGrOqRK8Wi+E0AL5GEAATqeuRSiqIoA5W+iBG8BzwOLAE+dtowt0cuFm5JLEeD9rNlT2KdWgeKoih9k1BmjPkx8OMev1CoObEcCYA3F1qThSACroFZekJRFKW7GLBF57pEkhCYSNwiqElsj0V6uUGKoij9j0EuBE1ti4FWx03UkmIRKIqiZDmDXAgSFkFzS1wI1CJQFEVJJmuEoKXFWW4nBNFebpCiKEr/Y5ALQcI11BJ3DSUHi40KgaIoyiAXgoRF0BYjCLcmtqtrSFEUJXuEoDUuBNGkchMqBIqiKINdCBKuoVCrYwlEkoVAXUOKoiiDXAgSFkEw6AhBPMMY1CJQFEUhi4QgHExnEagQKIqiDHIhaAIEgEjoACyCzW/DnVMh2Ni97VMURekHDHIhaIacYgDCwYBdF9kPIdi9Fhq2Q/PuxLqmKnj8SxBsynycoijKAGDwC0FeGQDRsCME0RB48+xyV6uPxoPKycKx9V1YPh8ql3dTYxVFUfqGwS0EJ94EZ/4CgFhaIeiiRRANJ46NE7cE1F2kKMoAp0/KUPca444HYwCIhuKuoRD48qCFrgtBfL9o0kxncQFQIVAUZYAzuC0CABEi4qOlpZlQJGaDxd58u63LQhDuuH9IhUBRlMHB4BcCwHj8eEyY1bsaEhYBtE8o27MRlj6U/gTRdBaB4xoKabBYUZSBTVYIgcvjx0+YZRX1jkWQJkbw8OXw1NegNc2cxvH9YklCENIYgaIog4PsEAJfDgWeKMu21drho+mEwDgjiKpXdzxBTIPFiqIMXrJCCMTtZ3geLK+oBYyduxjaC0HZZPtZtbLjCdpGDSXHCBwh2L4EfjEKajd3d7MVRVF6hawQAjx+hvhhW3Wt/e5zgsXJ8xEUlNvPyjRC0JZHkGbU0NYFEG6G1c91b5sVRVF6iawRghJfFFe8I/emCRbH3/arVtnPtS/CXbOsK6nNNZQmRhDHX9T97VYURekFBnceQRy3nwKJ4cPpyH1pYgTxGkRVK2zuQdUqO5Io0JAhjyBFCOJWhqIoygAjayyCXAmT73YsgHR5BPFAcGutXR/v9COBhLWQbtRQHNPFchWKoij9jKwRAlc0xORSr/2e1iJI6uSjoYQwZHINpY4WSi5m1xWMgUeuhA3/2bfjFEVRupmsEQIiQQ4u89nv6WIEyR15OyEIJOUROJ/GdLQIIoH0126tg58Uw5oXUvYPwqpnYMu7+34/iqIo3UiWCEEORFoZV2ItgpArzfDR5ByBaDjJNRRMGj7qfIZbrCtI3OmPT6Zmvf1883/br48LR3QfLQlFUZRuJnuEIBxgTJHtuKsCzm136hpyOuhkiyDe2ccDxQXDE8dkcg25ve2PTd0/kkFAFEVReonsEAJvLkRaGV1gb3dni521rP3w0a64hhyxiLuFikYmjskkBC5nYFZqh68WgaIo/YTsEALHIhjpCMG2prgQZLAIIqEMriFn/3iguDBJCDJ16HGxUYtAUZR+SnYIgTcXokFyxHboWxqc9e0sghC4fYnlNougNbNFUNgFiyDdiCMYmBZBsBHmfxmaa/q6JYqidCPZIQSeHPsZsAqwqT4KSHuLIBJMJIVFwynDR1MSyuIxglEzwe131jXCq/9tp8dMJpoSX0i+XvLnQKByBXz8GGxf1NctURSlG8kOIYgXmQvUA7CuJoRxeTq6hnyFznIo4bKJBDqOGmrcaT8nngo/qISCEbDlHXjrV7D57fbXTle5NH7edOv7M+mm7FQUZcCTHULQZhFYIagNCkZcHUtM+Auc5dSEshTXUN1WGwQuHAEi4PHZjGSwQ0uTSRWROAPRIsjk5lIUZUDTJ0IgIiUi8riIrBaRVSJyXI9eMG4RBK1rKISXCO6UGEE4yTWUHCwOdHQN1W2F4jHgcvIIPDlJQtDa/tqpQ0/jtFkEPdypbn0PFvy+e87VFvhWIVCUwURfFZ37HfCCMeazIuID8nr0aikWwbAhRYSDLnypCWW+DBZBvOOL71+3FUrGJo51+xO1hjJZBLE+ChbPO8N+Hv/NAz+XuoYUZVDS6xaBiBQBJwP3ARhjQsaYNPNDdiPxkhKOEMwcX04oJpj4G64xKcHivSSU1W+D4iQh8PgSyx0sggxvz3Eh6EnX0K6PE8vGHPj5Mlk3iqIMaPrCNTQRqAb+KiIfishfRKRDDWcRuV5EFonIourq6gO7ore9RXDUxOGEjZv6ZqfTjkUBk2QRpJSYSPaNR4I2WJxsEcQtDugoBJncKL0RLF4+P7GcbP3sLxojUJRBSV8IgQc4ErjHGDMLaAZuTd3JGDPXGDPbGDO7vLz8AK8YHzVUBy4vx04sJ4KL3Q3OUM94Z5w2WBxImqEsAvUVdrmdayjJIkgdPpqpA+6NYHHz7sRyqkDtD5mGwiqKMqDpCyGoACqMMe853x/HCkPPkWwRePwcVJoL4qGm0fHnx91AcYsgEkwfI4iGoW6LXW5nEfgTy/3JIkiuiNodgpM6ekpRlEFBrwuBMWYXsE1EpjirTgPSTBTcjXiS8gjcPkQEr9dLTWMLraFoorNO6xoKJDq+WDjxll0wLOn8yUKQEixO7jSTy0n0hkXQTgi6wSJQ15CiDEr6Ko/gW8CDIrIMmAnc3qNX8yZlFjuddkGun1gkwrx3NnVwDd3zn1WY5I46edhkvLxEXDQgkV0MaSyCJNdQOMlt1BsWQbiHLAJ1DSnKoKJPhMAYs9Tx/88wxlxgjKnt0QvGLQJMmz8/1+9nZKGHe9/YQFOL7aBfWGuLyTU2t2AyZRbHYwDJcxR3ddRQKMla6G2LQGMEiqJkIDsyi71Jo3rySu2ny8Pkobk0BiI89+FWAJ5bbUcV+SSSfqrKWCYhSB41lCGPIHVbskXQHUM709HtMQJ1DSnKYCQ7hKDNIgDynRFILjfFfhfHTyrjoQV2FrELj56IcXnxEUGcTm/9jur2mcWhJns+V9LsZJ25htpZBMmuoXjHbLpnaGc6IoFE/aRuiRGoRaAog5HsEAKXKzHEs00IbNG5X140nTnjrL//pMNGg8dPnoQQ7Ft6JJDUecddQ76UtId2rqHUYHE0/bbufltPRyQIuSXddw11DSnKoCQ7hAASVkH+UPvpCMG4snxuO2MSAF5fDuL2M
syfeIsvkKQ36VgmIeiiayhdjAB6rmMNByDHEYLuiBG0uYZ6yIJRFKVPyB4hiHdi7SyClNnD3D5w+yjzJjrvfJLe3NssgqQRQ/Hj4jgdrjGG55btJJI8ZDTdqCGA3xwOH/xlf+6qcyKB7rUI1DWkKIOS7BGC+Jt6UoygrWOLtBeCEk+io8sn2SKI2BhBB4ugYx7BhupmvvHQEjZVJpVRymQRhJuheu3+3FXnRAKQU+wsd8eoIS06pyiDkewRgjhtriF3xzdctw/cXord9rvx5uOThI8/GgkRC+5NCGyHu7vJdvTNrclDODNYBGAtjYX3wLb39+++0tHtFsE+lqE2Bh65Eta/euDXVhSlx8hCIWgfLAYSJSYciyAeI5CcorbDgsZDKBhgXUUlra6UqtnxUUNun5OJHKOuxYpJIBBMbO8swSvUCC/cCvedfsC3CNhOOxrqGCP4+HH4/TEQi+3HOffRIoiGYNUzsPXdfb+Woii9RpYLQcobrtsLbh+uePawPyEEAXzEImHyTIDqYMo0DnGLIH7uSCu1LfacwVAA4oKS7J4JtyaGdgK0JrmQumOcflxoUi2CqpWwe00iQ3pfyDTbWsb9k3IxFEXpt2SfEOQluYZMSrDY47dv9fHx/v5ERx3Ah4coeRKgMpCUQxA/DhJup3ArtY5FEAwG7Qxp4mo/cicSbHd+GncllrcvPpA7dM7vWB++QhB3QoTinfL+CMG+Fp2Lx140pqAo/ZouCYGI3CAiRWK5T0SWiMgZPd24HiE+5j/uGvron/DMt+w6xzWUTgiC+PEQIZ8A25pdVDYE+MtbG23RurjrJy4y4RbqHIsgHA5hXF47OU4711BSIBfaC8HGNw78PuNC4PFbIWoraeGsDx6AEOytY49F4cHPwQYnNqAWgaL0a7o6VeWXjDG/E5EzgXLgGuCvwEs91rLupnCknVAmTlwInvxKYp3ba3/iQd28srZNnpwC3MFK3ITZVA8/++2b1LWEWbK1lmtGNHE0JFxDoRZqm21nKbEI2xsijPD48URSLIKkGATB+sRy/dYDv9+49eHNtWIQ/x4Xo2Djvp+zq66hYCOseylRoVVLUihKv6arQiDO5znAX40xH4mIdHZAv+Mb77V/I48LQbJAuP3tcwIKR7Qtuv254LzYtpDDyOJcPjNjFP9YuIXVy2t5xe9md854hgGEW6htscFYL1EaQlDo8lKcahEkxSDakTq5zf4Qfwv3+G0yXapFENoPIeiqRRC/VlxsenpeZkVRDoiuxggWi8hLWCF4UUQKgf0YdtKH5BRD4fDEd5fbujCS3vpx+9oPBS0c2bZYXJRw49x4ziz+fcNJ/Oz8abx008lMPmwW0wJ/5oa3ndhBuJW6lhB5PjceIoRx0xj1JHIZYlHrZ8/JIARdddvEYu1LWCQTtz48jkXQNkdyBtfQ+ldsYltnItRlIXCuEWhwvqsQKEp/pqtCcC12OsmjjTEtgBfrHhq4iJNH0Jw0H7LLbV1DcZIsgpzcRDZxYZEdiSMiHDK8kNsvms4FxxxCUaGNKZhgI+fW/oNTxvkoy3WRl5NDXdhDJOR0zs7cyW0xhWQKhnfdIvjXt+Cxq9JvS7YIvLkdhSA1WDz/y1C/DWq3wNaF8JNiqNvWfp/k4nud0SY2jhBosFhR+jVddQ0dByw1xjSLyBXYqSV/13PN6gU8ObbDbcs4HgYiKa6hhEWAN6mCaUpC2dACP7+8aDr/enkXvAMbl77ONaGHyJPJzBiZT32rh3W7IjQ2NjIEEq6oIeM7tqtwZGa3zWPX2DhEyVgrWrvXtxeyZOIxAU9Oe4sgU4yg1ZkSIhaBJX+3yxtehaOuTuzT1RhBqkWgQqAo/ZquWgT3AC0icgTwXWAL8Pcea1VvUDbJvhWbGJzzK7hlnV2fbBEkT0fpTUoiS80sdjhjpi1et3jFGnsJVwvEwhTk5REWP7vr6jj3rrdY8OHH9oAh4zqepHBkZtfQ9kVQuQKWz4flT1gha9mdft+4ReDNse6hcKprKEkIknMYwi2J0VKp7ehqQllbjCDuGlIhUJT+TFeFIGKMMcD5wO+MMb8DCvdyTP+m/NDEckFS7CDZIkiuKprsz88gBP48u0+pM+HaEGmCaBi3x0tpcRGh1mZW7Gjg+QUfAnDN01UpZxAoGIbJ5Bpq3g3BBhobamloqLVCFqhP/4YeyWARpMsj2PZeYrmdEKRYDV0tMdHBItAYgaL0Z7oqBI0ichtwJfCciLixcYKBy7CpieVMQpC8fNh5iVLW8bINqThWwyH51t3kizTat2iXh9HDSskhxAUzRzFC9gDwQW37KqbNkseeqJ9Acz3/88JqAJqCEYKRqH07D7dAqIlgcz0tjfWJzrylpmNb2mIEOSkxAkcgkt/2a7cklkMtCTdYqhB0tehc2willCQ2RVH6JV0Vgkuxgye/ZIzZBYwG/q/HWtUbFJQnRgwVZhKCJK3LKbbuo8sehtKJ6c/pzQWEMV7bgU4sCNu3aJeHgvxCxha5+NXnjuDig93sMYX48orbHd5gcnhwSQ25BJn/wWaCkSjn/f5tfvz0Cmi21oMJNpETayEn1pywHJrTuIfaEsoyWATJnXyydRBuTbhy4q6dOMlTdnY2vWbq3AeaR6Ao/ZouCYHT+T8IFIvIp4GAMWZgxwggYRXkJ8UCMrmGXB7rMjn0HBtUTocI+ApwOQHc/Jh1DeH2gjcHbyyIx+1ipKuOomEH8eerjua60P/jBnMzACUlZbSIfRtvaW7k58+uYmN1Mx9s3tPW2ZtAPQUSoIBWxAl0V1Vu79iWeEwgNUYQ76RDmYSgJRFATxWY5Ck1O+vcUy0AdQ0pSr+mqyUmLgHeBz4HXAK8JyKf7cmG9QqjZtngrC8pENxUaT+nX2LXt1UW7aInzJefeHNurXVcQ16nM3Y64cYdeIpHM+ugElonnsW5p54MQG5BCV89fQYAI3Ii/GOhddls3N1MsM6ONHI5napHEmkcf3xuIbFYyht6Bosgli6PIDkmEW5JHNucEsNInpmsM/dQaoltDRYrSr+mq66h72NzCK4yxnwROAb4Yc81q5c45Ta47j/t1005B0rGwVl32O/xekCulEJzmUgWldY623m6vY6fvtW6VBp2QtFIXC7hgS8fyxmzJtv9/YUUF5cC8IfPTuGKOWO5dPZBGAOVO7eluZjFNNewcFNKnCBJCFbuDtPa2kwkGiMWsuuDLQ0J906wqV2dpDbBakoZmppsEXRWeC5VCNQiUJR+TVeFwGWMSX49rNmHY/svvjwoGtV+3aHnwI3LIN+JH7SVkO5iZ5Y8oihQ1xYsxptjh6qGW+3Y/+Qchfgx/kLw2wDylCHw8wum881PTuZe728Yu+D7GS9ZKg1cd/8iPn33W3y0zRkKGglYa0aEpbtC+GMBbn1sCR5sZ+7ftRh+WmJdRqEmyCsFcdHc1MiKLY5V1FzVPhaQ3Pnvk2tILQJF6c90tTN/QUReFJGrReRq4Dng+Z5rVj/i3F/b4HDJ2K7tnzyfcWtdIkYQH3FUsw4wUDym4zH+woQoOO6aMcVeTnUv7fSS
p45x8enhNUxoWMyV971HTVOQlVuraIp5eHxxBZsbBZcYXl+aZjrMpl3O9JsF4M1jXUUlFdV2VBORQPugcpddQynBYnUNKUq/pkuZxcaYW0TkYuAEbAG6ucaYJ3u0Zf2FiafAtz/s+v7JiWeRVjvngcsGiwGotslm7bKK3R7wF9tRTHFRCNlAs+xei5/OR93MGBJmhnmeUGglh1X/gl+/vJbjd1RRGvNx82MfcbnbtmlSfgukliZqrLSi47dCsLu2jhySOu7m6oRVFIvYmEMksBch0GCxogwkulpiAmPMfGB+D7ZlcJCabBYN2Y4+LhDVNj+AkpSs4iufsOLQ4ryNB5vgzsMyl5CI48m1eQQmhi9UxxePG8df39nMyd4G/MVl3HTSIYzetga2wAOXTbJjv5JpqqSurpaPGgs5oTCHprpGRkqo3XbKbMZ0JBKiKeKlhAAfbqqfDRwNAAAgAElEQVRievF4PO40RmVqjMDEnFhJl//c0hN3Uw2wwreK0t/p1DUkIo0i0pDmp1FEGjo7NmvxFXRc5/ImhqJWrbYF74pGt99nzGw7w5k/ySJwRMC4PCz2HsXHhSd1PHfJWDvMs7UWAvXceNrBlOb7KJEmCkrKueFTB/PZ4w8DwBtwAspJWdUr162joaGWPREfO1oEvwmQR4hqtx1SG1n7Ctw+hlD1RgLBEM3GDq/9r8eX8MxHO9L/DtLFU7ojTvDSD+CBiw78PIqitKNTITDGFBpjitL8FBpjMtRQznLio4ZcScNN46OGAKpX2fhAprfjuJAk1RCSoVOY9t2XOfTq33fcf8h4u29rLZgoxeuf4vHp73FIYRhvQVn7c8bzAk65leb/t5koLl75YBm5JkBOXiF7Qh5KfVGG5sRYF7KjiNa/9SiEGvnuPY8isTAhscNpPUR5Z31ipFJVQ4AXV+yiJRTpaBFA97iHdq+F3esO/DyKorTjAG11pQNx11DxaKjdbJddSUKwZyNM+MTej48fCzD+BHK8bshNo71DxtnZwFzOo3z390ysr7CJcfGJ6/0p4uLNI79oCK3+UkaZBko9IY48+CB2r93BoUO9hPaEqWQIAbwc4qqwt2PqyXXHKMwvhsYdeInw7LIdRGMxLj5qDNf/fTGt4SjfP+cwrmxtJielmZsq9zBh/JC9/fY6J9i0f3MtK4rSKQN/CGh/I/72nVyGwu1JjBqC9FVH47jcNp6wZ7P9fsnf4az/aX9uV5J+l4wDTGJo5+71NmbQvBtynY43PhNaPN7gTL6TO2QkFx3swh1tZfjQoUwbN5IcEyTPFaaosAh38WhcWL/8eQf7cJkoRYX2XGdPLSUYifHU0h1c9/dFBCI2Cv3+5j00NXUsmvfXN9OMWNpXQk3dM3uboijtUCHobuJv9GUH21gAtB81BB0Dxan4C6FmvV0uGg0u5zF5fDY3IKfYxhzc/naT5wCJ+ZZjYcgtddqU4hqKi1LBcFxxy8OXb62WcCueaJDTpo/DW5zIsTiyNGwD0s4EPecfbudnPmyI4Tm5iasPquKzR41h8ZZaWlo6dtaLN1YSihzgpHahJhtr0OGoitKtqBB0N/HRQb78RC2j5DwCgEmf7PwcJePs+H6wAeRk/AW2Y/cV2GskT7WZSptFkCoETtmMghFQs8Fprx0+2lZryJvbTmQkXnrDKVE9zBdk1c/O4u+fKWaSaydXjNzO7HFD2NMcoq6x48Q64WCAU/7vNe59Y4Otpro/xMtiqHtIUboVFYLuJv727cuHYfHROdJ+hrNRszo/R9nkxHJ+eftt/kLr6vEX2J9UoUgmLgTePBBXkmvIsU4KhoHj+rHCkmcTyGJhK1zJ2c+NjjANn2bjD9sXk+tzUx61AjHJV8fs8dYCCQZaOjSlxGfYUR/gjn+v5r+fXdn5/Wci7hZS95CidCsqBN1N3DXkK4BhdtgmtZsSNYtmXLb3cfDOuH28eR3zEnxOGQp/ob1GunmP48SFwKmK2mYRxN1UxUlDWP0FVqwCdYl9kt1OcQvFlw+jjrTzGoOd5xigvoJJ5fmcMqU8bQLcHy+bxvKfnsnnjzmIRxdVUN24j6OIYrGE20stAkXpVvpMCETELSIfisizfdWGHsGX5Boad6Jdziu12bnfXgoX3LP3c8QtgnRv+1PPh0PPtZnIvoKuuYbACkew3i7HLYJky8SXD94k0fHmJSyCvDKbgQw23jF2DuxYaiexqUsIgYhw5yUzKfVFibodC8ip3jo0Ryjwe7j+5EmEozHuX7B577+HZEIZqqUqinLA9OXw0RuAVcDgykdIdg2NPRa+/CqMsKWlKZ3QtXO0CUF5x22fuMV+jjnaZux6fFYUgvX22skdZl5px3ZBQgiGT0/ant/efeXJgQknw9QLrLXw4QN2vdsDY4+Dd34Li/+WZBHYz9J8H6WFLggXQ1OrFaCWYFsewYSh+ZwxdTj/WLiFr50yiZ31rby5djdHjy+lORTBGDhmQikuAUm2nJI7/71YBMaY9scqitIpfSIEIjIGOBf4BfD/+qINPcbwaTDrChjvWANjZu/7OeJDT9MJQZyxcxLL+WXWr18wHPY02RhCsCHFIkgjBJ6kSXh8he3dUN5cO3PbJffDm79KrHd5bP2l8SfBi7cl1gfqbHzBX2gzi3OKrTvJX2jzF5JG+lx/8iReXFHJtfd/wOIttYSjhhyvi3DUEI0ZXAIzxpRQmu9jWKGfOy6eQTTQSFsh8GATdS0hqhqDHDLcBq+fXbaD5z/eyZFjh/Dntzby7xtOxi3Ckq21nHpo0sRDiqJ0oK8sgt8C3wUKM+0gItcD1wOMHdvFyp/9AW8unP+HAzuHLw+GHw5DD+7a/nlDbecbTyAbPg22L25fAC85ByE+agjs/MuBOhsTSLYIkpeTXVTxobBffBrumgV1W6xgNVdD/XYbII8EoPggu39cgJIyi48aN4Srjx/Ps8t2cPrU4XzrkwfzrYc/pCzfx2eOGMWWmmYeem8rzSE7umhHfYCGDe/xlJOsvXvPHm5a8CELNtTw8wsO55LZB/G/L6xh654WXl1VRTAS486X11CY4+We1zdwx0XTOX7SUMaW5RGOxvAm1UeKxQwbdzczuiSXXF9izolAOMry7fXMGjsEt2vwWRd7mkOU5vv2vqOSFfS6EDhTXVYZYxaLyCmZ9jPGzAXmAsyePbuTCXIHKde+1H7azM4YMt6OCvLlWzdR6STru092jzjDPhl9VPv11zwP7/7B5iuUJQlP8jSdyQHpeDKbyw2HfhoW/sGWzGiuhj8eC1c/116U4slsKfMX/OS8afzkvGlt31+44STcLmlz6Vx1/Hiag1Eunfsub66t5gfTisAZ6frHlz7irWAZ5YV+bnviYx75YBtb97TgdgnBSIzJwwp46L2tDC+y93DrEx/bWy/JZWd9K8dPGsqvLzmCnfUBfvDUxyzf3oDbJRwyvJDTpw5nTEkuv3h+FfWtYb556mRuPnMKAB9X1HPL4x9xxZxxfG72GPyezJMV/ePdzQwvyuH0qcP50t8+YOqoIm4589CM+2diza5GllXUceGs0ekL/O0Hb62
r5ovz3mfulbM5ferwTvc1xvDY4grmTChjbFlep/t2B73l1uvu6xhjMAZce3lpiMUMoWjMVgroR/SFRXACcJ6InAPkAEUi8oAx5oo+aEv/JXW0UGec83+2o339dmjdA6feBkd/qf0+8cqdE05uv374NLjgj3Z59JGJ9d4MmdDJNZLmfA1WPAmn/Bc89Dm77qN/2sl34qOk4gIUL0S3daHNkyhKGpoKHTq5MUNsp3P352fR0BrhXP/SNiEocQfJ97l56caTuf/dzTy2qIIjxhRz9vSRPLBwC3/54mxOu/MNdtYHuO6kCQwrzCEYifLB5lrOnDaCRz7Yyhm/eZPGQJjhRTn8+DNTqW0O8f7mPdz9n3UYAzPGFDOsMId73thAca6XcCzG3a+uJxKL8YOnlvODp5bzqcOGc9PpBzN1ZBENgQgFfg9ul/D00u388OkVAJw5bTivralmwYYavnTCBApzvHxv/jLGDMnlO2dMYfWuBh56byv/WV3FhKH5/PXqo/nBU8vZXNPMry+ZyVXz3mdXQ4DHFlXw0HXHsqG6mR89vZzKhgBPfv0ECnI8bNrdzMHDCto6tuZghNfXVHPyIUMpzGk/xerWmhZ+/uwqjIG5b25IKwS1zSH+tWwHOR43H1XU8eB7WxlbmsfVx4/nqaXb+cKxY7n06LEEI1He3VDDlBGFjCzO5Z31u/nxMytwi3D1CeOZMaYYj8tFIBylrMDH44sraA1FufXsQ2kIRHC7hByPq+3Z3/nSGl5YsYtnvnkiOV43sZihrjXcznL50xsb+MfCLdz/pWOYVG6tzQUbdjP3zY3cfuF0RpXYv9tte1r481sbmTaqiIuPHIPH7SIWM7y3aQ9jy/K4/M8LufbECVw5Zxxz39zIfW9v4rwjRlGS5+W4SUOZdVAJb63fzZThhYwobl8wZfWuBnxuF8FIDGOgoraFXzy/imGFfh6+bk5GwX5tdRU/eGo5gXCUx756HBPLOxaoNMZQ3RRkaL5/r6LSnYgxffey7VgENxtjPt3ZfrNnzzaLFi3qnUYNZELNNvM2N01Nn99Mh/qtcNW/OopBMj9xOvDrX0+MKopF4WdO4PnSB+GwNI+rtRae/BrsWgYN2+HoL8MHf4Hpn4OPH7MT/BxxOfzPeDjiMjjvrn27t2WPwRNfBqDp2O9QMetGDh2ReZzBNx5awnPLdvLqdz7R1mHEWbxlD/e+sZHDRhRy7UkTKc5NdJZVDQE+3l7PCZOHEorG+Oo/FrNggy2ud8qUcm6/cDovLN/FjrpWHnhvC4FwjEK/h8ZghPJCPycdPJRnl+1kxuhixpXlM39JBX6P7TROnVJOMBJjwYYafG4Xj331OL44732CkSiHjihi6bY6po8u5uPtdnSX1207gi+dOIE/vbGRW88+lEc+2EZDa5j61jCnHTaMllCUt9btZmxpHpcfO5ZN1c1s2dPMwo17yPW6mTOxlAlDC1i6rZaiXC+vr7G5JJ84pJw31lYzvMjPpw4bjtslPLdsJ6ceOowXlu+iKZiYhOj0qcN5Y201oUiMHK8Lv8fNXZ+fxX898THb61qZPrqYx756HMfe/iql+T7y/W6Wb29fnNjjEiLOvNonTC5rK1g4eVgBT3z9eN5au5tvPrwEY+Bn50/jyjnjuOXxZTyzdAd/v/YYSvK8zH1jI098uB0RmFCWz9nTR3Di5HKumvc+oWiMc6aP4PYLp/Poom3c+8ZG6lpCxAx889TJXHviBK752wcs3VZHWb6PmuYQbpdw42kH8+uX1zKpPJ8N1XZAgs/tYnixn217Wjlq3BAiMUO+z82XT5rArIOG8Mlfv04wEiMSM23Z8qNLctle18rNZxzCGdNGEIrEOHx0MT96ejnPLtvJSQcP5a11uynN97GnOUSB38O1J07g7fW7+eVF09lR18qf3txIdUOQ9zfvoSzfxzc/OZmrjx9/QJaLiCw2xuw1UKlCkC08/Q078uf7le3LXaQy/8u24/7qOzDi8MT6Xx1i5yb4/CMw5az0xy68F174nl3+xK3wxh1w1DWw+K9w5i9h6CHw4MUwejZc9+q+tX/RPHj2Jrt83DfhzF90uvv2ulbeWlvNpUcfdED/SMYY1lU14fe4GFua1+5ce5pDvLKyko8q6igv9LN6ZyP/WVPFsRNK+d1ls/B7XFzyp3c56eByvG5h3tubyPW5uWDmaP7y9iZ8Hhc5Hhf/+taJHDQkj/P/8A6rdzXwnTOmUF7g5+31u7ns6IM4ZkIpn//zQhZutHNVPHTdsSzaXMudL69FBK4/eSIvr6hk4+5mfG4XoWiMmz51CDXNQd5at5vNNc1MHVnEzvoAlx19EBcdOYaRxTnc/vwqaltCvLzSDg2eOqqYj7bVccSYYu64eAZ5Pjdet4tRJblUNwYJRqLUtYT59N1vA7bz+/QRI/nTGxv51GHDeGVVFQ9ceywnTC5j8ZZaqhqDhKMx3C7hxRWVRGMxXllZRSga47NHjWF0SS53/2cd+T4rpJOHFZDv91Cxp4VTpgxj/pIKcr1uWsM2VuRxCV8/ZRIzx5bwo6dXUFHbit/joijXy3lHjOK+tze1PZtjJpRy+4XTuef1DTy1dDuTywvYVNPMyQeX88qqSk6dUs6WPS1srG5maIGft793Ki+vrGRYoZ9HPthGU9BaeE98uB2AwhwPjYFIm+iPLsllZHEOcyaWkeN18ZVPTOLGR5by3LKdiECB38N3Tj+En/xrJcdMKOWjbXVEY4Znv30iLaEon5+7kKAjIpPK861lUddKUY6HLxw7jsVbanl7/W6unDOOn50/bb//hgeEEHQVFYJuIBK0U2cWdu4TJhyANc/DtAvbxxIeuRJWPWMD4bMyePEqV8I9x9nlm1bAb6bB8d+GBXfBp34KTVU2puArgNsq9m2CmQV32/kIvHl2xNJpP2ovVPuLMbB9CYw56sDPBURjpl1wORYziDMUNhKN4RLB5RKu/dsHbe6fmQfZeEpdS4iWULTNvZFMXUuI5z/exfiyPI6fbGM2W2tsBvfYsjxaQhFW7Ghg+uhitu5paRtNBRCMRDuNZ4QiMTwu266PK+o5eHhBpz7sp5duJxCOcua0ERTleLn8L1akRpfk8tZ3T+3UpXHHv1ezamcD9101G4/bxdw3N/Diikq+cOxYPj1jFBt3N/Gthz5k4+5mLj9mLNedNJFnPtrOyOJcTpg8tJ2b5juPfsT8JRX84kI7YOCJJRXsqAtw5rQRTB1lrcX61jC3PPYRCzbU8N8XTOOc6SOZ+8ZGLjpqDC3BCJfOXchNpx/ClXM61v8KRqKcd/c7HD1hCP91zmG8uqqK7zz2EYeOKOSpr5/Q9lzjRKIx/vj6BjZWN/HUUjtXx5yJpTxw7bFsr2tld1OQo8ZZy/r1NVW8vLKS06cO5zuPfkRNc4i/XnM0p06xI9yMMdzx79XMfWsj8792PEeO3b/KvSoESvey40OYewp8430on5J+H2NsbsHBp0PhKJh3hhWCR6+0iWW5Q6xVgYEbP+76PNAAr/3SWhhDxtsS3XllcPP6REG+dMRiEGpMxCvS8fHjMP
9auPYVOOjorrfnAInFTK/6gHuSQDjKXa+uY8aYEs46fMTeD9gL0ZghEI6S7+88hNkUjPDqqkrOnT5yvwPpqcK9t+3rq5oozPG0DUTIxPV/X8TSbXU8+60TGbaXfasaA6ze2cjJh7QfLm6MYfn2BqaP6eTvdy90VQh0PgKla4yaBT+p73wfEZh9TeL7l1+xn/GEN48f5nzdWgVVqzoKQdUqGyRfcLet3HrGz21+hDfXJpF58xLB55YaqFwOI2dkbs+i++DlH8HX3mlfFjyZZY/Yzx0fZhaCzW/b+Md5v9t7wcAuMlhEACDH6+a7Z+37iKhMuF2yVxEA6345f+bove63t2vty/bJw9LMQJiG319+JOForEv3Mawwh2GFHcVCRA5IBPYFFQKl5/n6AlvELr/MuqcW/gEeugSOvMpWOm2qtLkIq/5lRxTVboJYBNa+YIemXv2sFQJfATQlzeG86c2EEFQshg2v2vhBvMzHx4/Z8z96la0Ee95d7XMommtgw3/scuXy9m1uroFHvwgn3wz/vNyeZ8Hvu00IlMGNz+PC5xk4pdxUCJSep3hMYjm3xJbHqFptO3pvLuQPg7Uv2pyDGmcqSrfPCkLtJrtvsNEmp+3ZaLf7CmD1czbLubkK5l9nM5iXPWrjGEPGwbb3rStp1zL7c8iZcHjSnMcrn7KCUziyoxCsehq2vA1PrLEiMPUCWPk03HeGtWTO+Lm1SoZMSAiPogxQVAiU3ufalzsGiiNB+/Obw63l8Jnf2eDyk1+Bf33bZkofclZCCOZ8Hd78X7j3BPvdVwDn3W1jCfPOcBLfDFz2kBWauafA0gdt4txbv7KT9lSvhvJDYeKpNrax9iVY9yLM/AKs+bc9b3O1TbQ747+tWLXWWTdSa621JiZ/Ci5/dN8C34rSz1AhUHqfdJ2mx29/PjvPvmGPO96uX/ey9eNPPMUmvtXdaiuhTj4NppydCByXH2pHRB32Geti2r3WCsCwqfZ6My+3wrHxdZsp3Vpry1588gfWIoi0OklxYvMfwJbJqN9mq72WjIVb1lnBmX8tLJ/vtO8lePmHdlTU7nWw5H67bewcmHSadXmNPwGiEdj0hrWCwi0w7gTbJn+RFZTmaph8uhXBnR9ZESwaDQv/aNt68X122G/1GiuGBcOgbitMORdM1GaSm5gd2TX5NBg502Z/G2OtnR1Lbf5ITrF1ma1/BU79Lxh5hG133RYYOctev2qVtbBmXm4LJ5ZOstbTuBNh6OT2z80Yu//bv7GW39TzYcs79n4mngoTnfm5N75u600dcoZNOAy3ti+KGIvZ0iSdWVexGDTuaG9hZqJhp/39LHsETryxYzyqYYetzeVy2zwZVxcyfY3pHcEPtdhnPHxar71g6KghpX9jjO3guvKP2hnhVtup1m+HU26zbp/XbocrnrBuqH/dYDv8KWfbTm3l03Dhn2wOxKnfb185ds0L8PClNl4wZLzNcYgHxF1e2/lteM120Knkldn6Tns2WKukeIx1W4ENkBePth1YHLffCtaoI22yYKoLq8AZDtxUaY+PX9PjFA305kFVmomAPDn2Z+jBUPGBc32XHe3Vstu6zGKR9m0oGA6HX2wLGsaFp2aD7Zw9ObYjj+Py2OMP/6yN76x9wa4fe7y931CTHYAwYoa1BKtWQuUKK6CFIwCxbXN77e+ycacV9q0LYNg0+7vwF9rffzTkTKgUtcebqH2GcYYfbtveVGldgy4PvPxjW24lErDXHn+SfYnYs8mKnTfPtiO/3F4/0mrzcIrG2L+Tivdh2wcwfKoVcBH7shIN25LzK5+xrsMTb7QvLhtfg/oKm72fN9Q+x13LYNpF9h4adtjtK5+y1m9ztc23+dRPYMJJ6f+mu4AOH1WUniISgvlfsu6pscfB6mdtRzf0EOtWyh9qO5RYxP5z71xqBWL0kXDQsVbUti+B135hXU0zL4dRM2H18/bNfNhUe67matvpLH/CWinFo60rqvxQm72dX26Hv4Zb7Zt71So48Sb79l+53O7TWGlzQsbOsW/qgXrrYssphudvtp3VtAutZbB1oW137hCbGb7uRdvZrnjKxnWW3G+PzymGolH2nopG2Y5qyrnWOmvcYUWrYBi8+H37u/H4Yfol1hLa+i6MmG7f0Fc8ZQXF7bW/k6kX2E6wpcZ26vXbAGPLpbs9sGs5HHmlFZ/cIbYtdVus6PkL7O97x1IrBFMvsPdpYvD0121nWzACtjkTKo05xlqNZZPsM1n2qBVAl9eOVEvHkAn2mYSarLU48ghrEUa7OId2slCDbXekteM1Rh5hRWrhPfb3+eVX96+KMSoEiqJ0N7EoIJ3nbuwP8T4o1Q0SbrVukvwyu0+wofOcELButJqNVljjVmTdNke43DbpMdRsrRGXO3HNWMyxOoba2JHLa9/Sg/XWzZY7JFHePRJM1MratdxaFXHryOO3+5SMBQS2LLDW5IgZdgjz7rW2jf5CK/ib37aWSvFoa5mOPT7x+w23WstixiX77SJSIVAURclyuioEA2egq6IoitIjqBAoiqJkOSoEiqIoWY4KgaIoSpajQqAoipLlqBAoiqJkOSoEiqIoWY4KgaIoSpajQqAoipLlqBAoiqJkOSoEiqIoWY4KgaIoSpajQqAoipLlqBAoiqJkOSoEiqIoWY4KgaIoSpajQqAoipLlqBAoiqJkOSoEiqIoWY4KgaIoSpajQqAoipLlqBAoiqJkOSoEiqIoWY4KgaIoSpbT60IgIgeJyGsiskpEVojIDb3dBkVRFCWBpw+uGQG+Y4xZIiKFwGIRedkYs7IP2qIoipL19LpFYIzZaYxZ4iw3AquA0b3dDkVRFMXSpzECERkPzALeS7PtehFZJCKLqqure7tpiqIoWUOfCYGIFADzgRuNMQ2p240xc40xs40xs8vLy3u/gYqiKFlCnwiBiHixIvCgMeaJvmiDoiiKYumLUUMC3AesMsbc2dvXVxRFUdrTFxbBCcCVwCdFZKnzc04ftENRFEWhD4aPGmPeBqS3r6soiqKkRzOLFUVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkVRlCxHhUBRFCXL6RMhEJGzRGSNiKwXkVv7og2KoiiKpdeFQETcwB+As4GpwOdFZGpvt0NRFEWx9IVFcAyw3hiz0RgTAv4JnN8H7VAURVEATx9cczSwLel7BXBs6k4icj1wvfO1SUTW7Of1hgK79/PY/obeS/9E76V/Mlju5UDuY1xXduoLIZA060yHFcbMBeYe8MVEFhljZh/oefoDei/9E72X/slguZfeu
I++cA1VAAclfR8D7OiDdiiKoij0jRB8ABwsIhNExAdcBjzTB+1QFEVR6APXkDEmIiLfBF4E3MA8Y8yKHrzkAbuX+hF6L/0TvZf+yWC5lx6/DzGmg3teURRFySI0s1hRFCXLUSFQFEXJcga1EAzkUhYisllEPhaRpSKyyFlXKiIvi8g653NIX7czEyIyT0SqRGR50rq07RfLXc5zWiYiR/Zdy9uT4T5+IiLbnWezVETOSdp2m3Mfa0TkzL5pdXpE5CAReU1EVonIChG5wVk/EJ9LpnsZcM9GRHJE5H0R+ci5l5866yeIyHvOc3nEGVyDiPid7+ud7eMPuBHGmEH5gw1EbwAmAj7gI2BqX7drH9q/GRiasu5/gVud5VuB/+nrdnbS/pOBI4Hle2s/cA7wb2yOyRzgvb5u/17u4yfAzWn2ner8nfmBCc7fn7uv7yGpfSOBI53lQmCt0+aB+Fwy3cuAezbO77fAWfYC7zm/70eBy5z19wJfc5a/DtzrLF8GPHKgbRjMFsFgLGVxPnC/s3w/cEEftqVTjDFvAntSVmdq//nA341lIVAiIiN7p6Wdk+E+MnE+8E9jTNAYswlYj/077BcYY3YaY5Y4y43AKmym/0B8LpnuJRP99tk4v98m56vX+THAJ4HHnfWpzyX+vB4HThORdIm6XWYwC0G6Uhad/aH0NwzwkogsdsptAAw3xuwE+48ADOuz1u0fmdo/EJ/VN/+QhOUAAAPjSURBVB13ybwkF92AuQ/HnTAL+/Y5oJ9Lyr3AAHw2IuIWkaVAFfAy1mKpM8ZEnF2S29t2L872eqDsQK4/mIWgS6Us+jEnGGOOxFZp/YaInNzXDepBBtqzugeYBMwEdgK/dtYPiPsQkQJgPnCjMaahs13TrOtX95PmXgbkszHGRI0xM7GVFo4BDku3m/PZ7fcymIVgQJeyMMbscD6rgCexfxyVcdPc+azquxbuF5naP6CelTGm0vnHjQF/JuFi6Pf3ISJebMf5oDHmCWf1gHwu6e5lID8bAGNMHfA6NkZQIiLxpN/k9rbdi7O9mK67L9MymIVgwJayEJF8ESmMLwNnAMux7b/K2e0q4Om+aeF+k6n9zwBfdEapzAHq466K/kiKn/xC7LMBex+XOaM6JgAHA+/3dvsy4fiR7wNWGWPuTNo04J5LpnsZiM9GRMpFpMRZzgU+hY15vAZ81tkt9bnEn9dngf8YJ3K83/R1xLwnf7CjHtZi/W3f7+v27EO7J2JHOHwErIi3HesHfBVY53yW9nVbO7mHh7GmeRj7BnNtpvZjTd0/OM/pY2B2X7d/L/fxD6edy5x/ypFJ+3/fuY81wNl93f6UezkR60JYBix1fs4ZoM8l070MuGcDzAA+dNq8HPiRs34iVqzWA48Bfmd9jvN9vbN94oG2QUtMKIqiZDmD2TWkKIqidAEVAkVRlCxHhUBRFCXLUSFQFEXJclQIFEVRshwVAkXpYUTkFBF5tq/boSiZUCFQFEXJclQIFMVBRK5w6sIvFZE/OYXAmkTk1yKyREReFZFyZ9+ZIrLQKW72ZFIN/8ki8opTW36JiExyTl8gIo+LyGoRefBAq0UqSneiQqAogIgcBlyKLfY3E4gCXwDygSXGFgB8A/ixc8jfge8ZY2ZgM1nj6x8E/mCMOQI4HpuVDLY65o3YuvgTgRN6/KYUpYt49r6LomQFpwFHAR84L+u52OJrMeARZ58HgCdEpBgoMca84ay/H3jMqQ812hjzJIAxJgDgnO99Y0yF830pMB54u+dvS1H2jgqBolgEuN8Yc1u7lSI/TNmvs5osnbl7gknLUfR/T+lHqGtIUSyvAp8VkWHQNo/vOOz/SLwC5OXA28aYeqBWRE5y1l8JvGFsPfwKEbnAOYdfRPJ69S4UZT/QtxJFAYwxK0XkB9hZ4VzYaqPfAJqBaSKyGDsT1KXOIVcB9zod/UbgGmf9lcCfRORnzjk+14u3oSj7hVYfVZROEJEmY0xBX7dDUXoSdQ0piqJkOWoRKIqiZDlqESiKomQ5KgSKoihZjgqBoihKlqNCoCiKkuWoECiKomQ5/x9NDIukrIP9CAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "experimento_ssd7_fault_1.h5\n" + ] + } + ], "source": [ "#Graficar aprendizaje\n", "\n", @@ -746,9 +2556,30 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 114.45it/s]\n", + "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 70.49it/s]\n", + "Number of images in the evaluation dataset: 2\n", + "\n", + "Producing predictions batch-wise: 100%|██████████| 1/1 [00:04<00:00, 4.89s/it]\n", + "Matching predictions to ground truth, class 1/1.: 100%|██████████| 400/400 [00:00<00:00, 10261.36it/s]\n", + "Computing precisions and recalls, class 1/1\n", + "Computing average precision, class 1/1\n", + "400 instances of class 1 with average precision: 0.4970\n", + "mAP using the weighted average of precisions among classes: 0.4970\n", + "mAP: 0.4970\n", + "1 AP 0.497\n", + "\n", + " mAP 0.497\n" + ] + } + ], "source": [ "\n", "config_path = 'config_7_fault_1.json'\n", @@ -865,9 +2696,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "1" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "ceil(val_dataset_size/batch_size)" ] @@ -882,9 +2724,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'1': 1}\n", + "\n" + ] + } + ], "source": [ "from imageio import imread\n", "from keras.preprocessing import image\n", @@ -945,9 +2797,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tiempo Total: 0.466\n", + "Tiempo promedio por imagen: 0.019\n", + "OK\n" + ] + } + ], "source": [ "image_paths = []\n", "for inp in input_path:\n", @@ -1028,9 +2890,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1 : 99\n" + ] + } + ], "source": [ "\n", "# Summary instance training\n", @@ -1044,9 +2914,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1 : 99\n" + ] + } + ], "source": [ "for i in summary_category_training.keys():\n", " print(i, ': {:.0f}'.format(summary_category_training[i]))" diff --git a/Primer_reslutado_panel/.ipynb_checkpoints/Panel_Detector-checkpoint.ipynb b/Primer_reslutado_panel/.ipynb_checkpoints/Panel_Detector-checkpoint.ipynb new file mode 100644 index 0000000..5a6e474 --- /dev/null +++ b/Primer_reslutado_panel/.ipynb_checkpoints/Panel_Detector-checkpoint.ipynb @@ -0,0 +1,1896 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Detector de Paneles" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cargar el modelo ssd7 \n", + "(https://github.com/pierluigiferrari/ssd_keras#how-to-fine-tune-one-of-the-trained-models-on-your-own-dataset)\n", + "\n", + "Training del SSD7 (modelo reducido de SSD). Parámetros en config_7.json y descargar VGG_ILSVRC_16_layers_fc_reduced.h5\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'panel': 1}\n", + "\n", + "\n", + "Loading pretrained weights.\n", + "\n", + "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Colocations handled automatically by placer.\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Deprecated in favor of operator or tf.math.divide.\n" + ] + }, + { + "ename": "ResourceExhaustedError", + "evalue": "OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n\nCaused by op 'training/Adam/Variable_6/Assign', defined at:\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, in execute_request\n user_expressions, allow_stdin,\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2874, in _run_cell\n return runner(coro)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/async_helpers.py\", line 67, in _pseudo_sync_runner\n coro.send(None)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3049, in run_cell_async\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3214, in run_ast_nodes\n if (yield from self.run_code(code, result)):\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File 
\"\", line 124, in \n 'compute_loss': ssd_loss.compute_loss})\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 419, in load_model\n model = _deserialize_model(f, custom_objects, compile)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 317, in _deserialize_model\n model._make_train_function()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/training.py\", line 509, in _make_train_function\n loss=self.total_loss)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/legacy/interfaces.py\", line 91, in wrapper\n return func(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in get_updates\n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in \n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 704, in zeros\n return variable(v, dtype=dtype, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 402, in variable\n v = tf.Variable(value, dtype=tf.as_dtype(dtype), name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 213, in __call__\n return cls._variable_v1_call(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 176, in _variable_v1_call\n aggregation=aggregation)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 155, in \n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py\", line 2495, in default_variable_creator\n expected_shape=expected_shape, import_scope=import_scope)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 217, in __call__\n return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1395, in __init__\n constraint=constraint)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1547, in _init_from_args\n validate_shape=validate_shape).op\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/state_ops.py\", line 223, in assign\n validate_shape=validate_shape)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/gen_state_ops.py\", line 64, in assign\n use_locking=use_locking, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py\", line 788, in _apply_op_helper\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 507, in new_func\n return func(*args, **kwargs)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3300, in create_op\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1801, in __init__\n self._traceback = tf_stack.extract_stack()\n\nResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mResourceExhaustedError\u001b[0m: OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[{{node training/Adam/Variable_6/Assign}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n", + "\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)", + 
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 122\u001b[0m model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n\u001b[1;32m 123\u001b[0m \u001b[0;34m'L2Normalization'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mL2Normalization\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m 'compute_loss': ssd_loss.compute_loss})\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\u001b[0m in \u001b[0;36mload_model\u001b[0;34m(filepath, custom_objects, compile)\u001b[0m\n\u001b[1;32m 417\u001b[0m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh5dict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'r'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 418\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 419\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_deserialize_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcustom_objects\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcompile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 420\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 421\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mopened_new_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\u001b[0m in \u001b[0;36m_deserialize_model\u001b[0;34m(f, custom_objects, compile)\u001b[0m\n\u001b[1;32m 323\u001b[0m optimizer_weight_names]\n\u001b[1;32m 324\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 325\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moptimizer_weight_values\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 326\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 327\u001b[0m warnings.warn('Error in loading the saved optimizer '\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\u001b[0m in \u001b[0;36mset_weights\u001b[0;34m(self, weights)\u001b[0m\n\u001b[1;32m 124\u001b[0m 'of the optimizer (' + str(len(params)) + ')')\n\u001b[1;32m 125\u001b[0m \u001b[0mweight_value_tuples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 126\u001b[0;31m \u001b[0mparam_values\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mK\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatch_get_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 127\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mpv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mw\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparam_values\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mpv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mw\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36mbatch_get_value\u001b[0;34m(ops)\u001b[0m\n\u001b[1;32m 2418\u001b[0m \"\"\"\n\u001b[1;32m 2419\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2420\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mget_session\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2421\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2422\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36mget_session\u001b[0;34m()\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_keras_initialized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0muninitialized_vars\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0msession\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvariables_initializer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muninitialized_vars\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;31m# hack for list_devices() function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# list_devices() function is not available under tensorflow r1.3.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m 
\u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mResourceExhaustedError\u001b[0m: OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n\nCaused by op 'training/Adam/Variable_6/Assign', defined at:\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, in execute_request\n user_expressions, allow_stdin,\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2874, in _run_cell\n return runner(coro)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/async_helpers.py\", line 67, in _pseudo_sync_runner\n coro.send(None)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3049, in run_cell_async\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3214, in run_ast_nodes\n if (yield from self.run_code(code, result)):\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 124, in \n 'compute_loss': ssd_loss.compute_loss})\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 419, in load_model\n model = _deserialize_model(f, custom_objects, compile)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 317, in _deserialize_model\n model._make_train_function()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/training.py\", line 509, in _make_train_function\n loss=self.total_loss)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/legacy/interfaces.py\", line 91, in wrapper\n return func(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in get_updates\n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in \n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 704, in zeros\n return variable(v, dtype=dtype, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 402, in variable\n v = tf.Variable(value, dtype=tf.as_dtype(dtype), name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 213, in __call__\n return cls._variable_v1_call(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 176, in _variable_v1_call\n aggregation=aggregation)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 155, in \n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py\", line 2495, in default_variable_creator\n expected_shape=expected_shape, import_scope=import_scope)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 217, in __call__\n return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1395, in __init__\n constraint=constraint)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1547, in _init_from_args\n validate_shape=validate_shape).op\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/state_ops.py\", line 223, in assign\n validate_shape=validate_shape)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/gen_state_ops.py\", line 64, in assign\n use_locking=use_locking, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py\", line 788, in _apply_op_helper\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 507, in new_func\n return func(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3300, in create_op\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1801, in __init__\n self._traceback = tf_stack.extract_stack()\n\nResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n" + ] + } + ], + "source": [ + "from keras.optimizers import Adam, SGD\n", + "from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n", + "from keras import backend as K\n", + "from keras.models import load_model\n", + "from math import ceil\n", + "import numpy as np\n", + "from matplotlib import pyplot as plt\n", + "import os\n", + "import json\n", + "import xml.etree.cElementTree as ET\n", + "\n", + "import sys\n", + "sys.path += [os.path.abspath('../../ssd_keras-master')]\n", + "\n", + "from keras_loss_function.keras_ssd_loss import SSDLoss\n", + "from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\n", + "from keras_layers.keras_layer_DecodeDetections import DecodeDetections\n", + "from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n", + "from keras_layers.keras_layer_L2Normalization import L2Normalization\n", + "from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n", + "from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n", + "from data_generator.object_detection_2d_data_generator import DataGenerator\n", + "from data_generator.object_detection_2d_geometric_ops import Resize\n", + "from 
+    "from keras.optimizers import Adam, SGD\n",
+    "from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n",
+    "from keras import backend as K\n",
+    "from keras.models import load_model\n",
+    "from math import ceil\n",
+    "import numpy as np\n",
+    "from matplotlib import pyplot as plt\n",
+    "import os\n",
+    "import json\n",
+    "import xml.etree.cElementTree as ET\n",
+    "\n",
+    "import sys\n",
+    "sys.path += [os.path.abspath('../../ssd_keras-master')]\n",
+    "\n",
+    "from keras_loss_function.keras_ssd_loss import SSDLoss\n",
+    "from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\n",
+    "from keras_layers.keras_layer_DecodeDetections import DecodeDetections\n",
+    "from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n",
+    "from keras_layers.keras_layer_L2Normalization import L2Normalization\n",
+    "from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n",
+    "from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n",
+    "from data_generator.object_detection_2d_data_generator import DataGenerator\n",
+    "from data_generator.object_detection_2d_geometric_ops import Resize\n",
+    "from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\n",
+    "from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n",
+    "from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n",
+    "from eval_utils.average_precision_evaluator import Evaluator\n",
+    "from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\n",
+    "from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\n",
+    "\n",
+    "\n",
+    "def makedirs(path):\n",
+    "    try:\n",
+    "        os.makedirs(path)\n",
+    "    except OSError:\n",
+    "        if not os.path.isdir(path):\n",
+    "            raise\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "K.tensorflow_backend._get_available_gpus()\n",
+    "\n",
+    "\n",
+    "def lr_schedule(epoch):\n",
+    "    if epoch < 80:\n",
+    "        return 0.001\n",
+    "    elif epoch < 100:\n",
+    "        return 0.0001\n",
+    "    else:\n",
+    "        return 0.00001\n",
+    "\n",
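+    "# Editorial sketch (assumed wiring, inferred from the 'LearningRateScheduler\n",
+    "# setting learning rate to ...' lines in the training log below): the schedule\n",
+    "# above is attached through the standard Keras callback, e.g.\n",
+    "#   lr_callback = LearningRateScheduler(lr_schedule, verbose=1)\n",
+    "#   model.fit_generator(..., callbacks=[lr_callback, ModelCheckpoint(...), TerminateOnNaN()])\n",
+    "\n",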
+    "config_path = 'config_7_panel.json'\n",
+    "\n",
+    "\n",
+    "with open(config_path) as config_buffer:\n",
+    "    config = json.loads(config_buffer.read())\n",
+    "\n",
+    "###############################\n",
+    "# Parse the annotations\n",
+    "###############################\n",
+    "path_imgs_training = config['train']['train_image_folder']\n",
+    "path_anns_training = config['train']['train_annot_folder']\n",
+    "path_imgs_val = config['test']['test_image_folder']\n",
+    "path_anns_val = config['test']['test_annot_folder']\n",
+    "labels = config['model']['labels']\n",
+    "categories = {}\n",
+    "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} #category 0 is the background\n",
+    "for i in range(len(labels)): categories[labels[i]] = i+1\n",
+    "print('\\nTraining on: \\t' + str(categories) + '\\n')\n",
+    "\n",
+    "####################################\n",
+    "# Parameters\n",
+    "###################################\n",
+    "    #%%\n",
+    "img_height = config['model']['input'] # Height of the model input images\n",
+    "img_width = config['model']['input'] # Width of the model input images\n",
+    "img_channels = 3 # Number of color channels of the model input images\n",
+    "mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\n",
+    "swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\n",
+    "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
+    "scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\n",
+    "#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
+    "scales = scales_pascal\n",
+    "aspect_ratios = [[1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\n",
+    "two_boxes_for_ar1 = True\n",
+    "steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\n",
+    "offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\n",
+    "clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\n",
+    "variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\n",
+    "normalize_coords = True\n",
+    "\n",
+    "K.clear_session() # Clear previous models from memory.\n",
+    "\n",
+    "\n",
+    "model_path = config['train']['saved_weights_name']\n",
+    "# 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "# If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "# optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "\n",
+    "if config['model']['backend'] == 'ssd7':\n",
+    "    #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. 
If this is passed, it will override `min_scale` and `max_scale`.\n", + " aspect_ratios = [0.5 ,1.0, 2.0] # The list of aspect ratios for the anchor boxes\n", + " two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n", + " steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n", + " offsets = None\n", + "\n", + "if os.path.exists(model_path):\n", + " print(\"\\nLoading pretrained weights.\\n\")\n", + " # We need to create an SSDLoss object in order to pass that to the model loader.\n", + " ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + " K.clear_session() # Clear previous models from memory.\n", + " model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + " 'L2Normalization': L2Normalization,\n", + " 'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + "else:\n", + " ####################################\n", + " # Build the Keras model.\n", + " ###################################\n", + "\n", + " if config['model']['backend'] == 'ssd300':\n", + " #weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'\n", + " from models.keras_ssd300 import ssd_300 as ssd\n", + "\n", + " model = ssd_300(image_size=(img_height, img_width, img_channels),\n", + " n_classes=n_classes,\n", + " mode='training',\n", + " l2_regularization=0.0005,\n", + " scales=scales,\n", + " aspect_ratios_per_layer=aspect_ratios,\n", + " two_boxes_for_ar1=two_boxes_for_ar1,\n", + " steps=steps,\n", + " offsets=offsets,\n", + " clip_boxes=clip_boxes,\n", + " variances=variances,\n", + " normalize_coords=normalize_coords,\n", + " subtract_mean=mean_color,\n", + " swap_channels=swap_channels)\n", + "\n", + "\n", + " elif config['model']['backend'] == 'ssd7':\n", + " #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n", + " from models.keras_ssd7 import build_model as ssd\n", + " scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n", + " aspect_ratios = [0.5 ,1.0, 2.0] # The list of aspect ratios for the anchor boxes\n", + " two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n", + " steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n", + " offsets = None\n", + " model = ssd(image_size=(img_height, img_width, img_channels),\n", + " n_classes=n_classes,\n", + " mode='training',\n", + " l2_regularization=0.0005,\n", + " scales=scales,\n", + " aspect_ratios_global=aspect_ratios,\n", + " aspect_ratios_per_layer=None,\n", + " two_boxes_for_ar1=two_boxes_for_ar1,\n", + " steps=steps,\n", + " offsets=offsets,\n", + " clip_boxes=clip_boxes,\n", + " variances=variances,\n", + " normalize_coords=normalize_coords,\n", + " subtract_mean=None,\n", + " divide_by_stddev=None)\n", + "\n", + " else :\n", + " print('Wrong Backend')\n", + "\n", + "\n", + "\n", + " print('OK create model')\n", + " #sgd = SGD(lr=config['train']['learning_rate'], momentum=0.9, decay=0.0, nesterov=False)\n", + "\n", + " # TODO: Set the path to the weights you want to load. 
only for ssd300 or ssd512\n",
+    "\n",
+    "    weights_path = '../ssd_keras-master/VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    print(\"\\nLoading pretrained weights VGG.\\n\")\n",
+    "    model.load_weights(weights_path, by_name=True)\n",
+    "\n",
+    "    # 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "    # If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "    # optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "\n",
+    "    #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    #sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\n",
+    "    optimizer = Adam(lr=config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "    model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)\n",
+    "\n",
+    "    model.summary()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Instantiate the data generators and train the model.\n",
+    "\n",
+    "*Change made so that both png and jpg files are read: keras-ssd-master/data_generator/object_detection_2d_data_generator.py, function parse_xml\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Processing image set 'train.txt': 100%|██████████| 1/1 [00:00<00:00,  3.02it/s]\n",
+      "Processing image set 'test.txt': 100%|██████████| 1/1 [00:00<00:00,  2.48it/s]\n",
+      "panel : 69\n",
+      "cell : 423\n",
+      "Number of images in the training dataset:\t 1\n",
+      "Number of images in the validation dataset:\t 1\n",
+      "Epoch 1/100\n",
+      "\n",
+      "Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 200s 4s/step - loss: 13.2409 - val_loss: 9.9807\n",
+      "\n",
+      "Epoch 00001: val_loss improved from inf to 9.98075, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 2/100\n",
+      "\n",
+      "Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 238s 5s/step - loss: 9.8864 - val_loss: 11.1452\n",
+      "\n",
+      "Epoch 00002: val_loss did not improve from 9.98075\n",
+      "Epoch 3/100\n",
+      "\n",
+      "Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 226s 5s/step - loss: 8.8060 - val_loss: 8.3006\n",
+      "\n",
+      "Epoch 00003: val_loss improved from 9.98075 to 8.30060, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 4/100\n",
+      "\n",
+      "Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 199s 4s/step - loss: 7.4999 - val_loss: 8.9384\n",
+      "\n",
+      "Epoch 00004: val_loss did not improve from 8.30060\n",
+      "Epoch 5/100\n",
+      "\n",
+      "Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 187s 4s/step - loss: 7.4727 - val_loss: 7.9512\n",
+      "\n",
+      "Epoch 00005: val_loss improved from 8.30060 to 7.95121, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 6/100\n",
+      "\n",
+      "Epoch 00006: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 213s 4s/step - loss: 6.8813 - val_loss: 11.2544\n",
+      "\n",
+      "Epoch 00006: val_loss did not improve from 7.95121\n",
+      "Epoch 7/100\n",
+      "\n",
+      "Epoch 00007: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] 
- 195s 4s/step - loss: 6.4775 - val_loss: 6.9093\n", + "\n", + "Epoch 00007: val_loss improved from 7.95121 to 6.90929, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 8/100\n", + "\n", + "Epoch 00008: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 212s 4s/step - loss: 6.9758 - val_loss: 8.6997\n", + "\n", + "Epoch 00008: val_loss did not improve from 6.90929\n", + "Epoch 9/100\n", + "\n", + "Epoch 00009: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 199s 4s/step - loss: 6.1539 - val_loss: 10.9586\n", + "\n", + "Epoch 00009: val_loss did not improve from 6.90929\n", + "Epoch 10/100\n", + "\n", + "Epoch 00010: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 206s 4s/step - loss: 5.9307 - val_loss: 8.4361\n", + "\n", + "Epoch 00010: val_loss did not improve from 6.90929\n", + "Epoch 11/100\n", + "\n", + "Epoch 00011: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 197s 4s/step - loss: 5.3895 - val_loss: 5.9796\n", + "\n", + "Epoch 00011: val_loss improved from 6.90929 to 5.97960, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 12/100\n", + "\n", + "Epoch 00012: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 184s 4s/step - loss: 5.0889 - val_loss: 5.9283\n", + "\n", + "Epoch 00012: val_loss improved from 5.97960 to 5.92832, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 13/100\n", + "\n", + "Epoch 00013: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 193s 4s/step - loss: 5.7916 - val_loss: 6.7706\n", + "\n", + "Epoch 00013: val_loss did not improve from 5.92832\n", + "Epoch 14/100\n", + "\n", + "Epoch 00014: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 222s 4s/step - loss: 5.3010 - val_loss: 7.8910\n", + "\n", + "Epoch 00014: val_loss did not improve from 5.92832\n", + "Epoch 15/100\n", + "\n", + "Epoch 00015: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 179s 4s/step - loss: 4.9873 - val_loss: 6.0389\n", + "\n", + "Epoch 00015: val_loss did not improve from 5.92832\n", + "Epoch 16/100\n", + "\n", + "Epoch 00016: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 182s 4s/step - loss: 5.4664 - val_loss: 6.4125\n", + "\n", + "Epoch 00016: val_loss did not improve from 5.92832\n", + "Epoch 17/100\n", + "\n", + "Epoch 00017: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 166s 3s/step - loss: 6.0094 - val_loss: 9.2918\n", + "\n", + "Epoch 00017: val_loss did not improve from 5.92832\n", + "Epoch 18/100\n", + "\n", + "Epoch 00018: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 181s 4s/step - loss: 5.1737 - val_loss: 7.6806\n", + "\n", + "Epoch 00018: val_loss did not improve from 5.92832\n", + "Epoch 19/100\n", + "\n", + "Epoch 00019: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 159s 3s/step - loss: 5.2708 - val_loss: 7.1096\n", + "\n", + "Epoch 00019: val_loss did not improve from 5.92832\n", + "Epoch 20/100\n", + "\n", + "Epoch 00020: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 
[==============================] - 173s 3s/step - loss: 5.4765 - val_loss: 5.4921\n", + "\n", + "Epoch 00020: val_loss improved from 5.92832 to 5.49211, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 21/100\n", + "\n", + "Epoch 00021: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 170s 3s/step - loss: 4.6517 - val_loss: 6.6033\n", + "\n", + "Epoch 00021: val_loss did not improve from 5.49211\n", + "Epoch 22/100\n", + "\n", + "Epoch 00022: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 191s 4s/step - loss: 5.1432 - val_loss: 5.6549\n", + "\n", + "Epoch 00022: val_loss did not improve from 5.49211\n", + "Epoch 23/100\n", + "\n", + "Epoch 00023: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 159s 3s/step - loss: 5.4830 - val_loss: 5.8758\n", + "\n", + "Epoch 00023: val_loss did not improve from 5.49211\n", + "Epoch 24/100\n", + "\n", + "Epoch 00024: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 5.3366 - val_loss: 5.3871\n", + "\n", + "Epoch 00024: val_loss improved from 5.49211 to 5.38706, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 25/100\n", + "\n", + "Epoch 00025: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 138s 3s/step - loss: 5.7189 - val_loss: 8.0760\n", + "\n", + "Epoch 00025: val_loss did not improve from 5.38706\n", + "Epoch 26/100\n", + "\n", + "Epoch 00026: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 6.0929 - val_loss: 12.6163\n", + "\n", + "Epoch 00026: val_loss did not improve from 5.38706\n", + "Epoch 27/100\n", + "\n", + "Epoch 00027: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 5.2239 - val_loss: 9.8536\n", + "\n", + "Epoch 00027: val_loss did not improve from 5.38706\n", + "Epoch 28/100\n", + "\n", + "Epoch 00028: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 158s 3s/step - loss: 5.4414 - val_loss: 6.4950\n", + "\n", + "Epoch 00028: val_loss did not improve from 5.38706\n", + "Epoch 29/100\n", + "\n", + "Epoch 00029: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 157s 3s/step - loss: 5.4436 - val_loss: 9.0002\n", + "\n", + "Epoch 00029: val_loss did not improve from 5.38706\n", + "Epoch 30/100\n", + "\n", + "Epoch 00030: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 162s 3s/step - loss: 4.9780 - val_loss: 4.9993\n", + "\n", + "Epoch 00030: val_loss improved from 5.38706 to 4.99925, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 31/100\n", + "\n", + "Epoch 00031: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 140s 3s/step - loss: 4.9645 - val_loss: 5.6612\n", + "\n", + "Epoch 00031: val_loss did not improve from 4.99925\n", + "Epoch 32/100\n", + "\n", + "Epoch 00032: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.5982 - val_loss: 5.2083\n", + "\n", + "Epoch 00032: val_loss did not improve from 4.99925\n", + "Epoch 33/100\n", + "\n", + "Epoch 00033: LearningRateScheduler setting learning rate 
to 0.001.\n", + "50/50 [==============================] - 143s 3s/step - loss: 4.3101 - val_loss: 6.4808\n", + "\n", + "Epoch 00033: val_loss did not improve from 4.99925\n", + "Epoch 34/100\n", + "\n", + "Epoch 00034: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 145s 3s/step - loss: 4.4252 - val_loss: 10.9472\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Epoch 00034: val_loss did not improve from 4.99925\n", + "Epoch 35/100\n", + "\n", + "Epoch 00035: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 4.4998 - val_loss: 7.1254\n", + "\n", + "Epoch 00035: val_loss did not improve from 4.99925\n", + "Epoch 36/100\n", + "\n", + "Epoch 00036: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 4.8952 - val_loss: 7.0446\n", + "\n", + "Epoch 00036: val_loss did not improve from 4.99925\n", + "Epoch 37/100\n", + "\n", + "Epoch 00037: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 154s 3s/step - loss: 4.9868 - val_loss: 9.3251\n", + "\n", + "Epoch 00037: val_loss did not improve from 4.99925\n", + "Epoch 38/100\n", + "\n", + "Epoch 00038: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 4.8918 - val_loss: 5.1689\n", + "\n", + "Epoch 00038: val_loss did not improve from 4.99925\n", + "Epoch 39/100\n", + "\n", + "Epoch 00039: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 143s 3s/step - loss: 4.5572 - val_loss: 4.9839\n", + "\n", + "Epoch 00039: val_loss improved from 4.99925 to 4.98394, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 40/100\n", + "\n", + "Epoch 00040: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 4.4722 - val_loss: 5.7133\n", + "\n", + "Epoch 00040: val_loss did not improve from 4.98394\n", + "Epoch 41/100\n", + "\n", + "Epoch 00041: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 152s 3s/step - loss: 4.9414 - val_loss: 5.5843\n", + "\n", + "Epoch 00041: val_loss did not improve from 4.98394\n", + "Epoch 42/100\n", + "\n", + "Epoch 00042: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 4.5857 - val_loss: 5.1884\n", + "\n", + "Epoch 00042: val_loss did not improve from 4.98394\n", + "Epoch 43/100\n", + "\n", + "Epoch 00043: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 4.7094 - val_loss: 6.7545\n", + "\n", + "Epoch 00043: val_loss did not improve from 4.98394\n", + "Epoch 44/100\n", + "\n", + "Epoch 00044: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 151s 3s/step - loss: 5.0428 - val_loss: 5.2691\n", + "\n", + "Epoch 00044: val_loss did not improve from 4.98394\n", + "Epoch 45/100\n", + "\n", + "Epoch 00045: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 146s 3s/step - loss: 4.9842 - val_loss: 6.5112\n", + "\n", + "Epoch 00045: val_loss did not improve from 4.98394\n", + "Epoch 46/100\n", + "\n", + "Epoch 00046: LearningRateScheduler setting learning rate to 
0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 4.9108 - val_loss: 6.0670\n", + "\n", + "Epoch 00046: val_loss did not improve from 4.98394\n", + "Epoch 47/100\n", + "\n", + "Epoch 00047: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 155s 3s/step - loss: 4.6837 - val_loss: 5.8351\n", + "\n", + "Epoch 00047: val_loss did not improve from 4.98394\n", + "Epoch 48/100\n", + "\n", + "Epoch 00048: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 5.1042 - val_loss: 5.1778\n", + "\n", + "Epoch 00048: val_loss did not improve from 4.98394\n", + "Epoch 49/100\n", + "\n", + "Epoch 00049: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 4.1312 - val_loss: 5.9606\n", + "\n", + "Epoch 00049: val_loss did not improve from 4.98394\n", + "Epoch 50/100\n", + "\n", + "Epoch 00050: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 122s 2s/step - loss: 4.5373 - val_loss: 5.4351\n", + "\n", + "Epoch 00050: val_loss did not improve from 4.98394\n", + "Epoch 51/100\n", + "\n", + "Epoch 00051: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 135s 3s/step - loss: 4.8955 - val_loss: 6.0315\n", + "\n", + "Epoch 00051: val_loss did not improve from 4.98394\n", + "Epoch 52/100\n", + "\n", + "Epoch 00052: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 4.9445 - val_loss: 5.7199\n", + "\n", + "Epoch 00052: val_loss did not improve from 4.98394\n", + "Epoch 53/100\n", + "\n", + "Epoch 00053: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 139s 3s/step - loss: 3.9748 - val_loss: 5.5974\n", + "\n", + "Epoch 00053: val_loss did not improve from 4.98394\n", + "Epoch 54/100\n", + "\n", + "Epoch 00054: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 4.8783 - val_loss: 8.6056\n", + "\n", + "Epoch 00054: val_loss did not improve from 4.98394\n", + "Epoch 55/100\n", + "\n", + "Epoch 00055: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.1649 - val_loss: 6.0042\n", + "\n", + "Epoch 00055: val_loss did not improve from 4.98394\n", + "Epoch 56/100\n", + "\n", + "Epoch 00056: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 4.8997 - val_loss: 9.1298\n", + "\n", + "Epoch 00056: val_loss did not improve from 4.98394\n", + "Epoch 57/100\n", + "\n", + "Epoch 00057: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 151s 3s/step - loss: 4.4433 - val_loss: 7.1151\n", + "\n", + "Epoch 00057: val_loss did not improve from 4.98394\n", + "Epoch 58/100\n", + "\n", + "Epoch 00058: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 4.5827 - val_loss: 5.4356\n", + "\n", + "Epoch 00058: val_loss did not improve from 4.98394\n", + "Epoch 59/100\n", + "\n", + "Epoch 00059: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 137s 3s/step - loss: 3.9437 - val_loss: 4.7926\n", + "\n", + "Epoch 00059: 
val_loss improved from 4.98394 to 4.79262, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 60/100\n", + "\n", + "Epoch 00060: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 125s 3s/step - loss: 4.0939 - val_loss: 5.7098\n", + "\n", + "Epoch 00060: val_loss did not improve from 4.79262\n", + "Epoch 61/100\n", + "\n", + "Epoch 00061: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 161s 3s/step - loss: 5.1152 - val_loss: 5.2079\n", + "\n", + "Epoch 00061: val_loss did not improve from 4.79262\n", + "Epoch 62/100\n", + "\n", + "Epoch 00062: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 4.2958 - val_loss: 4.9239\n", + "\n", + "Epoch 00062: val_loss did not improve from 4.79262\n", + "Epoch 63/100\n", + "\n", + "Epoch 00063: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 3.8241 - val_loss: 4.5443\n", + "\n", + "Epoch 00063: val_loss improved from 4.79262 to 4.54430, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 64/100\n", + "\n", + "Epoch 00064: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 134s 3s/step - loss: 4.7252 - val_loss: 5.9445\n", + "\n", + "Epoch 00064: val_loss did not improve from 4.54430\n", + "Epoch 65/100\n", + "\n", + "Epoch 00065: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 154s 3s/step - loss: 4.4455 - val_loss: 4.8326\n", + "\n", + "Epoch 00065: val_loss did not improve from 4.54430\n", + "Epoch 66/100\n", + "\n", + "Epoch 00066: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 145s 3s/step - loss: 4.4054 - val_loss: 5.6441\n", + "\n", + "Epoch 00066: val_loss did not improve from 4.54430\n", + "Epoch 67/100\n", + "\n", + "Epoch 00067: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 124s 2s/step - loss: 4.4165 - val_loss: 6.8159\n", + "\n", + "Epoch 00067: val_loss did not improve from 4.54430\n", + "Epoch 68/100\n", + "\n", + "Epoch 00068: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 162s 3s/step - loss: 5.0418 - val_loss: 4.8508\n", + "\n", + "Epoch 00068: val_loss did not improve from 4.54430\n", + "Epoch 69/100\n", + "\n", + "Epoch 00069: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 140s 3s/step - loss: 4.1512 - val_loss: 5.4053\n", + "\n", + "Epoch 00069: val_loss did not improve from 4.54430\n", + "Epoch 70/100\n", + "\n", + "Epoch 00070: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 4.6197 - val_loss: 5.2824\n", + "\n", + "Epoch 00070: val_loss did not improve from 4.54430\n", + "Epoch 71/100\n", + "\n", + "Epoch 00071: LearningRateScheduler setting learning rate to 0.001.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "50/50 [==============================] - 152s 3s/step - loss: 4.2807 - val_loss: 5.5992\n", + "\n", + "Epoch 00071: val_loss did not improve from 4.54430\n", + "Epoch 72/100\n", + "\n", + "Epoch 00072: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 143s 3s/step - loss: 4.5368 - 
val_loss: 6.5207\n", + "\n", + "Epoch 00072: val_loss did not improve from 4.54430\n", + "Epoch 73/100\n", + "\n", + "Epoch 00073: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.0598 - val_loss: 5.2421\n", + "\n", + "Epoch 00073: val_loss did not improve from 4.54430\n", + "Epoch 74/100\n", + "\n", + "Epoch 00074: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 4.4861 - val_loss: 5.4182\n", + "\n", + "Epoch 00074: val_loss did not improve from 4.54430\n", + "Epoch 75/100\n", + "\n", + "Epoch 00075: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 4.5263 - val_loss: 4.3774\n", + "\n", + "Epoch 00075: val_loss improved from 4.54430 to 4.37742, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 76/100\n", + "\n", + "Epoch 00076: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 3.8465 - val_loss: 4.5809\n", + "\n", + "Epoch 00076: val_loss did not improve from 4.37742\n", + "Epoch 77/100\n", + "\n", + "Epoch 00077: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 152s 3s/step - loss: 4.0495 - val_loss: 4.9745\n", + "\n", + "Epoch 00077: val_loss did not improve from 4.37742\n", + "Epoch 78/100\n", + "\n", + "Epoch 00078: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 152s 3s/step - loss: 4.6009 - val_loss: 13.4989\n", + "\n", + "Epoch 00078: val_loss did not improve from 4.37742\n", + "Epoch 79/100\n", + "\n", + "Epoch 00079: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 142s 3s/step - loss: 4.6687 - val_loss: 6.4490\n", + "\n", + "Epoch 00079: val_loss did not improve from 4.37742\n", + "Epoch 80/100\n", + "\n", + "Epoch 00080: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 4.5297 - val_loss: 8.0478\n", + "\n", + "Epoch 00080: val_loss did not improve from 4.37742\n", + "Epoch 81/100\n", + "\n", + "Epoch 00081: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.2662 - val_loss: 5.7929\n", + "\n", + "Epoch 00081: val_loss did not improve from 4.37742\n", + "Epoch 82/100\n", + "\n", + "Epoch 00082: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 4.1048 - val_loss: 4.6117\n", + "\n", + "Epoch 00082: val_loss did not improve from 4.37742\n", + "Epoch 83/100\n", + "\n", + "Epoch 00083: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 156s 3s/step - loss: 3.9905 - val_loss: 4.5542\n", + "\n", + "Epoch 00083: val_loss did not improve from 4.37742\n", + "Epoch 84/100\n", + "\n", + "Epoch 00084: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 155s 3s/step - loss: 4.3129 - val_loss: 4.4676\n", + "\n", + "Epoch 00084: val_loss did not improve from 4.37742\n", + "Epoch 85/100\n", + "\n", + "Epoch 00085: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 156s 3s/step - loss: 3.7951 - val_loss: 4.4689\n", + "\n", + "Epoch 00085: val_loss did not improve from 
4.37742\n", + "Epoch 86/100\n", + "\n", + "Epoch 00086: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 155s 3s/step - loss: 4.3618 - val_loss: 4.4048\n", + "\n", + "Epoch 00086: val_loss did not improve from 4.37742\n", + "Epoch 87/100\n", + "\n", + "Epoch 00087: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 156s 3s/step - loss: 4.3538 - val_loss: 4.6832\n", + "\n", + "Epoch 00087: val_loss did not improve from 4.37742\n", + "Epoch 88/100\n", + "\n", + "Epoch 00088: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 152s 3s/step - loss: 4.2076 - val_loss: 4.4796\n", + "\n", + "Epoch 00088: val_loss did not improve from 4.37742\n", + "Epoch 89/100\n", + "\n", + "Epoch 00089: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 146s 3s/step - loss: 4.1322 - val_loss: 4.5462\n", + "\n", + "Epoch 00089: val_loss did not improve from 4.37742\n", + "Epoch 90/100\n", + "\n", + "Epoch 00090: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 157s 3s/step - loss: 4.4995 - val_loss: 4.5660\n", + "\n", + "Epoch 00090: val_loss did not improve from 4.37742\n", + "Epoch 91/100\n", + "\n", + "Epoch 00091: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 158s 3s/step - loss: 4.2653 - val_loss: 4.5265\n", + "\n", + "Epoch 00091: val_loss did not improve from 4.37742\n", + "Epoch 92/100\n", + "\n", + "Epoch 00092: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 4.3702 - val_loss: 4.5276\n", + "\n", + "Epoch 00092: val_loss did not improve from 4.37742\n", + "Epoch 93/100\n", + "\n", + "Epoch 00093: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 3.7340 - val_loss: 4.5439\n", + "\n", + "Epoch 00093: val_loss did not improve from 4.37742\n", + "Epoch 94/100\n", + "\n", + "Epoch 00094: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 151s 3s/step - loss: 4.0253 - val_loss: 4.3250\n", + "\n", + "Epoch 00094: val_loss improved from 4.37742 to 4.32498, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 95/100\n", + "\n", + "Epoch 00095: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 143s 3s/step - loss: 4.0254 - val_loss: 4.6277\n", + "\n", + "Epoch 00095: val_loss did not improve from 4.32498\n", + "Epoch 96/100\n", + "\n", + "Epoch 00096: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 3.9857 - val_loss: 4.2953\n", + "\n", + "Epoch 00096: val_loss improved from 4.32498 to 4.29533, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 97/100\n", + "\n", + "Epoch 00097: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 157s 3s/step - loss: 3.6750 - val_loss: 4.5637\n", + "\n", + "Epoch 00097: val_loss did not improve from 4.29533\n", + "Epoch 98/100\n", + "\n", + "Epoch 00098: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 154s 3s/step - loss: 3.7435 - val_loss: 4.3923\n", + "\n", + "Epoch 00098: val_loss did not improve from 4.29533\n", + 
"Epoch 99/100\n", + "\n", + "Epoch 00099: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 162s 3s/step - loss: 4.0930 - val_loss: 4.4010\n", + "\n", + "Epoch 00099: val_loss did not improve from 4.29533\n", + "Epoch 100/100\n", + "\n", + "Epoch 00100: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 134s 3s/step - loss: 3.8983 - val_loss: 4.4451\n", + "\n", + "Epoch 00100: val_loss did not improve from 4.29533\n" + ] + } + ], + "source": [ + "#ENTRENAMIENTO DE MODELO\n", + "#####################################################################\n", + "# Instantiate two `DataGenerator` objects: One for training, one for validation.\n", + "######################################################################\n", + "# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n", + "\n", + "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "\n", + "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n", + "\n", + "\n", + "\n", + "# The XML parser needs to now what object class names to look for and in which order to map them to integers.\n", + "classes = ['background' ] + labels\n", + "\n", + "train_dataset.parse_xml(images_dirs= [config['train']['train_image_folder']],\n", + " image_set_filenames=[config['train']['train_image_set_filename']],\n", + " annotations_dirs=[config['train']['train_annot_folder']],\n", + " classes=classes,\n", + " include_classes='all',\n", + " #classes = ['background', 'panel', 'cell'], \n", + " #include_classes=classes,\n", + " exclude_truncated=False,\n", + " exclude_difficult=False,\n", + " ret=False)\n", + "\n", + "val_dataset.parse_xml(images_dirs= [config['test']['test_image_folder']],\n", + " image_set_filenames=[config['test']['test_image_set_filename']],\n", + " annotations_dirs=[config['test']['test_annot_folder']],\n", + " classes=classes,\n", + " include_classes='all',\n", + " #classes = ['background', 'panel', 'cell'], \n", + " #include_classes=classes,\n", + " exclude_truncated=False,\n", + " exclude_difficult=False,\n", + " ret=False)\n", + "\n", + "#########################\n", + "# 3: Set the batch size.\n", + "#########################\n", + "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n", + "\n", + "##########################\n", + "# 4: Set the image transformations for pre-processing and data augmentation options.\n", + "##########################\n", + "# For the training generator:\n", + "\n", + "\n", + "# For the validation generator:\n", + "convert_to_3_channels = ConvertTo3Channels()\n", + "resize = Resize(height=img_height, width=img_width)\n", + "\n", + "######################################3\n", + "# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n", + "#########################################\n", + "# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\n", + "if config['model']['backend'] == 'ssd300':\n", + " predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n", + " model.get_layer('fc7_mbox_conf').output_shape[1:3],\n", + " 
model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n", + " model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n", + " model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n", + " model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n", + " ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n", + " img_width=img_width,\n", + " n_classes=n_classes,\n", + " predictor_sizes=predictor_sizes,\n", + " scales=scales,\n", + " aspect_ratios_per_layer=aspect_ratios,\n", + " two_boxes_for_ar1=two_boxes_for_ar1,\n", + " steps=steps,\n", + " offsets=offsets,\n", + " clip_boxes=clip_boxes,\n", + " variances=variances,\n", + " matching_type='multi',\n", + " pos_iou_threshold=0.5,\n", + " neg_iou_limit=0.5,\n", + " normalize_coords=normalize_coords)\n", + "\n", + "elif config['model']['backend'] == 'ssd7':\n", + " predictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n", + " model.get_layer('classes5').output_shape[1:3],\n", + " model.get_layer('classes6').output_shape[1:3],\n", + " model.get_layer('classes7').output_shape[1:3]]\n", + " ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n", + " img_width=img_width,\n", + " n_classes=n_classes,\n", + " predictor_sizes=predictor_sizes,\n", + " scales=scales,\n", + " aspect_ratios_global=aspect_ratios,\n", + " two_boxes_for_ar1=two_boxes_for_ar1,\n", + " steps=steps,\n", + " offsets=offsets,\n", + " clip_boxes=clip_boxes,\n", + " variances=variances,\n", + " matching_type='multi',\n", + " pos_iou_threshold=0.5,\n", + " neg_iou_limit=0.3,\n", + " normalize_coords=normalize_coords)\n", + "\n", + "\n", + "\n", + " \n", + "data_augmentation_chain = DataAugmentationVariableInputSize(resize_height = img_height,\n", + " resize_width = img_width,\n", + " random_brightness=(-48, 48, 0.5),\n", + " random_contrast=(0.5, 1.8, 0.5),\n", + " random_saturation=(0.5, 1.8, 0.5),\n", + " random_hue=(18, 0.5),\n", + " random_flip=0.5,\n", + " n_trials_max=3,\n", + " clip_boxes=True,\n", + " overlap_criterion='area',\n", + " bounds_box_filter=(0.3, 1.0),\n", + " bounds_validator=(0.5, 1.0),\n", + " n_boxes_min=1,\n", + " background=(0,0,0))\n", + "#######################\n", + "# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n", + "#######################\n", + "\n", + "train_generator = train_dataset.generate(batch_size=batch_size,\n", + " shuffle=True,\n", + " transformations= [data_augmentation_chain],\n", + " label_encoder=ssd_input_encoder,\n", + " returns={'processed_images',\n", + " 'encoded_labels'},\n", + " keep_images_without_gt=False)\n", + "\n", + "val_generator = val_dataset.generate(batch_size=batch_size,\n", + " shuffle=False,\n", + " transformations=[convert_to_3_channels,\n", + " resize],\n", + " label_encoder=ssd_input_encoder,\n", + " returns={'processed_images',\n", + " 'encoded_labels'},\n", + " keep_images_without_gt=False)\n", + "\n", + "# Summary instance training\n", + "category_train_list = []\n", + "for image_label in train_dataset.labels:\n", + " category_train_list += [i[0] for i in train_dataset.labels[0]]\n", + "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n", + "for i in summary_category_training.keys():\n", + " print(i, ': {:.0f}'.format(summary_category_training[i]))\n", + "\n", + "\n", + "\n", + "# Get the number of samples in the training and validations datasets.\n", + "train_dataset_size = train_dataset.get_dataset_size()\n", + "val_dataset_size = 
val_dataset.get_dataset_size()\n", + "\n", + "print(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\n", + "print(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n", + "\n", + "\n", + "\n", + "##########################\n", + "# Define model callbacks.\n", + "#########################\n", + "\n", + "# TODO: Set the filepath under which you want to save the model.\n", + "model_checkpoint = ModelCheckpoint(filepath= config['train']['saved_weights_name'],\n", + " monitor='val_loss',\n", + " verbose=1,\n", + " save_best_only=True,\n", + " save_weights_only=False,\n", + " mode='auto',\n", + " period=1)\n", + "#model_checkpoint.best =\n", + "\n", + "csv_logger = CSVLogger(filename='log.csv',\n", + " separator=',',\n", + " append=True)\n", + "\n", + "learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n", + " verbose=1)\n", + "\n", + "terminate_on_nan = TerminateOnNaN()\n", + "\n", + "callbacks = [model_checkpoint,\n", + " csv_logger,\n", + " learning_rate_scheduler,\n", + " terminate_on_nan]\n", + "\n", + "\n", + "\n", + "batch_images, batch_labels = next(train_generator)\n", + "\n", + "\n", + "initial_epoch = 0\n", + "final_epoch = 100 #config['train']['nb_epochs']\n", + "steps_per_epoch = 50\n", + "\n", + "history = model.fit_generator(generator=train_generator,\n", + " steps_per_epoch=steps_per_epoch,\n", + " epochs=final_epoch,\n", + " callbacks=callbacks,\n", + " validation_data=val_generator,\n", + " validation_steps=ceil(val_dataset_size/batch_size),\n", + " initial_epoch=initial_epoch,\n", + " verbose = 1 if config['train']['debug'] else 2)\n", + "\n", + "history_path = config['train']['saved_weights_name'].split('.')[0] + '_history'\n", + "\n", + "np.save(history_path, history.history)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dict_keys(['val_loss', 'loss', 'lr'])\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzsnXd4m9W9xz9Hw5Z3HO/YGQ7ZCdmELPbes6xCW2bpAm5bKNzuXm5LCx0UKGXTUhouZe8VEkIWIXuH7DjLM463LUvn/nHeV8uyLTmyJFvn8zx5tF6975Ejne/5zSOklGg0Go0mcbHEegAajUajiS1aCDQajSbB0UKg0Wg0CY4WAo1Go0lwtBBoNBpNgqOFQKPRaBIcLQQaTRcIIZ4XQtwf4rF7hBBnHut5NJpoo4VAo9FoEhwtBBqNRpPgaCHQ9HkMl8zdQoj1QohGIcQzQogCIcT7Qoh6IcQnQohsn+MvFkJsEkLUCiEWCiHG+rw2RQix2njf/wGOgGtdKIRYa7x3qRBiYg/HfKsQYocQokYI8ZYQYpDxvBBC/FkIUSGEOGp8pgnGa+cLITYbYzsghPhxj/5gGk0AWgg0/YUrgLOAUcBFwPvAfwO5qO/5HQBCiFHAPOAuIA94D3hbCJEkhEgC3gBeAAYC/zHOi/HeqcCzwLeBHOAJ4C0hRHI4AxVCnA78DrgKKAL2Ai8ZL58NnGx8jgHA1UC18dozwLellBnABODTcK6r0XSGFgJNf+ERKWW5lPIA8DnwhZRyjZSyFXgdmGIcdzXwrpTyYymlE3gISAFmAzMBO/AXKaVTSvkK8KXPNW4FnpBSfiGldEkp/wG0Gu8Lh68Dz0opVxvjuw+YJYQYBjiBDGAMIKSUW6SUh4z3OYFxQohMKeURKeXqMK+r0QRFC4Gmv1Duc785yON04/4g1AocACmlGygDio3XDkj/Tox7fe4PBX5kuIVqhRC1wGDjfeEQOIYG1Kq/WEr5KfAo8BhQLoR4UgiRaRx6BXA+sFcI8ZkQYlaY19VogqKFQJNoHERN6IDyyaMm8wPAIaDYeM5kiM/9MuB/pZQDfP6lSinnHeMY0lCupgMAUsq/SimnAeNRLqK7jee/lFJeAuSjXFgvh3ldjSYoWgg0icbLwAVCiDOEEHbgRyj3zlJgGdAO3CGEsAkhLgdm+Lz3KeB2IcSJRlA3TQhxgRAiI8wx/Bu4UQgx2Ygv/BblytojhDjBOL8daARaAJcRw/i6ECLLcGnVAa5j+DtoNB60EGgSCinlNuB64BGgChVYvkhK2SalbAMuB74FHEHFE17zee9KVJzgUeP1Hcax4Y5hPvBz4FWUFXIccI3xciZKcI6g3EfVqDgGwA3AHiFEHXC78Tk0mmNG6I1pNBqNJrHRFoFGo9EkOFoINBqNJsHRQqDRaDQJjhYCjUajSXBssR5AKOTm5sphw4bFehgajUbTp1i1alWVlDKvu+P6hBAMGzaMlStXxnoYGo1G06cQQuzt/ijtGtJoNJqERwuBRqPRJDhaCDQajSbB6RMxgmA4nU72799PS0tLrIfSqzgcDkpKSrDb7bEeikaj6af0WSHYv38/GRkZDBs2DP9mkf0HKSXV1dXs37+f0tLSWA9Ho9H0U/qsa6ilpYWcnJx+KwIAQghycnL6vdWj0WhiS58VAqBfi4BJInxGjUYTW/q0EHRLy1GoPxzrUWg0Gk1c07+FoLVBCUEvtNqura3lb3/7W9jvO//886mtrY34eDQajaan9G8hsKcAEtoj72PvTAhcrq43jXrvvfcYMGBAxMej0Wg0PaXPZg2FhD1F3TqbvfcjxL333svOnTuZPHkydrud9PR0ioqKWLt2LZs3b+bSSy+lrKyMlpYW7rzzTm677TbA2y6joaGB8847j7lz57J06VKKi4t58803SUmJ7Dg1Go2mO/qFEPz67U1sPlgX/MW2BrAeBWtyWOccNyiTX140vtPXH3jgATZu3MjatWtZuHAhF1xwARs3bvSkeT777LMMHDiQ5uZmTjjhBK644gpycnL8zrF9+3bmzZvHU089xVVXXcWrr77K9dfr3Qc1Gk106RdC0CXCAm43WHv3MjNmzPDL9f/rX//K66+/DkBZWRnbt2/vIASlpaVMnjwZgGnTprFnz57eHaRGo9EEoV8IQVcrd2r3QXMtFB4PvZiKmZaW5rm/cOFCPvnkE5YtW0Zqaiqnnnpq0FqA5GSvlWK1Wmlubu618Wk0Gk1n9O9gMajYgHSByxnR02ZkZFBfXx/0taNHj5KdnU1qaipbt25l+fLlEb22RqPRRJJ+YRF0ic0IvrY3gy0pYqfNyclhzpw5TJgwgZSUFAoKCjyvnXvuufz9739n4sSJjB49mpkzZ0bsuhqNRhNphOyFHHsAIcSzwIVAhZRyQsBrPwYeBPKklFXdnWv69OkycGOaLVu2MHbs2O4H4nbB4fWQUQgZRWF8gvgh5M+q0Wg0PgghVkkpp3d3XG+6hp4Hzg18UggxGDgL2NeL1/ZisaqMIaf2v2s0Gk0wek0IpJSLgJogL/0ZuAfoHVMkGPYULQQajUbTCVENFgshLgYOSCnXhXDsbUKIlUKIlZWVlcd2YXsKuNrA3X5s59FoNJp+SNSEQAiRCvwU+EUox0spn5RSTpdSTs/Lyzu2i9tT1a22CjQajaYD0bQIjgNKgXVCiD1ACbBaCFHY61f2bTWh0Wg0Gj+ilj4qpdwA5JuPDTGYHkrW0DFjtYPFpoVAo9FogtBrFoEQYh6wDBgthNgvhLi5t64VEhEOGPe0DTXAX/7yF5qamiI2Fo1GozkWejNr6FopZZGU0i6lLJFSPhPw+rCoWAMmthTVjjpCdRNaCDQaTX+h/1cWm1isgFRCEIGeQ75tqM866yzy8/N5+eWXaW1t5bLLLuPXv/41jY2NXHXVVezfvx+Xy8XPf/5zysvLOXjwIKeddhq5ubksWLDg2D+bRqPRHAP9QwjevxcOb+j6GFcbuFohKR0IQQgKj4fzHuj0Zd821B999BGvvPIKK1asQErJxRdfzKJFi6isrGTQoEG8++67gOpBlJWVxZ/+9CcWLFhAbm5uGB9So9Foeof+33TOxLQCeqGlxkcffcRHH33ElClTmDp1Klu3bmX79u0cf/zxfPLJJ/zkJz/h888/JysrK+LX1mg0mmOlf1gEXazcPTQfgSN7IG9MxHcrk1Jy33338e1vf7vDa6tWreK9997jvvvu4+yzz+YXvwipjEKj0WiiRgJZBMZHle6InM63DfU555zDs88+S0NDAwAHDhygoqKCgwcPkpqayvXXX8+Pf/xjVq9e3eG9Go1GE2v6h0UQChEWAt821Oeddx7XXXcds2bNAiA9PZ1//etf7Nixg7vvvhuLxYLdbufxxx8H4LbbbuO8886jqKhIB4s1Gk3M6bU21JHkmNpQm7Q1QtVXMHA4OPqWr163odZoND0hHtpQxxcRtgg0Go2mv5B4QuDWQqDRaDS+9GkhCMut1Uctgr7gutNoNH2bPisEDoeD6urq0CdKjxC4em
9QEUZKSXV1NQ6HI9ZD0Wg0/Zg+mzVUUlLC/v37CWvTmtoKcLSC40jvDSzCOBwOSkpKYj0MjUbTj+mzQmC32yktLQ3vTf97Bkz7Fpz7214Zk0aj0fRF+qxrqEckpYGzMdaj0Gg0mrgiwYQgFdp0+2eNRqPxJbGEwJ4GTi0EGo1G40tiCUFSqqow1mg0Go2HxBICe6q2CDQajSaAxBKCpDQdI9BoNJoAEksI7Kk6a0ij0WgC6DUhEEI8K4SoEEJs9HnuQSHEViHEeiHE60KIAb11/aDorCGNRqPpQG9aBM8D5wY89zEwQUo5EfgKuK8Xr98RnTWk0Wg0Heg1IZBSLgJqAp77SErZbjxcDkS3d4KZNaQbuWk0Go2HWMYIbgLe7+xFIcRtQoiVQoiVYfUT6gp7qmo652qLzPk0Go2mHxATIRBC/BRoB17s7Bgp5ZNSyulSyul5eXmRuXBSmrrVtQQajUbjIepN54QQ3wQuBM6Q0W62b09Vt84mYGBUL63RaDTxSlSFQAhxLvAT4BQpZfSjth6LQAeMNRqNxqQ300fnAcuA0UKI/UKIm4FHgQzgYyHEWiHE33vr+kHxWATaNaTRaDQmvWYRSCmvDfL0M711vZBIMoRAWwQajUbjIcEqi3WwWKPRaAJJLCFI0q4hjUajCSSxhMCuXUMajUYTSGIJgZk1pNtMaDQajYfEEgKPRaBdQxqNRmOSmEKgLQKNRqPxkFhCYLGALUVbBJr+z/s/gd2LYj0KTR8h6i0mYk6SbkWtSQBWPAUWG5SeHOuRaPoAiWURgN6cRtP/cbtVl932lliPRNNHSDwhsKfpOgJN/8btVLfO5tiOQ9NnSDwh0BaBpr9j7rehhUATIoknBPZUHSPQ9G9chkUQr64hKeHtu2DvsliPRGOQeEKQlKazhjT9G1ecu4ZcTlj1HOycH+uRaAwSTwi0RaDp78S7a8i0VOLVYklAEk8IdIxA098xg8Xt8SoErcat3js8Xkg8IdBZQ5r+jsc1FKcrbm0RxB2JJwTaItD0d1x9xCJwaYsgXkg8IbCnKdPZ/LFoNP2NuI8RGOPSFkHckHhCkKQ7kGr6OXHvGtIxgngj8YRAdyDV9HfiPlisYwTxRq8JgRDiWSFEhRBio89zA4UQHwshthu32b11/U4xN6fRcQJNf8V0Dbnb49MFagqAjhHEDb1pETwPnBvw3L3AfCnlSGC+8Ti62PW+xZp+jqvdez8e4wQe15C2COKFXhMCKeUioCbg6UuAfxj3/wFc2lvX75SuYgQVW+EvE6HuUHTHpNFEEt+VdjxOth7XUGtsx6HxEO0YQYGU8hCAcZvf2YFCiNuEECuFECsrKysjNwJ7F66hXQugdi9Ub4/c9TSaaOMrBHFtEWghiBfiNlgspXxSSjldSjk9Ly8vcidO6sI1dNgIZ7TWR+56Gk20cfu4huLZInBpIYgXoi0E5UKIIgDjtiLK1/fZwD6IRVBuCkFD9Maj0UQabRFowiTaQvAW8E3j/jeBN6N8fW/WUKBF4GqHyq3qfmtddMek0UQS30yhuBQCHSOIN3ozfXQesAwYLYTYL4S4GXgAOEsIsR04y3gcXTqzCGp2er+gbdoi0PRhfIUgHmsJtEUQd/Ta5vVSyms7eemM3rpmIJX1rRxpamNUQYb3SY9FECAE5Ru993WMQNOX8XMN6RiBpnviNlgcCf7yyVdc++Ry/yctVrAmd0wfPbwRLDaVVaRjBJq+jLuPWATudnC7YjsWDdDPhSAzxU5dixMppf8LSWlBLIJNkDsKUrK1RaDp2/SVGAFo91Cc0K+FICvFjtMlaXYGrDqS0jrGCMo3QsEESM6ANi0Emj5M3AuBz+Qfj+mtCUi/FwKAo80B/Vbsqf5ZQ001UHcACsZDcnrfswjcLvjsD9BcG+uRaOKBvlJZDLrfUJzQr4Ug06GEoK653f+FwM1pyjep20LDIuhrMYKKzbDgf/Vm4BqFywk2h7ofj8Fi3zHFo1AlIP1aCDq3CAJiBKYQFEyApD5oEZjmv/a3akAFi+0pKvkhHtut+8UItEUQDySmECSl+mcNlW+A1FxIL4DkzL5XR+DUOz5pfHC1gcWuXKDx+J3QMYK4o9fqCOKBzBT18eqCxggCLIKC8SBE34wR6EpNjS+udrAmqftxGSzWMYJ4I0EtAp+sIVc7VGyBwuPV4+QMJQSBKafxjHYNaXxxtYHVBnZHfK6421shySjyjMfxJSD9WggyHCFkDdXsUl/GgvHqcXIGIPvWnsbaItD44mpTFoEtJX5jBI5M477+zsYD/VoIrBZBRrKNupZgMYImlW658HfqOdMiSEpXt30pTmBaBLpkXwOqYteapALG8Zg11N4KjizvfU3M6dcxAlDVxUGzhlyt8LdZ0FAOJ9+jMoZABYtBuYcyCqM72J6iLQKNL642o11KSpy2mGiBzEHqvl68xAX9XgiyUuwdg8WmWerIgmtehOKp3teSDYugLwWMddaQxheX03ANOeLTxaktgrij3wtBZoqtY0HZpGsgNQfGXQK2ZP/Xko0gVl8SAm0RaHxxOcFqVxZBY1WsR9MRHSOIO/p1jACURdDBNZSSDROv6igC0LdjBPpHpQEjWGxXFkG8uYakVO4gbRHEFYkpBF3Rly0C7W/VgKostiYZ2XFx5i40J34zFqe/s3FBvxeCTIe9Y9ZQV/RFIdAWgcYXl9OoLI5Di8BctHgsgjgTqgSl3wtBVoqdpjYXTpc7tDf0RSHwxAj0j0qDv2sobi0CwwWrew3FBSEJgRDiTiFEplA8I4RYLYQ4u7cHFwmyUjspKusMmwOEtY/GCPSPSoN/sNjZFF9V8uZixZZixDDiTKgSlFAtgpuklHXA2UAecCOx2Hi+B3hbUYcoBEJ420z0FbRFoPHFTB+1pwAyvvr5mBaBLVltGRtPY0tgQhUCYdyeDzwnpVzn81zYCCH+SwixSQixUQgxTwjh6Om5uqPTfkNd0df2JDDNf/2j0oARLLarVTfEV+M5M2Zhcygx0IuXuCBUIVglhPgIJQQfCiEygBCd7v4IIYqBO4DpUsoJgBW4pifnCoXMHgtBXS+NqBdo1wVlGh88baiN9VU8fS9Mi8BuCoFevMQDoRaU3QxMBnZJKZuEEANR7qFjuW6KEMIJpAIHj+FcXZJltqJuae/mSB+S0qMfI2iqgSN7/KucQ8WpC8o0Pngqi02LII4az3liBNoiiCdCtQhmAduklLVCiOuBnwFHe3JBKeUB4CFgH3AIOCql/Kgn5wqFnlsEUY4RLH0E/nlpz97brtNHNT64nN421BBfmUOeGIFDxwjiiFCF4HGgSQgxCbgH2Av8sycXFEJkA5cApcAgIM0Ql8DjbhNCrBRCrKysrOzJpYAeBIvB2JwmyhZBYwW0HlUb0YeLtgg0vphtqO2p6nE81RJ4LIJkbRHEEaEKQbuUUqIm8IellA8DGT285pnAbillpZTSCbwGzA48SEr5pJRyupRyel5eXg8vBQ67lWSbJUwhiIFF0GLEJHrSJKxdt6HWGLhdgPQ2nYP4tQhsyXrxEieEKgT1Qoj7g
BuAd4UQVsDew2vuA2YKIVKFEAI4A9jSw3OFRNhtJpIyoh8jaD0GIXD6pI/GU864JvqYrhazDTXEWdZQoEWghSAeCFUIrgZaUfUEh4Fi4MGeXFBK+QXwCrAa2GCM4cmenCtUetRvKNrbVfbUIpBSWQTCqh5rn2tiY/7/+1oEcekaMmMEWgjigZCEwJj8XwSyhBAXAi1Syh7FCIzz/VJKOUZKOUFKeYOUsle/DZkp4fYbSifq21WarqhwLRFzRaW7OWpA7cENRmWxESOIS9eQtgjiiVBbTFwFrAC+BlwFfCGEuLI3BxZJ+kQHUtM1FG6qn7naSxlgPNY/rITGYxH41hHEqUWghSBuCLWO4KfACVLKCgAhRB7wCcrFE/dkpdjZXhHGpJ5kCEE04wQ9dQ05A7o5alM7sXEbCx6/OoJ4EgLj+2lN0kIQR4QaI7CYImBQHcZ7Y06mI8guZV3hsQiiVF3scnpXbWG7hoz3ObRFoEF9l8C/sjiuhKDFaOwodIwgjgjVIvhACPEhMM94fDXwXu8MKfJkGTECt1tisYTQIsmzb3GULIIWH8E5VotA52UnNr6uIdMiiKfvRHurd2dAm0MvXOKEkIRASnm3EOIKYA6q2dyTUsrXe3VkESQzxY6UUN/a7mlC1yXRjhG0+hRpt+kYgeYYcPm4hiwWteqOR4sAwJakv69xQsib10spXwVe7cWx9Bpmm4m6ZmdoQhDtfYv9LIIwr+mxCLQQaPARAuN7bo+znv+BFoHbCW63Ei1NzOhSCIQQ9UCwZHoBSCllZq+MKsL4tqIeHMobzP1Uo2YRHINryBMj0MFiDT7BYkMIbCnx13TOtAisSerW1QqWlNiNSdO1EEgpe9pGIq7ISgmz35AnRhAlIYhEjEC7hjTgU1nsYxHEUx2B09c15NMm266FIJYkhD3maTwXalGZzaFK9GNiEYSbNaSDxRoffCuLQRWVxVsdgW+MAPSeBHFAQghB2PsWCxHdPQlMiyAlO3wz3hmYPqp/VAmNb2UxxN8G9oExAtCLlzggMYSgR3sSZEbfIsgY1IMYQaBrSP+oEhrf9FEwNrCPU4vAagiC7o8VcxJCCNKSrFgtIsyisvQoxgiOqqBeyoAexAgCLAIdLE5sfCuLwcjVjych8LUIjFu9eIk5CSEEQggyHbb43aWstQ4cmZCUdgwxAh0s1uBTWWzkgdhT4sw15BsjMIVAWwSxJiGEAHqyJ0GUYwTJmSqw1xOLQFiViIBeXSU6HYLFKXFoEQQKgf7OxpqEEoLwWlHHwiJI70FlsZF6p1dXGvCvLAYjWBxPQtDi/a56YgTaio01CSMEmWG3oo7ivsWmRdAT15Cz2aeJV1L8rK5qdsOjM6D+cKxHklh0qCyOt2BxMItAC0Gs0ULQGdHOGvLECHqQNWQW49gc8ZOBcXgDVG2Dil7dhVQTSIfK4nhrMdESJFishSDWJIwQZKXYw8saMmMEbnfvDcqktd5rEbid4bl3TIsA4ssiaDEa6UVzcx9NkMriVPWdiMb3uDvcLvX97lBZrIUg1iSUEBxtbsPlDnEf4uQMQIIzCttVttSpymAz4BvONdtbvH3ne7Ot7+7PYdFDoR+vhSA2BGs6B/GxQPDdphL8ew1pYkrCCMG4okycLsnasiOhvSFaexK42tXEb1oEEJ57yNns7Tvfmzs+bXgZFv859OPNIrlobe7T32iqgU/vV6vocHA5VRaZxaoex9OeBL7bVPreaosg5iSMEJw8Kg+rRfDp1oruD4bodSA1J0pHD4XAzyJI7r0ffGuDGleoLgZtERwb2z+GRQ9Cxebw3udq81oDEF+7lAVaBJ5eQ1oIYk1MhEAIMUAI8YoQYqsQYosQYlZvXzMrxc60odl8urUytDd49iSIkhAkZ/ZsH4RAi6C3gsVtDYAMPSfd7J/UcrTr4zTBMb8DLWFaVC6n1+UCKkYAcW4RxMHYEpxYWQQPAx9IKccAk4CopJacPiafLYfqOHQ0hMnM3KUs3B9iuLT4WATmj7anFoG1ly0CCL3OQVsEx4bZfDBc15rb6a0qBu9kGw97EpjfTbtPcgPET6ZbAhN1IRBCZAInA88ASCnbpJS10bj26WPyAVgQilWQO1JNrOte6t1B+VkEpmsojB9te0t0YgSmZRSqteIRAh0j6BHmdyBsi6AtwCIwvhvx0GYi0CIwN7DXFkHMiYVFMByoBJ4TQqwRQjwthEgLPEgIcZsQYqUQYmVlZYjunG4YmZ9OSXZKaHGC9HyY9V1Y/xIcWB2R6wfF1yLokWsoSllDHosgRGulVVsEx4SZORaukLra/YXA436JwxiBeV9Xw8ecWAiBDZgKPC6lnAI0AvcGHiSlfFJKOV1KOT0vLy8iFxZCcPqYfJbsqKLFGUI2xtwfQmoufPQzkCGmnYZLUIsgTNeQxyLoxc3A28IUAtMi6G3XWn/F/Du3hGksu9rA6uMaimeLAHo3wUETMrEQgv3AfinlF8bjV1DCEBVOG5NPs9PFF7truj/YkQmn3Qd7l8DWd3tnQC3HKATOZn+LoLdysj0WQaiuITN9VFsEPSLSrqF4tQisvZjgoAmZqAuBlPIwUCaEGG08dQYQZo5cz5k1PAeH3cKCUNNIp34LckfDx7/oHRPWdKH4po+GWlDmqdQ0fuzWXooRuNq9E0koIiWlriM4VnrqGnK3e6uKwSdYHA9CoC2CeCVWWUM/AF4UQqwHJgO/jdaFHXYrc47L5dOtFchQ3D1WG5z1a6jZCdvei/yAWurUBG5LVvnf1qTOJ9vmWpj/P97qUfPH3dt1BL5WQChC0NYA0g0IbRH0lGOyCHzrCEzXUDwIgWkRBAqBriOINTERAinlWsP/P1FKeamUMsRy38hwxtgC9tU0selgiD+yITPV7dH9kR+M2XDOpKvGc199CJ8/BAfXqMeeFZZP07nesFr8hCAE15AZH8goVEIQD31u+ho9TR8NrCOIp1x9z/c1MFishSDWJExlsS8XHF9EktXCK6tCnNgdA1RudlNV5AdjtqA2SUrvXAjqD6nbBsOt1cEi6KWmc61hWgSmEGSVADJ6G/z0JzwFZWEW5LmcARaBUZsSrxaBTh+NCxJSCLJS7Zw1voA31x6grT2E1aoQkJoDjb0gBIEWgT2184mzoVzdNhpCEMwicDsjvwIP1zVkujOyStStdg+FT6RcQ7ZkQMSJEHRiEehgccxJSCEAuHJaCUeanCzYFmLQODUXmqojP5AOFkFa5wVlphB0ZhH0VjdH34k8bIsAHTDuCcdUWewjBELEzwb2OlgctySsEJw0Ipe8jOTQ3UNpudGxCLqKEdQHCEEwi8D3+UgRbozAnLyyBhuPE9QiaKqBhh4WQ3rqCHoSI7D7P2d3xEkdQSsIS0ALDF1QFg8krBDYrBYun1LMgq0VVDWEsIJOy+2dGEFrPSRneR+bG+IEo8HY9tG0DIJlDUHkf1hmjEBYQ+tZE2gRJGpR2Tt3wSs39uy95t/Z2ajSd0MlMFgMaq+L5qjmYwSnvcW7raqJjhHEBQkrBABXTCuh3S15c+3B7g9O7SWLoKUHFkGj
scrsYBEk+z8fKUxhSs8P0TVkVMMmumuo7iDU7g3/fS6n8pun5qjH4fz9AmMEAJklUHcg/HFEmvZW//gAxNf2qglMQgvBqIIMJpZkheYeSstVP8hIprq5XaqZm1+MIDX4ZNvW6G381qlFYNxG+odlunbS80NMH61TY0nNNd6foELQWg+NPYgrmf//GUXqNpzMoWCuoayS3kl9DhfTIvCltzLdNGGR0EIAcNX0wWw5VMeNz61gd1UXq11zdRbJgLE5wToC0keDuV/qDbdQykCv3zkw+Ga6BHrDIhBW9TcINVjsyPK28k7UGEFLnXLthJuxY/7/m0IQjpAGBotBCUHdwfBcTL1Be2sQIeil2hdNWCS8EFw7Ywg/u2AsX+45wjl/XsSDH26l3RUk/TLNWN1G0j3k23DOJCnNqMwNqHo2rYCiiWpyaW3wsQgCg8W9kDWUnN51jYM2B01/AAAgAElEQVTf8eYezOmASNwYgfn/2xRCXytfzKyxTNMiCNc1FBAjyCoB6fLGmGKFs7mjEFi1RRAPJLwQWC2CW04azqc/PoULJxbx2IKdPPjhto4Hmm6OSAaMfVtQmySlqfYMgT8O0yIonKhuGyuC7Phkxgg6EYLXb4fNb4U/ztYGSMoIXQhajipxs1iUVZCIFoHb5XWjhfudcQa4hsKKEbQHcQ0Z2Vuxdg91FiPojdoXTVjYuj8kMcjPcPCnqyeTlmzjiUW7mDo0m3PGF3oPSDNaYffE59sZQS0Cc0+CRu9KH7wWgSkEDRVBLIIugsWudrXJji0Zxl0c3jjbTIsgLfQWE44B6n6iCoHvZw7XnWhaBBnG9y9siyCIawjiQAg6iRGAqn2xpHR8jyYqJLxFEMjPLhzLpJIsfvzyOvb4xgzSomQRdLZdZf1hZUbnjVKPG7qwCIIFi5uqANmzNMLWBiVQXWU0+eKbCZWc6e2wmkj4ruLDXTz0NFgspVpdd3ANFavbmAtBJxaB+ZomZmghCCDZZuWxr0/FahXc/q9V3g1sHANUwLRXYgS+dQSd7EnQUA7pBZBurBIbK7w+VzMv29qFRWAWofVECNoa1Mo+KV2JTHfBPTNYDNoigPAtAo9ryPi/DtU15DaCwYHB4uQM9f2NuRAEsQg8CQ5aCGKJFoIglGSn8tCVk9h6uJ63zBoDiwVSB3pz+CNBi89eBCa+riFf6g8rIUjNAYTXIggs14fgPypz3D21CEzXEHS/X0KrT9sMR2ZiBot9P3O4VqTpGnJkKQsxVIvAtAQDXUOg4gQxF4IuLILe2lBJExJaCDrhjLH5lGSn8P7GQ94nI91vqLOsIejoi28oVytEq025qcwYgW8coSsz2yMEYW59aI4lKSO0HdTaW5VAJbxF4CsEPbQI7GmGay1EITX3qQh0DUF81BIEjRF0k+CgiQpaCDpBCMG54wtZvKOKuhbjBxbpfkMtdepHa/f5cSR1ESNILzDGkR++RXAsrqHWen+LoCsh8MQ9TCEIYyLrDVzO2EyApvhZbOF/Z0yLICk1PIvKIwTBLIISOFoW3jgiTVCLQAtBPKCFoAvOO74Qp0t6t7VMzYlcsPjIXti/0lt0ZWK6hnyLytrboLnG6zNOz/fGCPwsgi5iBGbr6raG8Ap4pLGfQFJ6524rXzzurjixCFb/Ex6dEf02zObfYcDQHtQRmBZBqhLSiLiGSlTrj1j+XwSNEWghiAe0EHTBlMHZFGQm8/4GI4c/EhZB+Wb499Xw8CTYtxQmXOn/ejDXkJk6aloE6fnqucAflvmjCuZv9e2C2RKGe6i9VQUhk9M7t1Z8CRQCR5YSNXO1Gm1q9ypXS7iT8bFiWkEDS3tWR2BzgMWqLIKQg8XG3zgwWAw+KaQx7DnUlUWgYwQxRQtBF1gsgnPGF7Lwqwqa2tpVLUFL7bFNau/8F+xbBif/GO7aAOf/wf/1YO4XUwh8LYKGStVa2NcisNoB0UmMwGffhXDcQ6YghRojMFNFzbhHrNtMmAIQ7e6bLXUqyyyrpGd1BObf2pHVA9dQsBhBHBSVdRkj0NXFsUQLQTecO76QFqebRV9V+vQb6n51WdPYxtHmAMFobYADK2HajXD6z7yrNF+C1RGYVcW+MYL2ZhUA9v1heTYh6cQiMI8NZ1I0J/BkX9dQF0VlHVxDmf7n8UVKWPIwHNkT+njCxfys4VhBkaC1Xq3mU3PV9yWcyllnkwoUQw+DxV1ZBDGKE7jaVZuLToVA9xuKJTETAiGEVQixRgjxTqzGEAozSgeSnWrn/Y2HOxaVSQmv3grbP/F7j9stufxvS5j7+095+vNd3u0wy75QbpbSkzq/oMWq2kr7WQSGEHgsAkMQavf5WwRgdHPsxCLIGanu98giCDdYHGgRBJnMGsrh41/AF0+GPp5wMT9rtC0CM4U2NUdNgOEIUVuj1w0XVrC4ixhBRqGyUKJpEbhdsPjP8PkfYdVz6rlA11BXtS+aqBFLi+BOYEsMrx8SNquFs8cVMn9LBbub1KT7lzeX8uWeGuV/3vCy90tusGJPDXuqm8jPSOb+d7dw9p8/Y+nOKtjzucoiGTyz64sGVvDWl6udncw2F+nGbXuQJl42R8cfldutYhtmVXJYFoEhBCFnDQXGCLqwCEx/ddny0McTLh7XUJQtAnMLUs/iIQz3kLPJaxkmZ6n/51BWzF25hixWyCyOrhDsWwaf/Arm/wbe+7F6zvwOm/RW63RNWMRECIQQJcAFwNOxuH64nDuhkIbWdm57TW0ysm//Pm7750qqthkT2N4lfqb/66sP8FDy03wwbRXP3XgCQghu+cdKWrYvhOJpalLtikAhaDisfkAWq3psWgTgn3oKaoUV+KNqrlGr0tzRxuMexgjsIQqBsHjdSKZFEGxVa26Wcmhd5/s0Hyu9ZRHsXAC1XbhZPK6hgepxOELQ1ugTIzCFNASrwN2FawiiX0uwz/h9/HCLiofdvgQmXuV/jK2XWqdrwiJWFsFfgHuATh2nQojbhBArhRArKysjWM3bA04amctdZ47kexfMAOCnp+ThcksWLPhQHdB8BCqVcdPidLF0wzYuFwuwL/sLp5WmMe/WmeTY2rCXr6Nt8OzuLxi4XWV9uf/kn5bvvW8LdA0F2frPLCbLOU5N0j2NEdiS1GqzqxiB6RIx216Y7TOCWQR1RtW2ux0Org59TKEipRJBiGyMwO2Gedeq+EZntBodWM2uteFkm7U1ei0C07IKJYXUXAAEyxqC6NcS7FsOeWMhcxAMGAKFE7yLGRPdayguiLoQCCEuBCqklKu6Ok5K+aSUcrqUcnpeXl5Xh/Y6NquFu84cxaWzJwKCHFHPI9dNpaR5K0dsxg99z2IAPtlSzjTnaixINSlufJXCLAd/P6UNK26e3FeMDNxrIJDAXcoaDnvjA6BWmcL4QQVaBME2+jCLydILVM+ZnsYIoPvGc759hsAnRhBkIqs74N3IfF8vuIecTd7JMZIWQcNh5a6pP9T5MS116rP3ZEMjp0/WUHIYFkFXriHwblDjdoU+lp7idkHZChjSjRtU9xqKC2JhEcwBLhZC7AFeAk4XQvwrBuM
IH4tVTcJNVZwyIodp9r280zKZo8lFyv8PvLHmABc41iHT8iF/HKx8FoDxretwCRuP7sjhheXd7GOblOZfUBZoEVisXt9zB4sgyEYfpkWQng8p2T2PEUD3exIE7sHcVYyg7qBaKeaOUoH0SOOb3RXJGIGZ5WSm9QbD4xrqgRC0NfkHiyG0gLFHCDrpLp9VotxHDRXBX48kFVuU+HcnBLrXUFwQdSGQUt4npSyRUg4DrgE+lVJeH+1x9BhzE/vq7SS5GnEMnc7HTSNp3vE51fXNfL7tMHPFesTIs1Wa6ME16t+ez7EMPoHZYwZz/ztb2HLI/4e99XAdL68sU9aC72TrdqmMH1+LALzuoaAWQcCPyvzhp+UFF4L2ts7TG31jBND9ngS+exGY47HYOo8RZBbD4BPV6jHSm5P4fs5IWgRHDCHvTAik9LrIklKVmycsi6DRP30UQrQIzKyhziyCKNYS7FumbrVF0CfQdQThkmY0nju4BoDLL7qYtuJZpDhr+e3zbzBJbiPFVQ+jzoZJV6tJYMnDcGgdYthJPHjlRDJT7Nz50hpPi+sdFfVc8+Ry7nllPc8t2eM/2TZWqR3LfC0CUKt76GgRWJM6rq4aK9RknJINKUFcQ/93PTw6Hco3dfy8rfVqMjdXmYHWSiDm7mQmQnTeZqLugPIfD5mpfPhVX3V+3p5gxgfsaZGNEdQaQlBf3nFLUVDtLNztXrdYak4PgsWBFkEIMQKzDXVXriGITpxg33K1n8KAoV0fZ7EY21VqIYglMRUCKeVCKeWFsRxD2KTmqMn5wGqwp2HNH8NVV16rXjq8nK9lblLBuuGnKV/5hMth0+tqMi89iZz0ZB762kS+Km/ggfe3cuhoM994ZgU2i4WTR+Vx/7ubOdBk8VoEgTUEJuldWQQBrqGGSmUNCGFYBAGT4sHVULMTnj4T1v/H/zWzz5BJdzECc79iX4IVRbndUHdICYGZThvpNFJT8AaWRtgi2KNuXa3BJ2hT9MxJ3PzOhEJ7m5rQAy2CkFxDZrC4M9dQFDeo2bdcCbyZNNAVwb6zmqiiLYJwSctVBWUHV0PRJLBYseUMQ2aWcF3+Ps53rIehs72TwPSb1K01GUpU1tGpo/O5cc4wnl+6hysfX0ZdSzvP33gCj399KqMLM/l4ez3uVmOy9VQVdyIEthTqW5x878XV7K1uNLKGAoLFjRXe/O1A15DTqFA+4VYomgyv3QKLHvK+bu5FYGIPxTUUTAgCLIKmKuWvzixW2UypubAvwnECM0YwsDTCMQKfGE8wf3vghkPhWARmC+oeBYu7cQ05stT5elsIasugbj8MmRXa8cGsVE1U0UIQLmbLgMMboHiqek4IxLC5jG34grSjO2DUOd7jB02F4ulQerLf6v0n545hdEEGlfWtPHnDNCYUZ5GWbOPpb06nzZKKpb2JpodnwEvXqTeYqzkTnxjBJ1vKeXfDIV76six4+mhDhVc4UrLVZG1mjpgpnMXT4JtvwbCTVMdOE3MvApOuLAK32xsk9cURRAjMySizWK0ah8zsBYvAFILhxmeOUAziyB7IGqLumxabL+bq3XQNmYuHUPBtQQ3KJZeUHmawuJP0UYhOCqkZ+O8uPmCSXtB14F3T62ghCJe0XECqyXbQFO/zw+Z6V3MjfYRACPjGG/C15/1O47BbmXfbTN69Yy6zR+R6ni8ekMKZZ1/APlHEsqpUPsu9lqavzVMuFF/MmIEthflb1Kp0/pZyQwgCYwSVXuFIyVbjN10a5qSQVaImkKFzVOsKs22zuReBSVdC0Fqnzt3BIsjo6EIxBcj8XINPhJpdkc1oaa5VMZr0QjWuSOyd7GxRaaODT1CPu7IIfF1DoXY/NeMvpmsIQt/3OSQhiMJOZfuWqcVD/vjQjk8viE4mk6ZTtBCES5p30vZYBADD5qjbgcdB7gj/9yRnBK0mHpiWxMiCjA7PD591KTn3bmTJjL9xY9l5nPZmEv9ZWYbb7ROYNAJ/7cmZLPqqktQkK1+VN1DvtPoHi6VUQpDu4xoCryluTgpmIDFvFCCheod63CFGEJA+umM+PDYTqncG33HNfBxoEXiEwLB0zNVjJNNIm2ogZaByPUBk3A9HywCphAu8rjtfAv8OqTnq7+gMwQ/uydJK9T7nCHFPAnc3dQQQnerifcuVUHaWxhqI2VZdEzO0EISLWSnqGADZpd7ns0uVhTDp2ohcJi3Zxi8uGsdr351DYVYKd7+yngsfWax6FoGaOG94g5WuUdS1tHPnGaqh3J6jLn+LoOWo8h37WQR4fea1ZYDwTshmG4rKbeo2MEZgWgSmm2X3IlVV/eKVSgwguEUQ6OOuO6AmLDPPvmiSiqNEsrCs+YiRKRXwmQOob3Hy67c3UdsUQr8bMz5QOFGNN9gEFugaCqeWwHQN2X2EIDnExnPdVRaDEoLmmq4D/uHicsLWd2HtPFj5nMo+666fli/pBepvE6s9KzRaCMLGtAgGTfHPiBACblsIp9wd0ctNHjyA178zm4evmczRZifXPfUFX+yqVtc77jQ+3VaJ3Sr4+syhjMhPZ+eRNn8h8C0mg+AWQUaht+dLzghAQNVXvLB8L0eP1tBu83FTJKUBkj+9v07VPNTsUuesO0j7K7eoS1p8jofgMYK6g8jMQdzywmoWb69SLq2SEzwV2hGhuQZSs711DZ1YBB9sPMxzS/bw4hf7uj9n7R51mz2sc5dGoGsosGttV5iuIV8rLNTNaUJ1DUHkNqhxu+C121Qs643b4Z27AAnHnR76OczvZmNsW8kkMloIwsXMvvF1C/UyFovgksnFfPzDkynMdPD7D7Z62lR8urWCmcNzSE+2ccbYfHYfcakGcy4jp9y3mAyCCEGZ/74IdgdkD6Xl0Bbuf2czVmcj83c10dqugsubqtTtvz/fwqLtVXBktwqGX/E0lma14v1wR0CdQXKGWq36ukbqDtCQnM8nW8q597X1qqZi2Bw4vD70rRm7o/mI4RoyPnMntQSfb1cTtKegryuO7FHpjukFkFEQPFjs6c/k4xqCEC0CM2vI1zUU4uY0LqdKHe0qZdNTSxCC6HWHlKqr6KbX1P4ad6xRzeXu2e2NoYSCGe/S7qGYoYUgXNLz4cI/w4zbon7p1CQbd545ktX7avlkSwV7qxvZUdHAaaPViuqssQU0S8Mva2YOmTuTdWURBG6Qkzua2n2bkNJFumhh6xHJd/61mv+sLOO5lWrVNjjdzdOLdkLNHhg4nINFZ/Kb9m9SJ1N5an07TpdPhk6wzWnqDlAu1QS5/0gzzy7ZrQLV0h25NNKmGm8RHQR1DbndkiU7qhiYlsTe6ia+2N1NUPfIXtUWw2Lp3CJoqVPBXrPBmqfxXAhC4OzENRRq+mhX8QHwEYIIxAk+/R/VQmXOXXDy3So7a8AQb8fVUPEIgQ4YxwotBD1h+k0dC7yixNemlTA8N42HPtzGx5vVCuqMsWqSnzIkG1uSf3/3wwdVVtCSQ4KF2yposBguh+YjakUXRAhq00vJbt7HjVPVRH3y+FI+3VrB3a+spyhPPXf1pIFs3rEL2uphYC
n/WLaHf7rO5o1zlrClLon3Nvg0ZAvMhZcS6g6yszWTIQNTOWtcAY99uoOKrOOVf3tvBNxDUqrPmDqwS9fQ5kN1VDe28aOzR5GebOPllWUqkN2ZZXBkj3ILgRLXoMHio/4ptIEWgau987qGtoA6AggjWNzetVsIVLWvsBy7EGz/RG04M/WbcOavju1c5iJFWwQxQwtBH8NmtfDDs0exrbyeh+dv57i8NIbmqEnDahEMK1Srsa37K7np+S95+bNVuKTghpd28q3nvuTKJ1bgTspQk2JjFbhaWXkkjX8t3+tpefHuoUyShZPvjFNiMmXkYB762iSunTGY758zGYALRmcwyq5cKi0ZQ/n3F/s4b0IR188sZXhuGs8s3u11swT21G+qBlcb646mMX1YNj89fyxtLjcPflqm6hn2LDn2P1RrnXKRpWQrd5ctJahryHQLnTW2gIsmDaJhw3vwp7Gw4H+Dn7d2r7dtQnqhikMEFvC11vtnTqUMUJNvU5Wxq93N8NiJwYOjphAEWgSBrrVguNqCBor/8MFW3jeF2WqDjEHHLgT7vwQEnPeH0KqHuyJNC0Gs0ULQBzl/QhETijOpb2nn9DH5fq+NKVZuiFufW8LKPTWcOVjgThnIK9+dy8PXTGZ3VSMV7am0NVTTUr0HgL+vc/KzNzZy6oML+eNH23h1r5qEBhxZr06alMGV00r43eUTSU5VE1yGpY0rStVENm+7lfqWdm6aW4rFIrhpbinr9x9lhelmCdycxtiQZkdrFicMG8iw3DRumlPKK6v3U54zXfVxOtbN7s28/RTDTdFJ19XFOyoZU5hBfqaDaycP5BeWZ3ALq6qu3jHf/+DmI2pl7msRQMcgp9mC2sRiVddvqoY1/4LNb6jYwu5FHccdzDVkZmF15x4K4hoqq2nibwt38vM3N9LYasSNBkSglqB6h3IDBbY46Ql2h/qM2jUUM7QQ9EEsFsF9543FZhGcf3yR32ujilVQ+IbphXx+z+mMy2zFnlnI1CHZXDK5mCe/MZ0qVyprv9rNg/+nJrqzZ03nhZtnMGiAg0c+3UGlw1jxHjC2jAhMHwVoa+SMggbcUvC75c1MHjyAaUNV/OGKqSVkp9p5evFudayZqWI06jMzVg7JHKYb7/ne6SPIS0/md5uNPX6PtZ7AnPTNmEjKgA7umOY2F1/uPsJJI5V4Hv/VY5SIKn6T+WvIG6OyYep8XFxm6mi28fcx3YOBAePWuo7V1am5qj/V+z+BoXNVVtDmNzuO29yUxuLz0/RsTtOdELR3yN1/a52q16hqaOMfy/aoJyNRXVy9w8gwixC6ujimaCHoo8wZkcv6X53NlCHZfs87UtRK8rZZg8hKtRvtJbwb+5wyKo9BhUXYWmtJaVaTxFVnzOSkkXm8+p3Z/PuWE3nkptOVuX7A2DXMr6DMWKm2NZLdeoAjtjzasHPzXG9NRUqSletnDuWTLeXsqKhXE+eQ2ap1hZQei6DJkc9xeercmQ47T31jOp81l9KOlfZdxxgnMNpLNNkyWbCtAunI6iAEK/bU0OZyM3dkHhxci/jicbYMuoLny4ezfvZf1er81Vu8GVhms7lAiyBwJRvoGgIVJzi0VvnwL38SRp0LW9/xntvEd79iE/NcR3Z3/ZmDWARvrzvItKHZnDY6jyc+20Vdi9MQggM9b7khpaoZibgQaIsgVmgh6MOkJgWp3LQmq1uzuMi3vYTBwNwCJgx0850pySq7xVg1CyGYPSKXyYMHQN5ob7vl5IDKYlAVsDW7SSkYwU1zSjl3gn/w/Fuzh5GWZOOB97eqJ6Z9U3U43bNY1RxgZfjQYVgsXv/ypMED+N3Vs1jvLmXv6o/8K6m7Yd6Kffz4P+u8cQlj0n96VS03PvclayoF7QFtHj7/qpIkm4UZQzLh7TshNZeiKx5gUJaDK16pYtHI+1TgetEf1BvMv4cnRqCyXZ55fxmPfrrde+JA1xBAmhEwvvivqm/UuEuUq2hvQDzEd1Mak8EzlFX16i1eKy0YbqefEGw7XM/Ww/VcPGkQPzxrNEebnTy7eLfPBjXBV+A7KupVbUdnNFSoJIGc4zo/Jly6qy4u39z1Z9ccE1oI+hs2Qwg86aOV3pWrSUo2SW1HSWs+pCaFYMG+3FHe+4FN50C5MGp2kVo4kl9cNA671f+rlJOezPdOG8EnWypYuqNKTXyOLFj9D1pryjgss5lWmksg504oxFo6l8HNW3nik40hfeTWdhcPfbiNV1btZ/U+Y9VvTPqvbm5iTGEGuxvtVFeWs3BbhUcsFu+o4oRh2aTs/1yt1s++nwE5+bx7x0mcMiqPb6w+jiVpZyM/+4Py5x/Zq2IOhttHGrUZRysP8PjCnTSYPvhgrbin3wzn/A7GXUJ1Qytb0k9UIhzoHmpr8O8zBCrz6VvvqnP+89LO02vNOgKDt9YdwGq4D48vyeKc8QU88/luGhyGaAeJEyzeXsUljy7hm8+tYGdlJ11ma4wK8ogKQYFql94Zb9+pXHWaXkELQX/DIwRtsOEV5WrIG+1/jBk4rS3rWENg4vseX4vAlgIIlTbZVKVaPHfCjXOGUTwghfvf3YLL6oCJV8Pmt2g7sJ6DMocThmUHfd/EOReQJFx88fn7HDra7Pea2y1xBVgK76w7RHVjGzaL4LklhvvEiBHsb3Vw/6UTOHXyKDJp5FvPfcmJv53PnS+tYevhek4amaeylCw2GKu2xshOS+Kpb0znVxeN43u117FPDML5n5tVjMOMDwAvrjxMjUxnak4bjW0u3lx7QLl6nE0dXUPHnQazvktZTROXPLaEi/6+itqS02DL2/57CDuDWASgrnvj+6ow8IXLvO08fPFxDUkpeWvdQeaMyCUvQ30n/uusUTS0tfPiFsMlFBAneGvdQW58fgUl2amk2K1eay4Qsw9VRF1D+crKCNb6wtmihLp6R2iFdZqw0ULQ3zCFoGYnvPNDtQfCpOv8j3EMUAHZqq9UBkkw/CwCHyGwWJRVUG6s1rM7FwKH3cq9541h86E6Xl29X+Wcu1rJOLqNCnKYUJwV9H1iyEyksHCBWMofP9zmeb6prZ3LH1/KdU8tp90oWJNS8vzSPYzIT+ebs4fx/sbDSjyaa2gUqRxXkMW0odnk5BSQQgsPXTaWGaUD+Xx7FRYBZ4zJV90yiyb55e4LIfjWnFKe+/ap/Lf1v3A3HlF7UBhuobVltfzm7c00JuVySpGLsUWZ/Gv5PmRrQJ8hH3ZXNXLVE8uob2lnYFoSDx8apwr+zG0dwXANpfFVeT03PPMFv3prE2+uPcCB2mblUvr6f1SX2x2fdPzDuZyeOoI1ZbWU1TRz8SRv19oxhZlcO2MIj642WpD4WATPLt7NHfPWMGVINi/fPovvnHocH28uZ/muIEVw1TuU4GR1/O6U17Xw3RdXccljSzjtoYXM/O18fvLKejYe6KYOoquiskPrvK7Ow+u7Po+mR2gh6G+Ym4HP/42a7C9/omMXSDOTxtnUuUXgEQLhX9wE6vHhDer+wOFdDufCiUVMGTKAhz7cxpq2YtyDpgHgTi8i2WYN/iZHJ
mLajXzNsoATN/yczWWVuN2SH728jrVltXyxu4ZHF6hV6ep9R9hw4Cjfmj2Mb80ehpSSF5btpba6nGpXGtfOGIIQwlNdfOX4dB69biorf3omK392FiMH2pXvuZNNVKYMyeZPd9zA8xmqj9IzmyWjfvY+lz62hLyMZAoHDUU0VnD9zCFsOVTHxl37PZ/Bl/X7a7n6iWW0truZd+tM/nTVZP6vdgxtItnfPWTsV/zQh9v4YncN//dlGXe+tJZT/rBATcoDh6sMpGAToo8QvLX2IEk2C+eM99/i9JcXjWPkkEHUyVSqD+7E5Zb86q1N/OadzZwzvoB/3jSDrBQ7N80ppSjLwW/f29IxVlO9U43D4v//53S5+d6Lq1mwtZKsFDsTirOYPiybt9Yd5MJHFnPF40tZW+YfsK9rcfKjl9fx2UHDPRlMCPav8N4/tK7j65pjRgtBf8MMFrY1wLm/Cz5Rp/i4ZIKs6gC1T0BShrIGAmMIST57AHfhGgK1sv7FheOobXZy2d+W8sv9SgjS8oZ0/Tku+CMtc+7ma9ZFuP91JY9/sIr3Nx7mZxeM5bIpxTzy6Q7W7DvCc0v2kOGwcfnUYgYbVcrzVuzj0OGD1IkMLp9S4v+ZDZeRxSIYmJakVvmuNrWrXCcUZDr45h338+mI+6gffwM3zSnlv84cxb9vPRF7ViHUl3PJ5GLSkqx8uNrYdzk5k8bWduat2Mcljy3h4keX4KXOZoEAABivSURBVJbw0m0zGTcok7kjc7lmzljmt0+kdcMb3gyetibq3El8tLmc2085jg2/Opt375hLXkYyD364DQlQNBEOb6DFqWIj+6qN2gMjWNzY2s7b6w5yxph8Mhz+BWbJNit/v2EaFZY8Nm/ZxLdfWMXzS/dw05xS/vb1aTjsanJPSbLy47NHs37/Ud5ef9D/D9JJ6uiDH25j5d4jPHDF8fzzphk8cu0UHr1uKsv/+wx+fuE4DtY2c9UTy3httRLLA7XNXPn4Ul5dvZ8HPjfSfYMFjMu+UJlaGUVaCHqJEBuGa/oMpktizIUw5Ybgx/gJQScWgRCQO1JtwhKIaSGk5QV1gQQyZUg2S35yOit217B6Rz6LvtrO0BMv7fpNQuA462d8XpvJiRt/xYAvriZ51D3cPPd86lvbWbG7hh/MW8Ohoy3cNGeYJ4PqxjmlfLipnNb2KlIyclQKLXTeb2jvUnXbzbaKjiQbp19/Lx16ahr57+lJVi6bWszqlR+DDT7e1cS9ry6gurGNUQXp/PzCcVw+pZjsNG9Wzz3njubRzbM4r/lLKnevI++4KeBsYnNlO6lJVm6cPQyb1cL4QVl8//QR/PT1jSz8qpLTCo+H5Y/zx/c38dTSMpbvqublb8/CYlQWP7ZgB9WNbdxyUnCRzs9w4Cg5jrZ9u5i/tZxfXjSOG+d0PPayKcU8s3g3f/hgGxdOHITVIlQ8o2aX/y58wIebDvPkol1cP3MIl0z2300vK0WlF182pZjvvriKH768ji/31PDJlgpanC6ev/EE3l2aBHth8drNzB13sffNUkLZCig9RaXlHlzb5f9TOCzdWcXTn+/mVxeNZ0hOkLhMAhF1i0AIMVgIsUAIsUUIsUkIcWe0x9CvScuF6/4Dlz7eeel/KEIAMPwUKJjQ8XkzZtBFfCCQvIxkLphYxM8vP4GT732DEeMmh/S+mZd/n3syHgB7Krfsuwfx2q1kuo7yp6smcaC2GbeUfGPWMM/xJ5YOZGxRJlk0kJfvk9LqCGi2Z7JvGeSNDb9Rmkl6gWcT++tnDiXFrTJtHllSwciCdF65fRYf3nUyN88t9RMBUDGUyy+7GoD/e/VlGlvbcbc2sLXGxddPHOJ3/NemDWbwwBT++NE2ZMHx4Gpj8fIljC3KZOXeI2qbUpeTRpeFpz/fzeVTipk2tPPPlFlQyihHLS/dOjOoCICymn5w+ggO1DazaLuR0XO0TFlQOSPYW93IO+sP8vsPtvLj/6xjYkkWP79wXKfXHJiWxAs3n8gNM4cyb0UZSVYLr35nNqeOzue315+KGwurNm/luqeWc9dLa/jVW5t4f/EKZSUMngFFk5BVX/HQO6v5n3c2d98pthOa2tr55Zsbue6pL/h0awXzvoxAJ9YwKa9r4aUV+3r8GSJNLCyCduBHUsrVQogMYJUQ4mMp5eYYjKV/Mursrl/3CIFQfWc648xfBX/etAi6cQtFArvVwu/vuhm7+xuw9C+q9UPNLk68ZT6/uWQCtY1tDB7oXc0JIfj5BWMpnNeEI9dHCEyLwLffkNulVpvHX9nzAXqqi8sZUziamYPsUAX3XjaDWSecqOITXTB85HhaUwoYWr+GO+et5om2JlpI5paT/F16STYLd5w+krtfWc8nR/I5Czgp/RB33n4rN//jS373/hauym5j4+EmkmwW7j1vTNfjzirB1lrLicXJXR52xtgCBqapHfJOG53vyRhaVJ3JN15eCIDNIji+JIu/XjOl87iPgd1q4X8uncB5EwoZXZhBTrq6vt1uR6bncZLDzaet7aypbaamsY2atkWclwQvHiwktRkuQ7J0yWeslqMozHRw68ldx6gCOXy0hWueXMae6iZunDOMzQfr+GDjYe45Z3S3/1eR5MlFu3hm8W5cUvL1E4d2/4ZeJuoWgZTykJRytXG/HtgCFHf9Lk1EMSfFjCLvhjTh4BGC8H6EPSXZZsWS5IBT74UL/qiCuzvnc8PMofzA2JnNl9nDs0lxNSB8LZ/A9tugMp9a61TVc08J6Jx56wxVGzF73PDQJhYhSB5xMmek7mDR1oNYcVFaXEBBZscePpdNKWZ4bhrf+aCOZpnETSMaSEu28b+XHU+r0011bQP769q544wR5Ad5vx9mbKiu6w1qkmwWLptSzMeby6lpbIPqXQA8tMrFhOJM3vnBXDb95hxe/+4cP0Hujtkjcj0i4PlTpOczdWAbb35/Lp/dfRrrf3k29044Sotw8IvlkgfWqc/0xBkWzptQyAMfbGXV3hD3gjZ4eP52Dta2MO/WmfzyovFcNGkQu6sa2V7RSc1ED3hz7QEeM5IZOuOzr5SFdf87W9hTFcHd4npITIPFQohhwBSgQ4WMEOI2IcRKIcTKykq9c1FEsaeoeoCu3EJd0QPXUMSYdK2axD57sPNW0S1HAenv7jELvHxjBGZ8YGjX8YEuMdMe640gZxfpo50ydDaprZX8ZLJq4jdjZPD/F5vVwl1njaJdWqhJH0lhkwpMH5eXzvdOG4F0O3E4HHxrdgj/L559CbrpOdTexrXjknG6JG+sOQDVO3Da0lhfm8wPzxrFhOKsbq2AkAnoNySEYFD9BhxDT+DTu8/gpR9dBml55DVs4/dXTqR4QArf//caJVAhUFbTxH9WlnHNjMHMOk5Vep89rgAh4MONQdqJ94CdlQ3c/Z/1PPjhNhZsDd4yY/+RJnZUNHDL3FJsVsGP/rOuQ21MtImZEAgh0oFXgbuklB2qRKSUT0opp0spp+fl5XU8gebYSMvzK44KC7MXThRcQx2wJcGcO6FseefbWnoazvkIgcUKyVn+FsHepZA1
pOeCCB1312qpU62gbWF05Rw6F4CbC9QqMnvAgE4PvWhiEa9/dzZFo09QKaSGGN5+6nAy7JITjysgyRbCz9r8zLXdCMH8XzNi3kmcVdTCyyvLcFfvYKe7kOOLB3g2RIoYgf2G2hpVmvLgExmak0ZpXrqq9zi0jkyHnb99fSrVDW3cMW8NTW3tnZ/X4JFPt2OxCL57qjfjKT/TwdQh2XywySsEVQ2tnPuXRapAMAzcbsl9r23AYbcwPC+Nn73h0/HVh4Xb1ML2mhlD+J9LJrBq7xGeWBSkQDCKxEQIhBB2lAi8KKV8LRZjSHi+9hyc/vOevTfKrqEOTLlBTRqLHgz+uqcFdUDlcsoAb4xAShUoPhZrAJSl4buJfWu9qiEIx9+cO1IJ8/aP1OPAug0fhBBMGZKNpWiisnyMFX2yzUqa1U1uVoiWSEYRCGvX7aidzbDmBXA28kvb82w9XMfRsi1scxZwxxkjI+9TN/sNmZbewTWqFmbwDO8xRZOhYgs4m5lQnMX9l01gyc4qrnx8mSq664S91Y28uvoA180YQmGWv0ifO76QTQfrKKtRabi/fnszWw/Xc/+7W2huc/kdu6uygScX7eT2F1Yx63fzueGZLzzpuy99WcaK3TX89IKx/OGKiRyobeahj7YRyMJtlZRkp3BcXhqXTB7E+ccX8qePvuK/X9/gGUO0iUXWkACeAbZIKf8U7etrDEqm99wiGHk2TP2Gd+etaGN3wOw7YPdnKtgbiNF5tEMmUMoAr0VQvVP1YeombbRbhFB/x81vQOVXyjUUjlvIPMfQ2d423YHdR4NROFHdmoV9YBSUhZj/YbWpWpG9S+DjX8JzF6h+Pr5sflOJzdiLKKlcxBX2ZWS1HqIhbShnjo2wNQBK3N1O7/+R2Yq8xGf/46JJShzKVW7JVdMH8+w3T6CspomLH1nMh5sOs3RHFR9sPMS76w9RVtOElJK/zt+BzSL47qkd+yOdM14F/D/cdJj5W8p5e91BzhxbQGV9Ky8s3+M5bndVIxc+spjfvreVzYfqmDokmzX7ajn34UU8vnAnv3t/CzOHD+Sq6YOZPmwg188cwvNL9/gV0bW2u1i6s4pTR+chhEAIwQNXTOSqEwargPxDC7nnlXVRF4RYZA3NAW4ANgghzKTg/5ZSvheDsWh6wrA56l8smX6j2irx3R/BZU9AgU/aYuBeBCYp2d4YwZdPA0KlyB4rlzwG866Fp89UXUYD+wyFwtA53grjLiwCDwXj1a5nhzfAmAvUc6HsWezLwOFKTPevVK1G9i5W9Scjz1Kvr/qHOubK5+Cp0/ht+dNYpGTCxGm9k2Hj29Y7daDqAZUz0l/Qiyap20NroUQVJ542Jp/XvzeHW/+5km+/0LFDaUFmMpX1rdw4pzRoEH1ITirjijJ5Y+0BqhtU3cffvj6VW/65kscX7uS6E4eSZLVwx7w1JNksvHfHSQzLVf9HB2qbuffV9fz+g60k2Sz87vKJnr/NPeeO4ePN5fzklfW8+f05OOxWVu45QlObi1NGeYU002Hnt5cdzw9OH8ETn+1i3op9vLb6AFefMJgfnD6ygwXTG0RdCKSUi4Ho5Wlp+idJaXDRX+CtH8Df56g+RnP/S/m+O3MNOQaoPvwVW2DFk0pMIuHeGjwDbv0U5l0DFZth2Enhn2Ooj7CGIgRJqaq695DRasLtVivlcITgsr+rv0fh8UpU/jYTPvxvGH6qKhrbtxTO/LVqW3HhwyQ9fQYAxx8/NfRrhINvvOVoGeycD6fc63/MgCHq//XQWuW6qt0HjixG5Bfy1vfn8OWeGlKTbGQ67LjckjVlR1i55wiH61q4/ZTOu6WeO6GQP338FULAo9fNJslm4UdnjeKSx5bw3OLdNLS28//t3XtwVNUdwPHvLwlEngaEAIISQBQJIC+VpwpUBWyFKpZaVEQcxxZHrHZ8tLa2MjJqtVU7VLAigiA44gOl1ioRocxUJCAqBZGnEgQBFWy0oOCvf/zOyibkASHLwr2/z8xO9t69u3vOns357T33PN7fvItJV3b7PggANM+pxbRrzmLO8k+om51Fq6TH6h9Xg3sv6cSoJ5cw/pVV3D2kA2+u3kbNzAx6tTnwbLrZ8bX4/cX5XH9uGybMX8vqwnl8snwU6380iV7dux3eZ1sJH1nsjl3th1ilu+A++4W/dIpVaJnZgBw4FXRi1tV/3GbNN/3urL60NGgJo1+zif6a5B/683PbW6DavfPgmobAKvBNS+x+YlK2jEP4l65/ot0SLrzHglnhE1bBZtSAziPssRbdkDOvhWXTyGhcjbOOJksEgs/WwMIHbZW4vjeXPEbEzgremWELHYFdmL9qDvVO7kH/diXnVurY4vgSAw7LMygEgpE9875fae+Mk3I4v30T/vrmOv73rQ3ySzQjlUySMLRL2T3g+7XL5do+rXh80QZ6tWnEgg+30yuvDnW+LoJN62zt502L7aL9KQOgwzCaNmzNOJkIWU+xs0YumfVT373UA4E7ttVuCIPug7Oug3VvWLNC8Vb75VhqUjRq5djU2RsWwOAH9i8WU12y68Glf6vaczMy7DrB6lfKnoa6LE07wornrPdTwd22L6eSOZwqcupAm8ph/nircNsNLrG6HQPvhZ5jDgyw1SXRNFQwzq61DJ++fzbdZL3H2plfTku7Lbwfnh5ugbj0lOtlKd5uI/CTmrfaNqnHyzf0oV2zktd3bj7/VF5f+Sltc+vyu/zP4KFOFnh6jrFp1TMy7SL/u7MsTRfcU3KZUeDWs2txwqp/UfvZPzKLNTTcVQwPhwclA3Lz7YdE4RRYPNEu4otArxvJOfe2ktPAp4gHAhcNJ7SpfKGU40K3zNx86DYq9Wk6VK37wZrX96ezMk072t8pg6yr7MV/gQ6HMUpaBC4cD5P6gn4H3a4u+XhmVmq7DGfXt0p2907rDNCinOaQNv3tlnDy2TD5Aph+qS3gs/U9WD4TdqyGy6ZC06RpUv49wZq/GrSC/KHQ9kI7S9y2ko5fboasa/Z/rsDpzeoz7equdFk/keynH7bvWFY2vHwjvDHOejh9vcO6Ju/ZZZ/bwHvts9xTDHPGUHPli/wc+Jgm/HPfmZzfqzuNmuXZeJgTu+yfqXb3Lvjg79ZpoNuokte9UkyOlrkuKtK9e3ctLCxMdzLcse792fDcaBg5F1pVoR0/1fbttbb5xqdWfixYBfZYP2h1jk0HUtX5kkp79dewcSFct/CAX7cp90hXq0SvX2QDHw/WlndhymCbdResmUnVrpuMnGuV6jvTYc4YC7hgq85pUvfQzGyr5C+fCXk2toPtq+05RUugyxUw6H5rutuw0K4zZWTZIMdTBljvq7cm2HWNTj+BWSMsGPX9FXQaTsH2esxb9Snjf9zxiE1nISJLVbV7pcd5IHCxsW+vLUDfKEVt3FGhemjjIKrT1hXWhFeVQX4bF1kTTfshVtnv/AievMi61fYeC/Pusgvhl8+yCv+rsGZ0vabWpLSn2FZ/+2KjreOxY42NValZx5oSK5uTShXm3ADLp1uwyMq2Hldt+lXhg6geHgicc27
HWgsGxVtttb6rXqy4V9bXn8OMy2BzqG/yL7FrUKXX/S7Pvr0w5xc2TmXYZFtHIY08EDjnHFgwWDoF+t5ycM1n33xlF8zz+sBpg1KfvhTyQOCcczF3sIHAl6p0zrmY80DgnHMx54HAOedizgOBc87FnAcC55yLOQ8EzjkXcx4InHMu5jwQOOdczB0TA8pEZDvwURWf3gjYUY3JOVbEMd9xzDPEM99xzDMcer5bqmrjyg46JgLB4RCRwoMZWRc1ccx3HPMM8cx3HPMMqcu3Nw0551zMeSBwzrmYi0MgeCzdCUiTOOY7jnmGeOY7jnmGFOU78tcInHPOVSwOZwTOOecq4IHAOediLtKBQEQGishqEVkrIrenOz2pICInich8EVklIv8RkbFhf0MReV1E1oS/DdKd1uomIpki8o6IzA3brURkccjzMyJSM91prG4ikiMis0Xkg1DmPaNe1iLyy/DdXiEiM0XkuCiWtYg8ISLbRGRF0r4yy1bMI6Fue09Euh7Oe0c2EIhIJjABGAS0By4XkfbpTVVK7AVuUdXTgR7AmJDP24ECVW0LFITtqBkLrEravg/4c8jzF8DotKQqtR4GXlXVdsAZWP4jW9Yi0hy4Eeiuqh2ATOCnRLOsnwQGltpXXtkOAtqG23XAo4fzxpENBMBZwFpVXa+q3wCzgCFpTlO1U9Utqros3P8vVjE0x/I6NRw2FRianhSmhoi0AC4CHg/bAvQHZodDopjn+sA5wGQAVf1GVXcS8bIGsoBaIpIF1Aa2EMGyVtWFwOeldpdXtkOAaWreAnJEpFlV3zvKgaA5sClpuyjsiywRyQO6AIuBJqq6BSxYALnpS1lKPATcCnwXtk8Adqrq3rAdxfJuDWwHpoQmscdFpA4RLmtV3Qw8AHyMBYBdwFKiX9YJ5ZVttdZvUQ4EUsa+yPaVFZG6wHPATar6ZbrTk0oi8kNgm6ouTd5dxqFRK+8soCvwqKp2Ab4iQs1AZQlt4kOAVsCJQB2sWaS0qJV1Zar1+x7lQFAEnJS03QL4JE1pSSkRqYEFgRmq+nzY/WniVDH83Zau9KVAb+BiEdmINfn1x84QckLzAUSzvIuAIlVdHLZnY4EhymX9A2CDqm5X1W+B54FeRL+sE8or22qt36IcCJYAbUPvgprYBaaX0pymahfaxicDq1T1T0kPvQSMDPdHAnOOdNpSRVXvUNUWqpqHlesbqjoCmA8MC4dFKs8AqroV2CQip4VdA4CVRLissSahHiJSO3zXE3mOdFknKa9sXwKuCr2HegC7Ek1IVaKqkb0Bg4EPgXXAb9KdnhTlsQ92SvgesDzcBmNt5gXAmvC3YbrTmqL8nwfMDfdbA28Da4Fngex0py8F+e0MFIbyfhFoEPWyBv4AfACsAJ4CsqNY1sBM7DrIt9gv/tHllS3WNDQh1G3vY72qqvzePsWEc87FXJSbhpxzzh0EDwTOORdzHgiccy7mPBA451zMeSBwzrmY80DgXIqJyHmJGVKdOxp5IHDOuZjzQOBcICJXiMjbIrJcRCaF9Q6KReRBEVkmIgUi0jgc21lE3gpzwb+QNE/8KSIyT0TeDc9pE16+btI6AjPCKFnnjgoeCJwDROR0YDjQW1U7A/uAEdgkZ8tUtSuwALgrPGUacJuqdsJGdib2zwAmqOoZ2Jw4iWH/XYCbsLUxWmPzJTl3VMiq/BDnYmEA0A1YEn6s18Im+PoOeCYcMx14XkSOB3JUdUHYPxV4VkTqAc1V9QUAVd0NEF7vbVUtCtvLgTxgUeqz5VzlPBA4ZwSYqqp3lNgp8ttSx1U0J0tFzT17ku7vw//33FHEm4acMwXAMBHJhe/Xim2J/Y8kZrn8GbBIVXcBX4hI37D/SmCB2joQRSIyNLxGtojUPqK5cK4K/FeJc4CqrhSRO4HXRCQDmwFyDLb4S76ILMVWxxoenjISmBgq+vXAqLD/SmCSiNwdXuOyI5gN56rEZx91rgIiUqyqddOdDudSyZuGnHMu5vyMwDnnYs7PCJxzLuY8EDjnXMx5IHDOuZjzQOCcczHngcA552Lu/77AmefMQ0nPAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "experimento_ssd7_panel.h5\n" + ] + } + ], + "source": [ + "#Graficar aprendizaje\n", + "\n", + "history_path =config['train']['saved_weights_name'].split('.')[0] + '_history'\n", + "\n", + "hist_load = np.load(history_path + '.npy',allow_pickle=True).item()\n", + "\n", + "print(hist_load.keys())\n", + "\n", + "# summarize history for loss\n", + "plt.plot(hist_load['loss'])\n", + "plt.plot(hist_load['val_loss'])\n", + "plt.title('model loss')\n", + "plt.ylabel('loss')\n", + "plt.xlabel('epoch')\n", + "plt.legend(['train', 'test'], loc='upper left')\n", + "plt.show()\n", + "\n", + "print(config['train']['saved_weights_name'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Evaluación del Modelo" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "config_path = 'config_7_panel.json'\n", + "\n", + "with open(config_path) as config_buffer:\n", + " config = json.loads(config_buffer.read())\n", + "\n", + " \n", + "model_mode = 'training'\n", + "# TODO: Set the path to the `.h5` file of the model to be loaded.\n", + "model_path = config['train']['saved_weights_name']\n", + "\n", + "# We need to create an SSDLoss object in order to pass that to the model loader.\n", + "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + "K.clear_session() # Clear previous models from memory.\n", + "\n", + "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + " 'L2Normalization': L2Normalization,\n", + " 'DecodeDetections': DecodeDetections,\n", + " 'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + " \n", + "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "\n", + "# 2: Parse the image and label lists for the training and validation datasets. 
This can take a while.\n", + "\n", + "\n", + "\n", + "# The XML parser needs to now what object class names to look for and in which order to map them to integers.\n", + "classes = ['background' ] + labels\n", + "\n", + "train_dataset.parse_xml(images_dirs= [config['train']['train_image_folder']],\n", + " image_set_filenames=[config['train']['train_image_set_filename']],\n", + " annotations_dirs=[config['train']['train_annot_folder']],\n", + " classes=classes,\n", + " include_classes='all',\n", + " #classes = ['background', 'panel', 'cell'], \n", + " #include_classes=classes,\n", + " exclude_truncated=False,\n", + " exclude_difficult=False,\n", + " ret=False)\n", + "\n", + "val_dataset.parse_xml(images_dirs= [config['test']['test_image_folder']],\n", + " image_set_filenames=[config['test']['test_image_set_filename']],\n", + " annotations_dirs=[config['test']['test_annot_folder']],\n", + " classes=classes,\n", + " include_classes='all',\n", + " #classes = ['background', 'panel', 'cell'], \n", + " #include_classes=classes,\n", + " exclude_truncated=False,\n", + " exclude_difficult=False,\n", + " ret=False)\n", + "\n", + "#########################\n", + "# 3: Set the batch size.\n", + "#########################\n", + "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "evaluator = Evaluator(model=model,\n", + " n_classes=n_classes,\n", + " data_generator=val_dataset,\n", + " model_mode='training')\n", + "\n", + "results = evaluator(img_height=img_height,\n", + " img_width=img_width,\n", + " batch_size=4,\n", + " data_generator_mode='resize',\n", + " round_confidences=False,\n", + " matching_iou_threshold=0.5,\n", + " border_pixels='include',\n", + " sorting_algorithm='quicksort',\n", + " average_precision_mode='sample',\n", + " num_recall_points=11,\n", + " ignore_neutral_boxes=True,\n", + " return_precisions=True,\n", + " return_recalls=True,\n", + " return_average_precisions=True,\n", + " verbose=True)\n", + "\n", + "mean_average_precision, average_precisions, precisions, recalls = results\n", + "total_instances = []\n", + "precisions = []\n", + "\n", + "for i in range(1, len(average_precisions)):\n", + " \n", + " print('{:.0f} instances of class'.format(len(recalls[i])),\n", + " classes[i], 'with average precision: {:.4f}'.format(average_precisions[i]))\n", + " total_instances.append(len(recalls[i]))\n", + " precisions.append(average_precisions[i])\n", + "\n", + "if sum(total_instances) == 0:\n", + " \n", + " print('No test instances found.')\n", + "\n", + "else:\n", + "\n", + " print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))\n", + " print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))\n", + "\n", + " for i in range(1, len(average_precisions)):\n", + " print(\"{:<14}{:<6}{}\".format(classes[i], 'AP', round(average_precisions[i], 3)))\n", + " print()\n", + " print(\"{:<14}{:<6}{}\".format('','mAP', round(mean_average_precision, 3)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cargar nuevamente el modelo desde los pesos.\n", + "Predicción" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'panel': 1}\n", + "\n", + 
"__________________________________________________________________________________________________\n", + "Layer (type) Output Shape Param # Connected to \n", + "==================================================================================================\n", + "input_1 (InputLayer) (None, 400, 400, 3) 0 \n", + "__________________________________________________________________________________________________\n", + "identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1 (Conv2D) (None, 400, 400, 32) 2432 identity_layer[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn1 (BatchNormalization) (None, 400, 400, 32) 128 conv1[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu1 (ELU) (None, 400, 400, 32) 0 bn1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool1 (MaxPooling2D) (None, 200, 200, 32) 0 elu1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2 (Conv2D) (None, 200, 200, 48) 13872 pool1[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn2 (BatchNormalization) (None, 200, 200, 48) 192 conv2[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu2 (ELU) (None, 200, 200, 48) 0 bn2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool2 (MaxPooling2D) (None, 100, 100, 48) 0 elu2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3 (Conv2D) (None, 100, 100, 64) 27712 pool2[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn3 (BatchNormalization) (None, 100, 100, 64) 256 conv3[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu3 (ELU) (None, 100, 100, 64) 0 bn3[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool3 (MaxPooling2D) (None, 50, 50, 64) 0 elu3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4 (Conv2D) (None, 50, 50, 64) 36928 pool3[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn4 (BatchNormalization) (None, 50, 50, 64) 256 conv4[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu4 (ELU) (None, 50, 50, 64) 0 bn4[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool4 (MaxPooling2D) (None, 25, 25, 64) 0 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5 (Conv2D) (None, 25, 25, 48) 27696 pool4[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn5 (BatchNormalization) (None, 25, 25, 48) 192 conv5[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "elu5 (ELU) (None, 25, 25, 48) 0 bn5[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool5 (MaxPooling2D) (None, 12, 12, 48) 0 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6 (Conv2D) (None, 12, 12, 48) 20784 pool5[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn6 (BatchNormalization) (None, 12, 12, 48) 192 conv6[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu6 (ELU) (None, 12, 12, 48) 0 bn6[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool6 (MaxPooling2D) (None, 6, 6, 48) 0 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7 (Conv2D) (None, 6, 6, 32) 13856 pool6[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn7 (BatchNormalization) (None, 6, 6, 32) 128 conv7[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu7 (ELU) (None, 6, 6, 32) 0 bn7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4 (Conv2D) (None, 50, 50, 8) 4616 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5 (Conv2D) (None, 25, 25, 8) 3464 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6 (Conv2D) (None, 12, 12, 8) 3464 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7 (Conv2D) (None, 6, 6, 8) 2312 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4 (Conv2D) (None, 50, 50, 16) 9232 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5 (Conv2D) (None, 25, 25, 16) 6928 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6 (Conv2D) (None, 12, 12, 16) 6928 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7 (Conv2D) (None, 6, 6, 16) 4624 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4_reshape (Reshape) (None, 10000, 2) 0 classes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5_reshape (Reshape) (None, 2500, 2) 0 classes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6_reshape (Reshape) (None, 576, 2) 0 classes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7_reshape (Reshape) (None, 144, 2) 0 classes7[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "anchors4 (AnchorBoxes) (None, 50, 50, 4, 8) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5 (AnchorBoxes) (None, 25, 25, 4, 8) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6 (AnchorBoxes) (None, 12, 12, 4, 8) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7 (AnchorBoxes) (None, 6, 6, 4, 8) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_concat (Concatenate) (None, 13220, 2) 0 classes4_reshape[0][0] \n", + " classes5_reshape[0][0] \n", + " classes6_reshape[0][0] \n", + " classes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4_reshape (Reshape) (None, 10000, 4) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5_reshape (Reshape) (None, 2500, 4) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6_reshape (Reshape) (None, 576, 4) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7_reshape (Reshape) (None, 144, 4) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors4_reshape (Reshape) (None, 10000, 8) 0 anchors4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5_reshape (Reshape) (None, 2500, 8) 0 anchors5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6_reshape (Reshape) (None, 576, 8) 0 anchors6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7_reshape (Reshape) (None, 144, 8) 0 anchors7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_softmax (Activation) (None, 13220, 2) 0 classes_concat[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes_concat (Concatenate) (None, 13220, 4) 0 boxes4_reshape[0][0] \n", + " boxes5_reshape[0][0] \n", + " boxes6_reshape[0][0] \n", + " boxes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors_concat (Concatenate) (None, 13220, 8) 0 anchors4_reshape[0][0] \n", + " anchors5_reshape[0][0] \n", + " anchors6_reshape[0][0] \n", + " anchors7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "predictions (Concatenate) (None, 13220, 14) 0 classes_softmax[0][0] \n", + " boxes_concat[0][0] \n", + " anchors_concat[0][0] \n", + "==================================================================================================\n", + "Total params: 186,192\n", + "Trainable params: 185,520\n", + "Non-trainable params: 672\n", + 
"__________________________________________________________________________________________________\n" + ] + } + ], + "source": [ + "#############################\n", + "####Prediction\n", + "#############################\n", + "\n", + "from imageio import imread\n", + "from keras.preprocessing import image\n", + "import time\n", + "\n", + "def makedirs(path):\n", + " try:\n", + " os.makedirs(path)\n", + " except OSError:\n", + " if not os.path.isdir(path):\n", + " raise\n", + "\n", + "\n", + "config_path = 'config_7_panel.json'\n", + "input_path = ['panel_jpg/Mision_1/', 'panel_jpg/Mision_2/']\n", + "output_path = 'result_ssd7_panel/'\n", + "\n", + "with open(config_path) as config_buffer:\n", + " config = json.loads(config_buffer.read())\n", + "\n", + "makedirs(output_path)\n", + "###############################\n", + "# Parse the annotations\n", + "###############################\n", + "score_threshold = 0.5\n", + "score_threshold_iou = 0.5\n", + "labels = config['model']['labels']\n", + "categories = {}\n", + "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} #la categoría 0 es la background\n", + "for i in range(len(labels)): categories[labels[i]] = i+1\n", + "print('\\nTraining on: \\t' + str(categories) + '\\n')\n", + "\n", + "img_height = config['model']['input'] # Height of the model input images\n", + "img_width = config['model']['input'] # Width of the model input images\n", + "img_channels = 3 # Number of color channels of the model input images\n", + "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n", + "classes = ['background'] + labels\n", + "\n", + "model_mode = 'training'\n", + "# TODO: Set the path to the `.h5` file of the model to be loaded.\n", + "model_path = config['train']['saved_weights_name']\n", + "\n", + "# We need to create an SSDLoss object in order to pass that to the model loader.\n", + "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + "K.clear_session() # Clear previous models from memory.\n", + "\n", + "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + " 'L2Normalization': L2Normalization,\n", + " 'DecodeDetections': DecodeDetections,\n", + " 'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "model.summary()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tiempo Total: 1.040\n", + "Tiempo promedio por imagen: 0.104\n", + "OK\n" + ] + } + ], + "source": [ + "image_paths = []\n", + "for inp in input_path:\n", + " if os.path.isdir(inp):\n", + " for inp_file in os.listdir(inp):\n", + " image_paths += [inp + inp_file]\n", + " else:\n", + " image_paths += [inp]\n", + "\n", + "image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]\n", + "times = []\n", + "\n", + "\n", + "for img_path in image_paths:\n", + " orig_images = [] # Store the images here.\n", + " input_images = [] # Store resized versions of the images here.\n", + " #print(img_path)\n", + "\n", + " # preprocess image for network\n", + " orig_images.append(imread(img_path))\n", + " img = image.load_img(img_path, target_size=(img_height, img_width))\n", + " img = image.img_to_array(img)\n", + " input_images.append(img)\n", + " input_images = np.array(input_images)\n", + " # process image\n", + " start = time.time()\n", + " y_pred = model.predict(input_images)\n", + " y_pred_decoded = 
decode_detections(y_pred,\n", + " confidence_thresh=score_threshold,\n", + " iou_threshold=score_threshold_iou,\n", + " top_k=200,\n", + " normalize_coords=True,\n", + " img_height=img_height,\n", + " img_width=img_width)\n", + "\n", + "\n", + " #print(\"processing time: \", time.time() - start)\n", + " times.append(time.time() - start)\n", + " # correct for image scale\n", + "\n", + " # visualize detections\n", + " # Set the colors for the bounding boxes\n", + " colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()\n", + "\n", + " plt.figure(figsize=(20,12))\n", + " plt.imshow(orig_images[0],cmap = 'gray')\n", + "\n", + " current_axis = plt.gca()\n", + " #print(y_pred)\n", + " for box in y_pred_decoded[0]:\n", + " # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.\n", + "\n", + " xmin = box[2] * orig_images[0].shape[1] / img_width\n", + " ymin = box[3] * orig_images[0].shape[0] / img_height\n", + " xmax = box[4] * orig_images[0].shape[1] / img_width\n", + " ymax = box[5] * orig_images[0].shape[0] / img_height\n", + "\n", + " color = colors[int(box[0])]\n", + " label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n", + " current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n", + " current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n", + "\n", + " #plt.figure(figsize=(15, 15))\n", + " #plt.axis('off')\n", + " save_path = output_path + img_path.split('/')[-1]\n", + " plt.savefig(save_path)\n", + " plt.close()\n", + " \n", + "file = open(output_path + 'time.txt','w')\n", + "\n", + "file.write('Tiempo promedio:' + str(np.mean(times)))\n", + "\n", + "file.close()\n", + "print('Tiempo Total: {:.3f}'.format(np.sum(times)))\n", + "print('Tiempo promedio por imagen: {:.3f}'.format(np.mean(times)))\n", + "print('OK')" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "panel : 69\n", + "cell : 423\n" + ] + } + ], + "source": [ + "\n", + "# Summary instance training\n", + "category_train_list = []\n", + "for image_label in train_dataset.labels:\n", + " category_train_list += [i[0] for i in train_dataset.labels[0]]\n", + "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n", + "for i in summary_category_training.keys():\n", + " print(i, ': {:.0f}'.format(summary_category_training[i]))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "__________________________________________________________________________________________________\n", + "Layer (type) Output Shape Param # Connected to \n", + "==================================================================================================\n", + "input_1 (InputLayer) (None, 400, 400, 3) 0 \n", + "__________________________________________________________________________________________________\n", + "identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1 (Conv2D) (None, 400, 400, 32) 2432 identity_layer[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn1 (BatchNormalization) (None, 400, 400, 32) 128 
conv1[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu1 (ELU) (None, 400, 400, 32) 0 bn1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool1 (MaxPooling2D) (None, 200, 200, 32) 0 elu1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2 (Conv2D) (None, 200, 200, 48) 13872 pool1[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn2 (BatchNormalization) (None, 200, 200, 48) 192 conv2[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu2 (ELU) (None, 200, 200, 48) 0 bn2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool2 (MaxPooling2D) (None, 100, 100, 48) 0 elu2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3 (Conv2D) (None, 100, 100, 64) 27712 pool2[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn3 (BatchNormalization) (None, 100, 100, 64) 256 conv3[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu3 (ELU) (None, 100, 100, 64) 0 bn3[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool3 (MaxPooling2D) (None, 50, 50, 64) 0 elu3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4 (Conv2D) (None, 50, 50, 64) 36928 pool3[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn4 (BatchNormalization) (None, 50, 50, 64) 256 conv4[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu4 (ELU) (None, 50, 50, 64) 0 bn4[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool4 (MaxPooling2D) (None, 25, 25, 64) 0 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5 (Conv2D) (None, 25, 25, 48) 27696 pool4[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn5 (BatchNormalization) (None, 25, 25, 48) 192 conv5[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu5 (ELU) (None, 25, 25, 48) 0 bn5[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool5 (MaxPooling2D) (None, 12, 12, 48) 0 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6 (Conv2D) (None, 12, 12, 48) 20784 pool5[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn6 (BatchNormalization) (None, 12, 12, 48) 192 conv6[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu6 (ELU) (None, 12, 12, 48) 0 bn6[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "pool6 (MaxPooling2D) (None, 6, 6, 48) 0 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7 (Conv2D) (None, 6, 6, 32) 13856 pool6[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn7 (BatchNormalization) (None, 6, 6, 32) 128 conv7[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu7 (ELU) (None, 6, 6, 32) 0 bn7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4 (Conv2D) (None, 50, 50, 12) 6924 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5 (Conv2D) (None, 25, 25, 12) 5196 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6 (Conv2D) (None, 12, 12, 12) 5196 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7 (Conv2D) (None, 6, 6, 12) 3468 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4 (Conv2D) (None, 50, 50, 16) 9232 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5 (Conv2D) (None, 25, 25, 16) 6928 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6 (Conv2D) (None, 12, 12, 16) 6928 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7 (Conv2D) (None, 6, 6, 16) 4624 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4_reshape (Reshape) (None, 10000, 3) 0 classes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5_reshape (Reshape) (None, 2500, 3) 0 classes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6_reshape (Reshape) (None, 576, 3) 0 classes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7_reshape (Reshape) (None, 144, 3) 0 classes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors4 (AnchorBoxes) (None, 50, 50, 4, 8) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5 (AnchorBoxes) (None, 25, 25, 4, 8) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6 (AnchorBoxes) (None, 12, 12, 4, 8) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7 (AnchorBoxes) (None, 6, 6, 4, 8) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_concat (Concatenate) (None, 13220, 3) 0 classes4_reshape[0][0] \n", + " 
classes5_reshape[0][0] \n", + " classes6_reshape[0][0] \n", + " classes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4_reshape (Reshape) (None, 10000, 4) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5_reshape (Reshape) (None, 2500, 4) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6_reshape (Reshape) (None, 576, 4) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7_reshape (Reshape) (None, 144, 4) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors4_reshape (Reshape) (None, 10000, 8) 0 anchors4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5_reshape (Reshape) (None, 2500, 8) 0 anchors5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6_reshape (Reshape) (None, 576, 8) 0 anchors6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7_reshape (Reshape) (None, 144, 8) 0 anchors7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_softmax (Activation) (None, 13220, 3) 0 classes_concat[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes_concat (Concatenate) (None, 13220, 4) 0 boxes4_reshape[0][0] \n", + " boxes5_reshape[0][0] \n", + " boxes6_reshape[0][0] \n", + " boxes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors_concat (Concatenate) (None, 13220, 8) 0 anchors4_reshape[0][0] \n", + " anchors5_reshape[0][0] \n", + " anchors6_reshape[0][0] \n", + " anchors7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "predictions (Concatenate) (None, 13220, 15) 0 classes_softmax[0][0] \n", + " boxes_concat[0][0] \n", + " anchors_concat[0][0] \n", + "==================================================================================================\n", + "Total params: 193,120\n", + "Trainable params: 192,448\n", + "Non-trainable params: 672\n", + "__________________________________________________________________________________________________\n" + ] + } + ], + "source": [ + "\n", + "\n", + "\n", + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Primer_reslutado_panel/Panel_Detector.ipynb b/Primer_reslutado_panel/Panel_Detector.ipynb new file mode 100644 index 0000000..5a6e474 --- /dev/null +++ b/Primer_reslutado_panel/Panel_Detector.ipynb @@ -0,0 +1,1896 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Detector de Paneles" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cargar el modelo ssd7 \n", + "(https://github.com/pierluigiferrari/ssd_keras#how-to-fine-tune-one-of-the-trained-models-on-your-own-dataset)\n", + "\n", + "Training del SSD7 (modelo reducido de SSD). Parámetros en config_7.json y descargar VGG_ILSVRC_16_layers_fc_reduced.h5\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'panel': 1}\n", + "\n", + "\n", + "Loading pretrained weights.\n", + "\n", + "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Colocations handled automatically by placer.\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Deprecated in favor of operator or tf.math.divide.\n" + ] + }, + { + "ename": "ResourceExhaustedError", + "evalue": "OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n\nCaused by op 'training/Adam/Variable_6/Assign', defined at:\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, in execute_request\n user_expressions, allow_stdin,\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2874, in _run_cell\n return runner(coro)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/async_helpers.py\", line 67, in _pseudo_sync_runner\n coro.send(None)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3049, in run_cell_async\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3214, in run_ast_nodes\n if (yield from self.run_code(code, result)):\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 124, in \n 'compute_loss': ssd_loss.compute_loss})\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 419, in load_model\n model = _deserialize_model(f, custom_objects, compile)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 317, in _deserialize_model\n model._make_train_function()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/training.py\", line 509, in _make_train_function\n loss=self.total_loss)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/legacy/interfaces.py\", line 91, in wrapper\n return func(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in get_updates\n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in \n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 704, in zeros\n return variable(v, dtype=dtype, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 402, in variable\n v = tf.Variable(value, dtype=tf.as_dtype(dtype), name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 213, in __call__\n return cls._variable_v1_call(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 176, in _variable_v1_call\n aggregation=aggregation)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 155, in \n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py\", line 2495, in default_variable_creator\n expected_shape=expected_shape, import_scope=import_scope)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 217, in __call__\n return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1395, in __init__\n constraint=constraint)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1547, in _init_from_args\n validate_shape=validate_shape).op\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/state_ops.py\", line 223, in assign\n validate_shape=validate_shape)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/gen_state_ops.py\", line 64, in assign\n use_locking=use_locking, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py\", line 788, in _apply_op_helper\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 507, in new_func\n return func(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3300, in create_op\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1801, in __init__\n self._traceback = tf_stack.extract_stack()\n\nResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;31mResourceExhaustedError\u001b[0m: OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[{{node training/Adam/Variable_6/Assign}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n", + "\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 122\u001b[0m model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n\u001b[1;32m 123\u001b[0m \u001b[0;34m'L2Normalization'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mL2Normalization\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m 'compute_loss': ssd_loss.compute_loss})\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\u001b[0m in \u001b[0;36mload_model\u001b[0;34m(filepath, custom_objects, compile)\u001b[0m\n\u001b[1;32m 417\u001b[0m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh5dict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'r'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 418\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 419\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_deserialize_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcustom_objects\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcompile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 420\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 421\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mopened_new_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\u001b[0m in \u001b[0;36m_deserialize_model\u001b[0;34m(f, custom_objects, compile)\u001b[0m\n\u001b[1;32m 323\u001b[0m optimizer_weight_names]\n\u001b[1;32m 324\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 325\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moptimizer_weight_values\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 326\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 327\u001b[0m warnings.warn('Error in loading the saved optimizer '\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\u001b[0m in \u001b[0;36mset_weights\u001b[0;34m(self, weights)\u001b[0m\n\u001b[1;32m 124\u001b[0m 'of the optimizer (' + str(len(params)) + ')')\n\u001b[1;32m 125\u001b[0m \u001b[0mweight_value_tuples\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 126\u001b[0;31m \u001b[0mparam_values\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mK\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatch_get_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 127\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mpv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mw\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparam_values\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mpv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mw\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36mbatch_get_value\u001b[0;34m(ops)\u001b[0m\n\u001b[1;32m 2418\u001b[0m \"\"\"\n\u001b[1;32m 2419\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2420\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mget_session\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2421\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2422\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36mget_session\u001b[0;34m()\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_keras_initialized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0muninitialized_vars\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0msession\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvariables_initializer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muninitialized_vars\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;31m# hack for list_devices() function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# list_devices() function is not available under tensorflow r1.3.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m 
\u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m 
\u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mResourceExhaustedError\u001b[0m: OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n\nCaused by op 'training/Adam/Variable_6/Assign', defined at:\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, in execute_request\n user_expressions, allow_stdin,\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2874, in _run_cell\n return runner(coro)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/async_helpers.py\", line 67, in _pseudo_sync_runner\n coro.send(None)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3049, in run_cell_async\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3214, in run_ast_nodes\n if (yield from self.run_code(code, result)):\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 124, in \n 'compute_loss': ssd_loss.compute_loss})\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 419, in load_model\n model = _deserialize_model(f, custom_objects, compile)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\", line 317, in _deserialize_model\n model._make_train_function()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/training.py\", line 509, in _make_train_function\n loss=self.total_loss)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/legacy/interfaces.py\", line 91, in wrapper\n return func(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in get_updates\n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\", line 487, in \n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 704, in zeros\n return variable(v, dtype=dtype, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\", line 402, in variable\n v = tf.Variable(value, dtype=tf.as_dtype(dtype), name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 213, in __call__\n return 
cls._variable_v1_call(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 176, in _variable_v1_call\n aggregation=aggregation)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 155, in \n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py\", line 2495, in default_variable_creator\n expected_shape=expected_shape, import_scope=import_scope)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 217, in __call__\n return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1395, in __init__\n constraint=constraint)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/variables.py\", line 1547, in _init_from_args\n validate_shape=validate_shape).op\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/state_ops.py\", line 223, in assign\n validate_shape=validate_shape)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/gen_state_ops.py\", line 64, in assign\n use_locking=use_locking, name=name)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py\", line 788, in _apply_op_helper\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 507, in new_func\n return func(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3300, in create_op\n op_def=op_def)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1801, in __init__\n self._traceback = tf_stack.extract_stack()\n\nResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n" + ] + } + ], + "source": [ + "from keras.optimizers import Adam, SGD\n", + "from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n", + "from keras import backend as K\n", + "from keras.models import load_model\n", + "from math import ceil\n", + "import numpy as np\n", + "from matplotlib import pyplot as plt\n", + "import os\n", + "import json\n", + "import xml.etree.cElementTree as ET\n", + "\n", + "import sys\n", + "sys.path += [os.path.abspath('../../ssd_keras-master')]\n", + "\n", + "from keras_loss_function.keras_ssd_loss import SSDLoss\n", + "from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\n", + "from keras_layers.keras_layer_DecodeDetections import DecodeDetections\n", + "from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n", + "from 
keras_layers.keras_layer_L2Normalization import L2Normalization\n",
+    "from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n",
+    "from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n",
+    "from data_generator.object_detection_2d_data_generator import DataGenerator\n",
+    "from data_generator.object_detection_2d_geometric_ops import Resize\n",
+    "from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\n",
+    "from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n",
+    "from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n",
+    "from eval_utils.average_precision_evaluator import Evaluator\n",
+    "from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\n",
+    "from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\n",
+    "\n",
+    "\n",
+    "def makedirs(path):\n",
+    "    try:\n",
+    "        os.makedirs(path)\n",
+    "    except OSError:\n",
+    "        if not os.path.isdir(path):\n",
+    "            raise\n",
+    "\n",
+    "\n",
+    "K.tensorflow_backend._get_available_gpus()\n",
+    "\n",
+    "\n",
+    "def lr_schedule(epoch):\n",
+    "    if epoch < 80:\n",
+    "        return 0.001\n",
+    "    elif epoch < 100:\n",
+    "        return 0.0001\n",
+    "    else:\n",
+    "        return 0.00001\n",
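+    "\n",
+    "# A quick check of the schedule above: Keras' LearningRateScheduler passes\n",
+    "# a 0-based epoch index, so the drop to 1e-4 lands on the epoch printed as\n",
+    "# 'Epoch 00081' in the training log further down.\n",
+    "assert lr_schedule(79) == 0.001\n",
+    "assert lr_schedule(80) == 0.0001\n",
+    "assert lr_schedule(100) == 0.00001\n",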
+    "\n",
+    "config_path = 'config_7_panel.json'\n",
+    "\n",
+    "\n",
+    "with open(config_path) as config_buffer:\n",
+    "    config = json.loads(config_buffer.read())\n",
+    "\n",
+    "###############################\n",
+    "# Parse the annotations\n",
+    "###############################\n",
+    "path_imgs_training = config['train']['train_image_folder']\n",
+    "path_anns_training = config['train']['train_annot_folder']\n",
+    "path_imgs_val = config['test']['test_image_folder']\n",
+    "path_anns_val = config['test']['test_annot_folder']\n",
+    "labels = config['model']['labels']\n",
+    "categories = {}\n",
+    "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} # category 0 is the background\n",
+    "for i in range(len(labels)): categories[labels[i]] = i+1\n",
+    "print('\\nTraining on: \\t' + str(categories) + '\\n')\n",
+    "\n",
+    "####################################\n",
+    "# Parameters\n",
+    "###################################\n",
+    " #%%\n",
+    "img_height = config['model']['input'] # Height of the model input images\n",
+    "img_width = config['model']['input'] # Width of the model input images\n",
+    "img_channels = 3 # Number of color channels of the model input images\n",
+    "mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\n",
+    "swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\n",
+    "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
+    "scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\n",
+    "#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
+    "scales = scales_pascal\n",
+    "aspect_ratios = [[1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\n",
+    "two_boxes_for_ar1 = True\n",
+    "steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\n",
+    "offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\n",
+    "clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\n",
+    "variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\n",
+    "normalize_coords = True\n",
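+    "\n",
+    "# Note: the ssd_keras model builders require len(scales) to equal the number\n",
+    "# of predictor layers plus one (each layer's second aspect-ratio-1 box uses\n",
+    "# the geometric mean of its scale and the next one). SSD300 has 6 predictor\n",
+    "# layers, so the 7-entry scales_pascal list fits; the 4-layer ssd7 backend\n",
+    "# is given its own 5-entry list further down.\n",
+    "assert len(scales) == 6 + 1\n",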
+    "\n",
+    "K.clear_session() # Clear previous models from memory.\n",
+    "\n",
+    "\n",
+    "model_path = config['train']['saved_weights_name']\n",
+    "# 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "# If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "# optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "\n",
+    "if config['model']['backend'] == 'ssd7':\n",
+    "    #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
+    "    aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
+    "    two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
+    "    steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
+    "    offsets = None\n",
+    "\n",
+    "if os.path.exists(model_path):\n",
+    "    print(\"\\nLoading pretrained weights.\\n\")\n",
+    "    # We need to create an SSDLoss object in order to pass that to the model loader.\n",
+    "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "\n",
+    "    K.clear_session() # Clear previous models from memory.\n",
+    "    model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
+    "                                                   'L2Normalization': L2Normalization,\n",
+    "                                                   'compute_loss': ssd_loss.compute_loss})\n",
+    "\n",
+    "\n",
+    "else:\n",
+    "    ####################################\n",
+    "    # Build the Keras model.\n",
+    "    ###################################\n",
+    "\n",
+    "    if config['model']['backend'] == 'ssd300':\n",
+    "        #weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'\n",
+    "        from models.keras_ssd300 import ssd_300 as ssd\n",
+    "\n",
+    "        model = ssd(image_size=(img_height, img_width, img_channels),\n",
+    "                    n_classes=n_classes,\n",
+    "                    mode='training',\n",
+    "                    l2_regularization=0.0005,\n",
+    "                    scales=scales,\n",
+    "                    aspect_ratios_per_layer=aspect_ratios,\n",
+    "                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                    steps=steps,\n",
+    "                    offsets=offsets,\n",
+    "                    clip_boxes=clip_boxes,\n",
+    "                    variances=variances,\n",
+    "                    normalize_coords=normalize_coords,\n",
+    "                    subtract_mean=mean_color,\n",
+    "                    swap_channels=swap_channels)\n",
+    "\n",
+    "\n",
+    "    elif config['model']['backend'] == 'ssd7':\n",
+    "        #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "        from models.keras_ssd7 import build_model as ssd\n",
+    "        scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
+    "        aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
+    "        two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
+    "        steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
+    "        offsets = None\n",
+    "        model = ssd(image_size=(img_height, img_width, img_channels),\n",
+    "                    n_classes=n_classes,\n",
+    "                    mode='training',\n",
+    "                    l2_regularization=0.0005,\n",
+    "                    scales=scales,\n",
+    "                    aspect_ratios_global=aspect_ratios,\n",
+    "                    aspect_ratios_per_layer=None,\n",
+    "                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                    steps=steps,\n",
+    "                    offsets=offsets,\n",
+    "                    clip_boxes=clip_boxes,\n",
+    "                    variances=variances,\n",
+    "                    normalize_coords=normalize_coords,\n",
+    "                    subtract_mean=None,\n",
+    "                    divide_by_stddev=None)\n",
+    "\n",
+    "    else:\n",
+    "        print('Wrong Backend')\n",
+    "\n",
+    "\n",
+    "    print('OK create model')\n",
+    "    #sgd = SGD(lr=config['train']['learning_rate'], momentum=0.9, decay=0.0, nesterov=False)\n",
+    "\n",
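+    "    # The run captured in the output above ended in a ResourceExhaustedError\n",
+    "    # (GPU out of memory) while Adam's slot variables were being allocated.\n",
+    "    # A minimal mitigation, sketched here and assuming TF 1.x with the Keras\n",
+    "    # TensorFlow backend, is to let the GPU allocator grow on demand instead\n",
+    "    # of reserving all memory up front; reducing config['train']['batch_size']\n",
+    "    # helps as well:\n",
+    "    #\n",
+    "    #   import tensorflow as tf\n",
+    "    #   tf_config = tf.ConfigProto()\n",
+    "    #   tf_config.gpu_options.allow_growth = True\n",
+    "    #   K.set_session(tf.Session(config=tf_config))\n",
+    "\n",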
+    "    # TODO: Set the path to the weights you want to load; only for ssd300 or ssd512\n",
+    "\n",
+    "    weights_path = '../ssd_keras-master/VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    print(\"\\nLoading pretrained weights VGG.\\n\")\n",
+    "    model.load_weights(weights_path, by_name=True)\n",
+    "\n",
+    "    # 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "    # If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "    # optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "\n",
+    "    #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    #sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\n",
+    "    optimizer = Adam(lr=config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "    model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)\n",
+    "\n",
+    "    model.summary()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Instantiate the data generators and train the model.\n",
+    "\n",
+    "*Change made so that both png and jpg images are read: keras-ssd-master/data_generator/object_detection_2d_data_generator.py, function parse_xml\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Processing image set 'train.txt': 100%|██████████| 1/1 [00:00<00:00, 3.02it/s]\n",
+      "Processing image set 'test.txt': 100%|██████████| 1/1 [00:00<00:00, 2.48it/s]\n",
+      "panel : 69\n",
+      "cell : 423\n",
+      "Number of images in the training dataset:\t 1\n",
+      "Number of images in the validation dataset:\t 1\n",
+      "Epoch 1/100\n",
+      "\n",
+      "Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 200s 4s/step - loss: 13.2409 - val_loss: 9.9807\n",
+      "\n",
+      "Epoch 00001: val_loss improved from inf to 9.98075, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 2/100\n",
+      "\n",
+      "Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 238s 5s/step - loss: 9.8864 - val_loss: 11.1452\n",
+      "\n",
+      "Epoch 00002: val_loss did not improve from 9.98075\n",
+      "Epoch 3/100\n",
+      "\n",
+      "Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 226s 5s/step - loss: 8.8060 - val_loss: 8.3006\n",
+      "\n",
+      "Epoch 00003: val_loss improved from 9.98075 to 8.30060, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 4/100\n",
+      "\n",
+      "Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 199s 4s/step - loss: 7.4999 - val_loss: 8.9384\n",
+      "\n",
+      "Epoch 00004: val_loss did not improve from 8.30060\n",
+      "Epoch 5/100\n",
+      "\n",
+      "Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 187s 4s/step - loss: 7.4727 - val_loss: 7.9512\n",
+      "\n",
+      "Epoch 00005: val_loss improved from 8.30060 to 7.95121, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 6/100\n",
+      "\n",
+      "Epoch 00006: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] - 213s 4s/step - loss: 6.8813 - val_loss: 11.2544\n",
+      "\n",
+      "Epoch 00006: val_loss did not improve from 7.95121\n",
+      "Epoch 7/100\n",
+      "\n",
+      "Epoch 00007: LearningRateScheduler setting learning rate to 0.001.\n",
+      "50/50 [==============================] 
- 195s 4s/step - loss: 6.4775 - val_loss: 6.9093\n", + "\n", + "Epoch 00007: val_loss improved from 7.95121 to 6.90929, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 8/100\n", + "\n", + "Epoch 00008: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 212s 4s/step - loss: 6.9758 - val_loss: 8.6997\n", + "\n", + "Epoch 00008: val_loss did not improve from 6.90929\n", + "Epoch 9/100\n", + "\n", + "Epoch 00009: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 199s 4s/step - loss: 6.1539 - val_loss: 10.9586\n", + "\n", + "Epoch 00009: val_loss did not improve from 6.90929\n", + "Epoch 10/100\n", + "\n", + "Epoch 00010: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 206s 4s/step - loss: 5.9307 - val_loss: 8.4361\n", + "\n", + "Epoch 00010: val_loss did not improve from 6.90929\n", + "Epoch 11/100\n", + "\n", + "Epoch 00011: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 197s 4s/step - loss: 5.3895 - val_loss: 5.9796\n", + "\n", + "Epoch 00011: val_loss improved from 6.90929 to 5.97960, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 12/100\n", + "\n", + "Epoch 00012: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 184s 4s/step - loss: 5.0889 - val_loss: 5.9283\n", + "\n", + "Epoch 00012: val_loss improved from 5.97960 to 5.92832, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 13/100\n", + "\n", + "Epoch 00013: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 193s 4s/step - loss: 5.7916 - val_loss: 6.7706\n", + "\n", + "Epoch 00013: val_loss did not improve from 5.92832\n", + "Epoch 14/100\n", + "\n", + "Epoch 00014: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 222s 4s/step - loss: 5.3010 - val_loss: 7.8910\n", + "\n", + "Epoch 00014: val_loss did not improve from 5.92832\n", + "Epoch 15/100\n", + "\n", + "Epoch 00015: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 179s 4s/step - loss: 4.9873 - val_loss: 6.0389\n", + "\n", + "Epoch 00015: val_loss did not improve from 5.92832\n", + "Epoch 16/100\n", + "\n", + "Epoch 00016: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 182s 4s/step - loss: 5.4664 - val_loss: 6.4125\n", + "\n", + "Epoch 00016: val_loss did not improve from 5.92832\n", + "Epoch 17/100\n", + "\n", + "Epoch 00017: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 166s 3s/step - loss: 6.0094 - val_loss: 9.2918\n", + "\n", + "Epoch 00017: val_loss did not improve from 5.92832\n", + "Epoch 18/100\n", + "\n", + "Epoch 00018: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 181s 4s/step - loss: 5.1737 - val_loss: 7.6806\n", + "\n", + "Epoch 00018: val_loss did not improve from 5.92832\n", + "Epoch 19/100\n", + "\n", + "Epoch 00019: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 159s 3s/step - loss: 5.2708 - val_loss: 7.1096\n", + "\n", + "Epoch 00019: val_loss did not improve from 5.92832\n", + "Epoch 20/100\n", + "\n", + "Epoch 00020: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 
[==============================] - 173s 3s/step - loss: 5.4765 - val_loss: 5.4921\n", + "\n", + "Epoch 00020: val_loss improved from 5.92832 to 5.49211, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 21/100\n", + "\n", + "Epoch 00021: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 170s 3s/step - loss: 4.6517 - val_loss: 6.6033\n", + "\n", + "Epoch 00021: val_loss did not improve from 5.49211\n", + "Epoch 22/100\n", + "\n", + "Epoch 00022: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 191s 4s/step - loss: 5.1432 - val_loss: 5.6549\n", + "\n", + "Epoch 00022: val_loss did not improve from 5.49211\n", + "Epoch 23/100\n", + "\n", + "Epoch 00023: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 159s 3s/step - loss: 5.4830 - val_loss: 5.8758\n", + "\n", + "Epoch 00023: val_loss did not improve from 5.49211\n", + "Epoch 24/100\n", + "\n", + "Epoch 00024: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 5.3366 - val_loss: 5.3871\n", + "\n", + "Epoch 00024: val_loss improved from 5.49211 to 5.38706, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 25/100\n", + "\n", + "Epoch 00025: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 138s 3s/step - loss: 5.7189 - val_loss: 8.0760\n", + "\n", + "Epoch 00025: val_loss did not improve from 5.38706\n", + "Epoch 26/100\n", + "\n", + "Epoch 00026: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 6.0929 - val_loss: 12.6163\n", + "\n", + "Epoch 00026: val_loss did not improve from 5.38706\n", + "Epoch 27/100\n", + "\n", + "Epoch 00027: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 5.2239 - val_loss: 9.8536\n", + "\n", + "Epoch 00027: val_loss did not improve from 5.38706\n", + "Epoch 28/100\n", + "\n", + "Epoch 00028: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 158s 3s/step - loss: 5.4414 - val_loss: 6.4950\n", + "\n", + "Epoch 00028: val_loss did not improve from 5.38706\n", + "Epoch 29/100\n", + "\n", + "Epoch 00029: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 157s 3s/step - loss: 5.4436 - val_loss: 9.0002\n", + "\n", + "Epoch 00029: val_loss did not improve from 5.38706\n", + "Epoch 30/100\n", + "\n", + "Epoch 00030: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 162s 3s/step - loss: 4.9780 - val_loss: 4.9993\n", + "\n", + "Epoch 00030: val_loss improved from 5.38706 to 4.99925, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 31/100\n", + "\n", + "Epoch 00031: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 140s 3s/step - loss: 4.9645 - val_loss: 5.6612\n", + "\n", + "Epoch 00031: val_loss did not improve from 4.99925\n", + "Epoch 32/100\n", + "\n", + "Epoch 00032: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.5982 - val_loss: 5.2083\n", + "\n", + "Epoch 00032: val_loss did not improve from 4.99925\n", + "Epoch 33/100\n", + "\n", + "Epoch 00033: LearningRateScheduler setting learning rate 
to 0.001.\n", + "50/50 [==============================] - 143s 3s/step - loss: 4.3101 - val_loss: 6.4808\n", + "\n", + "Epoch 00033: val_loss did not improve from 4.99925\n", + "Epoch 34/100\n", + "\n", + "Epoch 00034: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 145s 3s/step - loss: 4.4252 - val_loss: 10.9472\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Epoch 00034: val_loss did not improve from 4.99925\n", + "Epoch 35/100\n", + "\n", + "Epoch 00035: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 4.4998 - val_loss: 7.1254\n", + "\n", + "Epoch 00035: val_loss did not improve from 4.99925\n", + "Epoch 36/100\n", + "\n", + "Epoch 00036: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 4.8952 - val_loss: 7.0446\n", + "\n", + "Epoch 00036: val_loss did not improve from 4.99925\n", + "Epoch 37/100\n", + "\n", + "Epoch 00037: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 154s 3s/step - loss: 4.9868 - val_loss: 9.3251\n", + "\n", + "Epoch 00037: val_loss did not improve from 4.99925\n", + "Epoch 38/100\n", + "\n", + "Epoch 00038: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 4.8918 - val_loss: 5.1689\n", + "\n", + "Epoch 00038: val_loss did not improve from 4.99925\n", + "Epoch 39/100\n", + "\n", + "Epoch 00039: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 143s 3s/step - loss: 4.5572 - val_loss: 4.9839\n", + "\n", + "Epoch 00039: val_loss improved from 4.99925 to 4.98394, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 40/100\n", + "\n", + "Epoch 00040: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 4.4722 - val_loss: 5.7133\n", + "\n", + "Epoch 00040: val_loss did not improve from 4.98394\n", + "Epoch 41/100\n", + "\n", + "Epoch 00041: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 152s 3s/step - loss: 4.9414 - val_loss: 5.5843\n", + "\n", + "Epoch 00041: val_loss did not improve from 4.98394\n", + "Epoch 42/100\n", + "\n", + "Epoch 00042: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 4.5857 - val_loss: 5.1884\n", + "\n", + "Epoch 00042: val_loss did not improve from 4.98394\n", + "Epoch 43/100\n", + "\n", + "Epoch 00043: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 4.7094 - val_loss: 6.7545\n", + "\n", + "Epoch 00043: val_loss did not improve from 4.98394\n", + "Epoch 44/100\n", + "\n", + "Epoch 00044: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 151s 3s/step - loss: 5.0428 - val_loss: 5.2691\n", + "\n", + "Epoch 00044: val_loss did not improve from 4.98394\n", + "Epoch 45/100\n", + "\n", + "Epoch 00045: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 146s 3s/step - loss: 4.9842 - val_loss: 6.5112\n", + "\n", + "Epoch 00045: val_loss did not improve from 4.98394\n", + "Epoch 46/100\n", + "\n", + "Epoch 00046: LearningRateScheduler setting learning rate to 
0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 4.9108 - val_loss: 6.0670\n", + "\n", + "Epoch 00046: val_loss did not improve from 4.98394\n", + "Epoch 47/100\n", + "\n", + "Epoch 00047: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 155s 3s/step - loss: 4.6837 - val_loss: 5.8351\n", + "\n", + "Epoch 00047: val_loss did not improve from 4.98394\n", + "Epoch 48/100\n", + "\n", + "Epoch 00048: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 5.1042 - val_loss: 5.1778\n", + "\n", + "Epoch 00048: val_loss did not improve from 4.98394\n", + "Epoch 49/100\n", + "\n", + "Epoch 00049: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 4.1312 - val_loss: 5.9606\n", + "\n", + "Epoch 00049: val_loss did not improve from 4.98394\n", + "Epoch 50/100\n", + "\n", + "Epoch 00050: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 122s 2s/step - loss: 4.5373 - val_loss: 5.4351\n", + "\n", + "Epoch 00050: val_loss did not improve from 4.98394\n", + "Epoch 51/100\n", + "\n", + "Epoch 00051: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 135s 3s/step - loss: 4.8955 - val_loss: 6.0315\n", + "\n", + "Epoch 00051: val_loss did not improve from 4.98394\n", + "Epoch 52/100\n", + "\n", + "Epoch 00052: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 4.9445 - val_loss: 5.7199\n", + "\n", + "Epoch 00052: val_loss did not improve from 4.98394\n", + "Epoch 53/100\n", + "\n", + "Epoch 00053: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 139s 3s/step - loss: 3.9748 - val_loss: 5.5974\n", + "\n", + "Epoch 00053: val_loss did not improve from 4.98394\n", + "Epoch 54/100\n", + "\n", + "Epoch 00054: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 153s 3s/step - loss: 4.8783 - val_loss: 8.6056\n", + "\n", + "Epoch 00054: val_loss did not improve from 4.98394\n", + "Epoch 55/100\n", + "\n", + "Epoch 00055: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.1649 - val_loss: 6.0042\n", + "\n", + "Epoch 00055: val_loss did not improve from 4.98394\n", + "Epoch 56/100\n", + "\n", + "Epoch 00056: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 4.8997 - val_loss: 9.1298\n", + "\n", + "Epoch 00056: val_loss did not improve from 4.98394\n", + "Epoch 57/100\n", + "\n", + "Epoch 00057: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 151s 3s/step - loss: 4.4433 - val_loss: 7.1151\n", + "\n", + "Epoch 00057: val_loss did not improve from 4.98394\n", + "Epoch 58/100\n", + "\n", + "Epoch 00058: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 4.5827 - val_loss: 5.4356\n", + "\n", + "Epoch 00058: val_loss did not improve from 4.98394\n", + "Epoch 59/100\n", + "\n", + "Epoch 00059: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 137s 3s/step - loss: 3.9437 - val_loss: 4.7926\n", + "\n", + "Epoch 00059: 
val_loss improved from 4.98394 to 4.79262, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 60/100\n", + "\n", + "Epoch 00060: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 125s 3s/step - loss: 4.0939 - val_loss: 5.7098\n", + "\n", + "Epoch 00060: val_loss did not improve from 4.79262\n", + "Epoch 61/100\n", + "\n", + "Epoch 00061: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 161s 3s/step - loss: 5.1152 - val_loss: 5.2079\n", + "\n", + "Epoch 00061: val_loss did not improve from 4.79262\n", + "Epoch 62/100\n", + "\n", + "Epoch 00062: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 4.2958 - val_loss: 4.9239\n", + "\n", + "Epoch 00062: val_loss did not improve from 4.79262\n", + "Epoch 63/100\n", + "\n", + "Epoch 00063: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 3.8241 - val_loss: 4.5443\n", + "\n", + "Epoch 00063: val_loss improved from 4.79262 to 4.54430, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 64/100\n", + "\n", + "Epoch 00064: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 134s 3s/step - loss: 4.7252 - val_loss: 5.9445\n", + "\n", + "Epoch 00064: val_loss did not improve from 4.54430\n", + "Epoch 65/100\n", + "\n", + "Epoch 00065: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 154s 3s/step - loss: 4.4455 - val_loss: 4.8326\n", + "\n", + "Epoch 00065: val_loss did not improve from 4.54430\n", + "Epoch 66/100\n", + "\n", + "Epoch 00066: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 145s 3s/step - loss: 4.4054 - val_loss: 5.6441\n", + "\n", + "Epoch 00066: val_loss did not improve from 4.54430\n", + "Epoch 67/100\n", + "\n", + "Epoch 00067: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 124s 2s/step - loss: 4.4165 - val_loss: 6.8159\n", + "\n", + "Epoch 00067: val_loss did not improve from 4.54430\n", + "Epoch 68/100\n", + "\n", + "Epoch 00068: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 162s 3s/step - loss: 5.0418 - val_loss: 4.8508\n", + "\n", + "Epoch 00068: val_loss did not improve from 4.54430\n", + "Epoch 69/100\n", + "\n", + "Epoch 00069: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 140s 3s/step - loss: 4.1512 - val_loss: 5.4053\n", + "\n", + "Epoch 00069: val_loss did not improve from 4.54430\n", + "Epoch 70/100\n", + "\n", + "Epoch 00070: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 4.6197 - val_loss: 5.2824\n", + "\n", + "Epoch 00070: val_loss did not improve from 4.54430\n", + "Epoch 71/100\n", + "\n", + "Epoch 00071: LearningRateScheduler setting learning rate to 0.001.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "50/50 [==============================] - 152s 3s/step - loss: 4.2807 - val_loss: 5.5992\n", + "\n", + "Epoch 00071: val_loss did not improve from 4.54430\n", + "Epoch 72/100\n", + "\n", + "Epoch 00072: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 143s 3s/step - loss: 4.5368 - 
val_loss: 6.5207\n", + "\n", + "Epoch 00072: val_loss did not improve from 4.54430\n", + "Epoch 73/100\n", + "\n", + "Epoch 00073: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.0598 - val_loss: 5.2421\n", + "\n", + "Epoch 00073: val_loss did not improve from 4.54430\n", + "Epoch 74/100\n", + "\n", + "Epoch 00074: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 150s 3s/step - loss: 4.4861 - val_loss: 5.4182\n", + "\n", + "Epoch 00074: val_loss did not improve from 4.54430\n", + "Epoch 75/100\n", + "\n", + "Epoch 00075: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 144s 3s/step - loss: 4.5263 - val_loss: 4.3774\n", + "\n", + "Epoch 00075: val_loss improved from 4.54430 to 4.37742, saving model to experimento_ssd7_panel_cell.h5\n", + "Epoch 76/100\n", + "\n", + "Epoch 00076: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 148s 3s/step - loss: 3.8465 - val_loss: 4.5809\n", + "\n", + "Epoch 00076: val_loss did not improve from 4.37742\n", + "Epoch 77/100\n", + "\n", + "Epoch 00077: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 152s 3s/step - loss: 4.0495 - val_loss: 4.9745\n", + "\n", + "Epoch 00077: val_loss did not improve from 4.37742\n", + "Epoch 78/100\n", + "\n", + "Epoch 00078: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 152s 3s/step - loss: 4.6009 - val_loss: 13.4989\n", + "\n", + "Epoch 00078: val_loss did not improve from 4.37742\n", + "Epoch 79/100\n", + "\n", + "Epoch 00079: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 142s 3s/step - loss: 4.6687 - val_loss: 6.4490\n", + "\n", + "Epoch 00079: val_loss did not improve from 4.37742\n", + "Epoch 80/100\n", + "\n", + "Epoch 00080: LearningRateScheduler setting learning rate to 0.001.\n", + "50/50 [==============================] - 147s 3s/step - loss: 4.5297 - val_loss: 8.0478\n", + "\n", + "Epoch 00080: val_loss did not improve from 4.37742\n", + "Epoch 81/100\n", + "\n", + "Epoch 00081: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 141s 3s/step - loss: 4.2662 - val_loss: 5.7929\n", + "\n", + "Epoch 00081: val_loss did not improve from 4.37742\n", + "Epoch 82/100\n", + "\n", + "Epoch 00082: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 149s 3s/step - loss: 4.1048 - val_loss: 4.6117\n", + "\n", + "Epoch 00082: val_loss did not improve from 4.37742\n", + "Epoch 83/100\n", + "\n", + "Epoch 00083: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 156s 3s/step - loss: 3.9905 - val_loss: 4.5542\n", + "\n", + "Epoch 00083: val_loss did not improve from 4.37742\n", + "Epoch 84/100\n", + "\n", + "Epoch 00084: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 155s 3s/step - loss: 4.3129 - val_loss: 4.4676\n", + "\n", + "Epoch 00084: val_loss did not improve from 4.37742\n", + "Epoch 85/100\n", + "\n", + "Epoch 00085: LearningRateScheduler setting learning rate to 0.0001.\n", + "50/50 [==============================] - 156s 3s/step - loss: 3.7951 - val_loss: 4.4689\n", + "\n", + "Epoch 00085: val_loss did not improve from 
4.37742\n",
+      "Epoch 86/100\n",
+      "\n",
+      "Epoch 00086: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 155s 3s/step - loss: 4.3618 - val_loss: 4.4048\n",
+      "\n",
+      "Epoch 00086: val_loss did not improve from 4.37742\n",
+      "Epoch 87/100\n",
+      "\n",
+      "Epoch 00087: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 156s 3s/step - loss: 4.3538 - val_loss: 4.6832\n",
+      "\n",
+      "Epoch 00087: val_loss did not improve from 4.37742\n",
+      "Epoch 88/100\n",
+      "\n",
+      "Epoch 00088: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 152s 3s/step - loss: 4.2076 - val_loss: 4.4796\n",
+      "\n",
+      "Epoch 00088: val_loss did not improve from 4.37742\n",
+      "Epoch 89/100\n",
+      "\n",
+      "Epoch 00089: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 146s 3s/step - loss: 4.1322 - val_loss: 4.5462\n",
+      "\n",
+      "Epoch 00089: val_loss did not improve from 4.37742\n",
+      "Epoch 90/100\n",
+      "\n",
+      "Epoch 00090: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 157s 3s/step - loss: 4.4995 - val_loss: 4.5660\n",
+      "\n",
+      "Epoch 00090: val_loss did not improve from 4.37742\n",
+      "Epoch 91/100\n",
+      "\n",
+      "Epoch 00091: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 158s 3s/step - loss: 4.2653 - val_loss: 4.5265\n",
+      "\n",
+      "Epoch 00091: val_loss did not improve from 4.37742\n",
+      "Epoch 92/100\n",
+      "\n",
+      "Epoch 00092: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 153s 3s/step - loss: 4.3702 - val_loss: 4.5276\n",
+      "\n",
+      "Epoch 00092: val_loss did not improve from 4.37742\n",
+      "Epoch 93/100\n",
+      "\n",
+      "Epoch 00093: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 153s 3s/step - loss: 3.7340 - val_loss: 4.5439\n",
+      "\n",
+      "Epoch 00093: val_loss did not improve from 4.37742\n",
+      "Epoch 94/100\n",
+      "\n",
+      "Epoch 00094: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 151s 3s/step - loss: 4.0253 - val_loss: 4.3250\n",
+      "\n",
+      "Epoch 00094: val_loss improved from 4.37742 to 4.32498, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 95/100\n",
+      "\n",
+      "Epoch 00095: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 143s 3s/step - loss: 4.0254 - val_loss: 4.6277\n",
+      "\n",
+      "Epoch 00095: val_loss did not improve from 4.32498\n",
+      "Epoch 96/100\n",
+      "\n",
+      "Epoch 00096: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 148s 3s/step - loss: 3.9857 - val_loss: 4.2953\n",
+      "\n",
+      "Epoch 00096: val_loss improved from 4.32498 to 4.29533, saving model to experimento_ssd7_panel_cell.h5\n",
+      "Epoch 97/100\n",
+      "\n",
+      "Epoch 00097: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 157s 3s/step - loss: 3.6750 - val_loss: 4.5637\n",
+      "\n",
+      "Epoch 00097: val_loss did not improve from 4.29533\n",
+      "Epoch 98/100\n",
+      "\n",
+      "Epoch 00098: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 154s 3s/step - loss: 3.7435 - val_loss: 4.3923\n",
+      "\n",
+      "Epoch 00098: val_loss did not improve from 4.29533\n",
+      "Epoch 99/100\n",
+      "\n",
+      "Epoch 00099: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 162s 3s/step - loss: 4.0930 - val_loss: 4.4010\n",
+      "\n",
+      "Epoch 00099: val_loss did not improve from 4.29533\n",
+      "Epoch 100/100\n",
+      "\n",
+      "Epoch 00100: LearningRateScheduler setting learning rate to 0.0001.\n",
+      "50/50 [==============================] - 134s 3s/step - loss: 3.8983 - val_loss: 4.4451\n",
+      "\n",
+      "Epoch 00100: val_loss did not improve from 4.29533\n"
+     ]
+    }
+   ],
+   "source": [
+    "# MODEL TRAINING\n",
+    "#####################################################################\n",
+    "# Instantiate two `DataGenerator` objects: One for training, one for validation.\n",
+    "######################################################################\n",
+    "# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n",
+    "\n",
+    "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
+    "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
+    "\n",
+    "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n",
+    "\n",
+    "# The XML parser needs to know what object class names to look for and in which order to map them to integers.\n",
+    "classes = ['background'] + labels\n",
+    "\n",
+    "train_dataset.parse_xml(images_dirs=[config['train']['train_image_folder']],\n",
+    "                        image_set_filenames=[config['train']['train_image_set_filename']],\n",
+    "                        annotations_dirs=[config['train']['train_annot_folder']],\n",
+    "                        classes=classes,\n",
+    "                        include_classes='all',\n",
+    "                        #classes = ['background', 'panel', 'cell'], \n",
+    "                        #include_classes=classes,\n",
+    "                        exclude_truncated=False,\n",
+    "                        exclude_difficult=False,\n",
+    "                        ret=False)\n",
+    "\n",
+    "val_dataset.parse_xml(images_dirs=[config['test']['test_image_folder']],\n",
+    "                      image_set_filenames=[config['test']['test_image_set_filename']],\n",
+    "                      annotations_dirs=[config['test']['test_annot_folder']],\n",
+    "                      classes=classes,\n",
+    "                      include_classes='all',\n",
+    "                      #classes = ['background', 'panel', 'cell'], \n",
+    "                      #include_classes=classes,\n",
+    "                      exclude_truncated=False,\n",
+    "                      exclude_difficult=False,\n",
+    "                      ret=False)\n",
+    "\n",
+    "#########################\n",
+    "# 3: Set the batch size.\n",
+    "#########################\n",
+    "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n",
+    "\n",
+    "##########################\n",
+    "# 4: Set the image transformations for pre-processing and data augmentation options.\n",
+    "##########################\n",
+    "# For the training generator:\n",
+    "\n",
+    "# For the validation generator:\n",
+    "convert_to_3_channels = ConvertTo3Channels()\n",
+    "resize = Resize(height=img_height, width=img_width)\n",
+    "\n",
+    "#######################################\n",
+    "# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n",
+    "#########################################\n",
+    "# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\n",
+    "if config['model']['backend'] == 'ssd300':\n",
+    "    predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('fc7_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n",
+    "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
+    "                                        img_width=img_width,\n",
+    "                                        n_classes=n_classes,\n",
+    "                                        predictor_sizes=predictor_sizes,\n",
+    "                                        scales=scales,\n",
+    "                                        aspect_ratios_per_layer=aspect_ratios,\n",
+    "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                                        steps=steps,\n",
+    "                                        offsets=offsets,\n",
+    "                                        clip_boxes=clip_boxes,\n",
+    "                                        variances=variances,\n",
+    "                                        matching_type='multi',\n",
+    "                                        pos_iou_threshold=0.5,\n",
+    "                                        neg_iou_limit=0.5,\n",
+    "                                        normalize_coords=normalize_coords)\n",
+    "\n",
+    "elif config['model']['backend'] == 'ssd7':\n",
+    "    predictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n",
+    "                       model.get_layer('classes5').output_shape[1:3],\n",
+    "                       model.get_layer('classes6').output_shape[1:3],\n",
+    "                       model.get_layer('classes7').output_shape[1:3]]\n",
+    "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
+    "                                        img_width=img_width,\n",
+    "                                        n_classes=n_classes,\n",
+    "                                        predictor_sizes=predictor_sizes,\n",
+    "                                        scales=scales,\n",
+    "                                        aspect_ratios_global=aspect_ratios,\n",
+    "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                                        steps=steps,\n",
+    "                                        offsets=offsets,\n",
+    "                                        clip_boxes=clip_boxes,\n",
+    "                                        variances=variances,\n",
+    "                                        matching_type='multi',\n",
+    "                                        pos_iou_threshold=0.5,\n",
+    "                                        neg_iou_limit=0.3,\n",
+    "                                        normalize_coords=normalize_coords)\n",
+    "\n",
+    "\n",
+    "data_augmentation_chain = DataAugmentationVariableInputSize(resize_height=img_height,\n",
+    "                                                            resize_width=img_width,\n",
+    "                                                            random_brightness=(-48, 48, 0.5),\n",
+    "                                                            random_contrast=(0.5, 1.8, 0.5),\n",
+    "                                                            random_saturation=(0.5, 1.8, 0.5),\n",
+    "                                                            random_hue=(18, 0.5),\n",
+    "                                                            random_flip=0.5,\n",
+    "                                                            n_trials_max=3,\n",
+    "                                                            clip_boxes=True,\n",
+    "                                                            overlap_criterion='area',\n",
+    "                                                            bounds_box_filter=(0.3, 1.0),\n",
+    "                                                            bounds_validator=(0.5, 1.0),\n",
+    "                                                            n_boxes_min=1,\n",
+    "                                                            background=(0,0,0))\n",
+    "#######################\n",
+    "# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n",
+    "#######################\n",
+    "\n",
+    "train_generator = train_dataset.generate(batch_size=batch_size,\n",
+    "                                         shuffle=True,\n",
+    "                                         transformations=[data_augmentation_chain],\n",
+    "                                         label_encoder=ssd_input_encoder,\n",
+    "                                         returns={'processed_images',\n",
+    "                                                  'encoded_labels'},\n",
+    "                                         keep_images_without_gt=False)\n",
+    "\n",
+    "val_generator = val_dataset.generate(batch_size=batch_size,\n",
+    "                                     shuffle=False,\n",
+    "                                     transformations=[convert_to_3_channels,\n",
+    "                                                      resize],\n",
+    "                                     label_encoder=ssd_input_encoder,\n",
+    "                                     returns={'processed_images',\n",
+    "                                              'encoded_labels'},\n",
+    "                                     keep_images_without_gt=False)\n",
+    "\n",
+    "# Summary instance training\n",
+    "category_train_list = []\n",
+    "for image_label in train_dataset.labels:\n",
+    "    category_train_list += [i[0] for i in image_label]\n",
+    "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n",
+    "for i in summary_category_training.keys():\n",
+    "    print(i, ': {:.0f}'.format(summary_category_training[i]))\n",
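+    "\n",
+    "# The parsing above found a single training image and a single validation\n",
+    "# image ('panel : 69' and 'cell : 423' in the output are box counts, not\n",
+    "# image counts). With steps_per_epoch=50 below, each 'epoch' is therefore\n",
+    "# 50 augmented batches drawn from one image, and validation runs\n",
+    "# ceil(val_dataset_size/batch_size) == 1 step per epoch.\n",
+    "#\n",
+    "# parse_xml is also the routine that the markdown note above says was edited\n",
+    "# (in data_generator/object_detection_2d_data_generator.py) to accept .png\n",
+    "# as well as .jpg images; a rough sketch of that kind of lookup, with\n",
+    "# illustrative names only:\n",
+    "#\n",
+    "#   for ext in ('.jpg', '.png'):\n",
+    "#       candidate = os.path.join(images_dir, image_id + ext)\n",
+    "#       if os.path.exists(candidate):\n",
+    "#           filename = candidate\n",
+    "#           break\n",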
+    "\n",
+    "\n",
+    "# Get the number of samples in the training and validation datasets.\n",
+    "train_dataset_size = train_dataset.get_dataset_size()\n",
+    "val_dataset_size = val_dataset.get_dataset_size()\n",
+    "\n",
+    "print(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\n",
+    "print(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n",
+    "\n",
+    "\n",
+    "##########################\n",
+    "# Define model callbacks.\n",
+    "#########################\n",
+    "\n",
+    "# TODO: Set the filepath under which you want to save the model.\n",
+    "model_checkpoint = ModelCheckpoint(filepath=config['train']['saved_weights_name'],\n",
+    "                                   monitor='val_loss',\n",
+    "                                   verbose=1,\n",
+    "                                   save_best_only=True,\n",
+    "                                   save_weights_only=False,\n",
+    "                                   mode='auto',\n",
+    "                                   period=1)\n",
+    "#model_checkpoint.best =\n",
+    "\n",
+    "csv_logger = CSVLogger(filename='log.csv',\n",
+    "                       separator=',',\n",
+    "                       append=True)\n",
+    "\n",
+    "learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n",
+    "                                                verbose=1)\n",
+    "\n",
+    "terminate_on_nan = TerminateOnNaN()\n",
+    "\n",
+    "callbacks = [model_checkpoint,\n",
+    "             csv_logger,\n",
+    "             learning_rate_scheduler,\n",
+    "             terminate_on_nan]\n",
+    "\n",
+    "\n",
+    "batch_images, batch_labels = next(train_generator)\n",
+    "\n",
+    "\n",
+    "initial_epoch = 0\n",
+    "final_epoch = 100 #config['train']['nb_epochs']\n",
+    "steps_per_epoch = 50\n",
+    "\n",
+    "history = model.fit_generator(generator=train_generator,\n",
+    "                              steps_per_epoch=steps_per_epoch,\n",
+    "                              epochs=final_epoch,\n",
+    "                              callbacks=callbacks,\n",
+    "                              validation_data=val_generator,\n",
+    "                              validation_steps=ceil(val_dataset_size/batch_size),\n",
+    "                              initial_epoch=initial_epoch,\n",
+    "                              verbose=1 if config['train']['debug'] else 2)\n",
+    "\n",
+    "history_path = config['train']['saved_weights_name'].split('.')[0] + '_history'\n",
+    "\n",
+    "np.save(history_path, history.history)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "dict_keys(['val_loss', 'loss', 'lr'])\n"
+     ]
+    },
+    {
+     "data": {
+      "image/png": "<base64-encoded PNG omitted: matplotlib plot of the saved training history (val_loss, loss, lr)>
9QEUZKSXV1NQ6HI9ZD0Wg0/Zg+mzVUUlLC/v37CWvTmtoKcLSC40jvDSzCOBwOSkpKYj0MjUbTj+mzQmC32yktLQ3vTf97Bkz7Fpz7214Zk0aj0fRF+qxrqEckpYGzMdaj0Gg0mrgiwYQgFdp0+2eNRqPxJbGEwJ4GTi0EGo1G40tiCUFSqqow1mg0Go2HxBICe6q2CDQajSaAxBKCpDQdI9BoNJoAEksI7Kk6a0ij0WgC6DUhEEI8K4SoEEJs9HnuQSHEViHEeiHE60KIAb11/aDorCGNRqPpQG9aBM8D5wY89zEwQUo5EfgKuK8Xr98RnTWk0Wg0Heg1IZBSLgJqAp77SErZbjxcDkS3d4KZNaQbuWk0Go2HWMYIbgLe7+xFIcRtQoiVQoiVYfUT6gp7qmo652qLzPk0Go2mHxATIRBC/BRoB17s7Bgp5ZNSyulSyul5eXmRuXBSmrrVtQQajUbjIepN54QQ3wQuBM6Q0W62b09Vt84mYGBUL63RaDTxSlSFQAhxLvAT4BQpZfSjth6LQAeMNRqNxqQ300fnAcuA0UKI/UKIm4FHgQzgYyHEWiHE33vr+kHxWATaNaTRaDQmvWYRSCmvDfL0M711vZBIMoRAWwQajUbjIcEqi3WwWKPRaAJJLCFI0q4hjUajCSSxhMCuXUMajUYTSGIJgZk1pNtMaDQajYfEEgKPRaBdQxqNRmOSmEKgLQKNRqPxkFhCYLGALUVbBJr+z/s/gd2LYj0KTR8h6i0mYk6SbkWtSQBWPAUWG5SeHOuRaPoAiWURgN6cRtP/cbtVl932lliPRNNHSDwhsKfpOgJN/8btVLfO5tiOQ9NnSDwh0BaBpr9j7rehhUATIoknBPZUHSPQ9G9chkUQr64hKeHtu2DvsliPRGOQeEKQlKazhjT9G1ecu4ZcTlj1HOycH+uRaAwSTwi0RaDp78S7a8i0VOLVYklAEk8IdIxA098xg8Xt8SoErcat3js8Xkg8IdBZQ5r+jsc1FKcrbm0RxB2JJwTaItD0d1x9xCJwaYsgXkg8IbCnKdPZ/LFoNP2NuI8RGOPSFkHckHhCkKQ7kGr6OXHvGtIxgngj8YRAdyDV9HfiPlisYwTxRq8JgRDiWSFEhRBio89zA4UQHwshthu32b11/U4xN6fRcQJNf8V0Dbnb49MFagqAjhHEDb1pETwPnBvw3L3AfCnlSGC+8Ti62PW+xZp+jqvdez8e4wQe15C2COKFXhMCKeUioCbg6UuAfxj3/wFc2lvX75SuYgQVW+EvE6HuUHTHpNFEEt+VdjxOth7XUGtsx6HxEO0YQYGU8hCAcZvf2YFCiNuEECuFECsrKysjNwJ7F66hXQugdi9Ub4/c9TSaaOMrBHFtEWghiBfiNlgspXxSSjldSjk9Ly8vcidO6sI1dNgIZ7TWR+56Gk20cfu4huLZInBpIYgXoi0E5UKIIgDjtiLK1/fZwD6IRVBuCkFD9Maj0UQabRFowiTaQvAW8E3j/jeBN6N8fW/WUKBF4GqHyq3qfmtddMek0UQS30yhuBQCHSOIN3ozfXQesAwYLYTYL4S4GXgAOEsIsR04y3gcXTqzCGp2er+gbdoi0PRhfIUgHmsJtEUQd/Ta5vVSyms7eemM3rpmIJX1rRxpamNUQYb3SY9FECAE5Ru993WMQNOX8XMN6RiBpnviNlgcCf7yyVdc++Ry/yctVrAmd0wfPbwRLDaVVaRjBJq+jLuPWATudnC7YjsWDdDPhSAzxU5dixMppf8LSWlBLIJNkDsKUrK1RaDp2/SVGAFo91Cc0K+FICvFjtMlaXYGrDqS0jrGCMo3QsEESM6ANi0Emj5M3AuBz+Qfj+mtCUi/FwKAo80B/Vbsqf5ZQ001UHcACsZDcnrfswjcLvjsD9BcG+uRaOKBvlJZDLrfUJzQr4Ug06GEoK653f+FwM1pyjep20LDIuhrMYKKzbDgf/Vm4BqFywk2h7ofj8Fi3zHFo1AlIP1aCDq3CAJiBKYQFEyApD5oEZjmv/a3akAFi+0pKvkhHtut+8UItEUQDySmECSl+mcNlW+A1FxIL4DkzL5XR+DUOz5pfHC1gcWuXKDx+J3QMYK4o9fqCOKBzBT18eqCxggCLIKC8SBE34wR6EpNjS+udrAmqftxGSzWMYJ4I0EtAp+sIVc7VGyBwuPV4+QMJQSBKafxjHYNaXxxtYHVBnZHfK6421shySjyjMfxJSD9WggyHCFkDdXsUl/GgvHqcXIGIPvWnsbaItD44mpTFoEtJX5jBI5M477+zsYD/VoIrBZBRrKNupZgMYImlW658HfqOdMiSEpXt30pTmBaBLpkXwOqYteapALG8Zg11N4KjizvfU3M6dcxAlDVxUGzhlyt8LdZ0FAOJ9+jMoZABYtBuYcyCqM72J6iLQKNL642o11KSpy2mGiBzEHqvl68xAX9XgiyUuwdg8WmWerIgmtehOKp3teSDYugLwWMddaQxheX03ANOeLTxaktgrij3wtBZoqtY0HZpGsgNQfGXQK2ZP/Xko0gVl8SAm0RaHxxOcFqVxZBY1WsR9MRHSOIO/p1jACURdDBNZSSDROv6igC0LdjBPpHpQEjWGxXFkG8uYakVO4gbRHEFYkpBF3Rly0C7W/VgKostiYZ2XFx5i40J34zFqe/s3FBvxeCTIe9Y9ZQV/RFIdAWgcYXl9OoLI5Di8BctHgsgjgTqgSl3wtBVoqdpjYXTpc7tDf0RSHwxAj0j0qDv2sobi0CwwWrew3FBSEJgRDiTiFEplA8I4RYLYQ4u7cHFwmyUjspKusMmwOEtY/GCPSPSoN/sNjZFF9V8uZixZZixDDiTKgSlFAtgpuklHXA2UAecCOx2Hi+B3hbUYcoBEJ420z0FbRFoPHFTB+1pwAyvvr5mBaBLVltGRtPY0tgQhUCYdyeDzwnpVzn81zYCCH+SwixSQixUQgxTwjh6Om5uqPTfkNd0df2JDDNf/2j0oARLLarVTfEV+M5M2Zhcygx0IuXuCBUIVglhPgIJQQfCiEygBCd7v4IIYqBO4DpUsoJgBW4pifnCoXMHgtBXS+NqBdo1wVlGh88baiN9VU8fS9Mi8BuCoFevMQDoRaU3QxMBnZJKZuEEANR7qFjuW6KEMIJpAIHj+FcXZJltqJuae/mSB+S0qMfI2iqgSN7/KucQ8WpC8o0Pngqi02LII4az3liBNoiiCdCtQhmAduklLVCiOuBnwFHe3JBKeUB4CFgH3AIOCql/Kgn5wqFnlsEUY4RLH0E/nlpz97brtNHNT64nN421BBfmUOeGIFDxwjiiFCF4HGgSQgxCbgH2Av8sycXFEJkA5cApcAgIM0Ql8DjbhNCrBRCrKysrOzJpYAeBIvB2JwmyhZBYwW0HlUb0YeLtgg0vphtqO2p6nE81RJ4LIJkbRHEEaEKQbuUUqIm8IellA8DGT285pnAbillpZTSCbwGzA48SEr5pJRyupRyel5eXg8vBQ67lWSbJUwhiIFF0GLEJHrSJKxdt6HWGLhdgPQ2nYP4tQhsyXrxEieEKgT1Qoj7g
BuAd4UQVsDew2vuA2YKIVKFEAI4A9jSw3OFRNhtJpIyoh8jaD0GIXD6pI/GU864JvqYrhazDTXEWdZQoEWghSAeCFUIrgZaUfUEh4Fi4MGeXFBK+QXwCrAa2GCM4cmenCtUetRvKNrbVfbUIpBSWQTCqh5rn2tiY/7/+1oEcekaMmMEWgjigZCEwJj8XwSyhBAXAi1Syh7FCIzz/VJKOUZKOUFKeYOUsle/DZkp4fYbSifq21WarqhwLRFzRaW7OWpA7cENRmWxESOIS9eQtgjiiVBbTFwFrAC+BlwFfCGEuLI3BxZJ+kQHUtM1FG6qn7naSxlgPNY/rITGYxH41hHEqUWghSBuCLWO4KfACVLKCgAhRB7wCcrFE/dkpdjZXhHGpJ5kCEE04wQ9dQ05A7o5alM7sXEbCx6/OoJ4EgLj+2lN0kIQR4QaI7CYImBQHcZ7Y06mI8guZV3hsQiiVF3scnpXbWG7hoz3ObRFoEF9l8C/sjiuhKDFaOwodIwgjgjVIvhACPEhMM94fDXwXu8MKfJkGTECt1tisYTQIsmzb3GULIIWH8E5VotA52UnNr6uIdMiiKfvRHurd2dAm0MvXOKEkIRASnm3EOIKYA6q2dyTUsrXe3VkESQzxY6UUN/a7mlC1yXRjhG0+hRpt+kYgeYYcPm4hiwWteqOR4sAwJakv69xQsib10spXwVe7cWx9Bpmm4m6ZmdoQhDtfYv9LIIwr+mxCLQQaPARAuN7bo+znv+BFoHbCW63Ei1NzOhSCIQQ9UCwZHoBSCllZq+MKsL4tqIeHMobzP1Uo2YRHINryBMj0MFiDT7BYkMIbCnx13TOtAisSerW1QqWlNiNSdO1EEgpe9pGIq7ISgmz35AnRhAlIYhEjEC7hjTgU1nsYxHEUx2B09c15NMm266FIJYkhD3maTwXalGZzaFK9GNiEYSbNaSDxRoffCuLQRWVxVsdgW+MAPSeBHFAQghB2PsWCxHdPQlMiyAlO3wz3hmYPqp/VAmNb2UxxN8G9oExAtCLlzggMYSgR3sSZEbfIsgY1IMYQaBrSP+oEhrf9FEwNrCPU4vAagiC7o8VcxJCCNKSrFgtIsyisvQoxgiOqqBeyoAexAgCLAIdLE5sfCuLwcjVjych8LUIjFu9eIk5CSEEQggyHbb43aWstQ4cmZCUdgwxAh0s1uBTWWzkgdhT4sw15BsjMIVAWwSxJiGEAHqyJ0GUYwTJmSqw1xOLQFiViIBeXSU6HYLFKXFoEQQKgf7OxpqEEoLwWlHHwiJI70FlsZF6p1dXGvCvLAYjWBxPQtDi/a56YgTaio01CSMEmWG3oo7ivsWmRdAT15Cz2aeJV1L8rK5qdsOjM6D+cKxHklh0qCyOt2BxMItAC0Gs0ULQGdHOGvLECHqQNWQW49gc8ZOBcXgDVG2Dil7dhVQTSIfK4nhrMdESJFishSDWJIwQZKXYw8saMmMEbnfvDcqktd5rEbid4bl3TIsA4ssiaDEa6UVzcx9NkMriVPWdiMb3uDvcLvX97lBZrIUg1iSUEBxtbsPlDnEf4uQMQIIzCttVttSpymAz4BvONdtbvH3ne7Ot7+7PYdFDoR+vhSA2BGs6B/GxQPDdphL8ew1pYkrCCMG4okycLsnasiOhvSFaexK42tXEb1oEEJ57yNns7Tvfmzs+bXgZFv859OPNIrlobe7T32iqgU/vV6vocHA5VRaZxaoex9OeBL7bVPreaosg5iSMEJw8Kg+rRfDp1oruD4bodSA1J0pHD4XAzyJI7r0ffGuDGleoLgZtERwb2z+GRQ9Cxebw3udq81oDEF+7lAVaBJ5eQ1oIYk1MhEAIMUAI8YoQYqsQYosQYlZvXzMrxc60odl8urUytDd49iSIkhAkZ/ZsH4RAi6C3gsVtDYAMPSfd7J/UcrTr4zTBMb8DLWFaVC6n1+UCKkYAcW4RxMHYEpxYWQQPAx9IKccAk4CopJacPiafLYfqOHQ0hMnM3KUs3B9iuLT4WATmj7anFoG1ly0CCL3OQVsEx4bZfDBc15rb6a0qBu9kGw97EpjfTbtPcgPET6ZbAhN1IRBCZAInA88ASCnbpJS10bj26WPyAVgQilWQO1JNrOte6t1B+VkEpmsojB9te0t0YgSmZRSqteIRAh0j6BHmdyBsi6AtwCIwvhvx0GYi0CIwN7DXFkHMiYVFMByoBJ4TQqwRQjwthEgLPEgIcZsQYqUQYmVlZYjunG4YmZ9OSXZKaHGC9HyY9V1Y/xIcWB2R6wfF1yLokWsoSllDHosgRGulVVsEx4SZORaukLra/YXA436JwxiBeV9Xw8ecWAiBDZgKPC6lnAI0AvcGHiSlfFJKOV1KOT0vLy8iFxZCcPqYfJbsqKLFGUI2xtwfQmoufPQzkCGmnYZLUIsgTNeQxyLoxc3A28IUAtMi6G3XWn/F/Du3hGksu9rA6uMaimeLAHo3wUETMrEQgv3AfinlF8bjV1DCEBVOG5NPs9PFF7truj/YkQmn3Qd7l8DWd3tnQC3HKATOZn+LoLdysj0WQaiuITN9VFsEPSLSrqF4tQisvZjgoAmZqAuBlPIwUCaEGG08dQYQZo5cz5k1PAeH3cKCUNNIp34LckfDx7/oHRPWdKH4po+GWlDmqdQ0fuzWXooRuNq9E0koIiWlriM4VnrqGnK3e6uKwSdYHA9CoC2CeCVWWUM/AF4UQqwHJgO/jdaFHXYrc47L5dOtFchQ3D1WG5z1a6jZCdvei/yAWurUBG5LVvnf1qTOJ9vmWpj/P97qUfPH3dt1BL5WQChC0NYA0g0IbRH0lGOyCHzrCEzXUDwIgWkRBAqBriOINTERAinlWsP/P1FKeamUMsRy38hwxtgC9tU0selgiD+yITPV7dH9kR+M2XDOpKvGc199CJ8/BAfXqMeeFZZP07nesFr8hCAE15AZH8goVEIQD31u+ho9TR8NrCOIp1x9z/c1MFishSDWJExlsS8XHF9EktXCK6tCnNgdA1RudlNV5AdjtqA2SUrvXAjqD6nbBsOt1cEi6KWmc61hWgSmEGSVADJ6G/z0JzwFZWEW5LmcARaBUZsSrxaBTh+NCxJSCLJS7Zw1voA31x6grT2E1aoQkJoDjb0gBIEWgT2184mzoVzdNhpCEMwicDsjvwIP1zVkujOyStStdg+FT6RcQ7ZkQMSJEHRiEehgccxJSCEAuHJaCUeanCzYFmLQODUXmqojP5AOFkFa5wVlphB0ZhH0VjdH34k8bIsAHTDuCcdUWewjBELEzwb2OlgctySsEJw0Ipe8jOTQ3UNpudGxCLqKEdQHCEEwi8D3+UgRbozAnLyyBhuPE9QiaKqBhh4WQ3rqCHoSI7D7P2d3xEkdQSsIS0ALDF1QFg8krBDYrBYun1LMgq0VVDWEsIJOy+2dGEFrPSRneR+bG+IEo8HY9tG0DIJlDUHkf1hmjEBYQ+tZE2gRJGpR2Tt3wSs39uy95t/Z2ajSd0MlMFgMaq+L5qjmYwSnvcW7raqJjhHEBQkrBABXTCuh3S15c+3B7g9O7SWLoKUHFkGj
…remainder of base64-encoded PNG data omitted: the figure is the 'model loss' learning curve rendered by the cell below, training and validation loss on the y-axis against epoch on the x-axis…\n",
+ "text/plain": [
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "experimento_ssd7_panel.h5\n" + ] + } + ], + "source": [ + "#Graficar aprendizaje\n", + "\n", + "history_path =config['train']['saved_weights_name'].split('.')[0] + '_history'\n", + "\n", + "hist_load = np.load(history_path + '.npy',allow_pickle=True).item()\n", + "\n", + "print(hist_load.keys())\n", + "\n", + "# summarize history for loss\n", + "plt.plot(hist_load['loss'])\n", + "plt.plot(hist_load['val_loss'])\n", + "plt.title('model loss')\n", + "plt.ylabel('loss')\n", + "plt.xlabel('epoch')\n", + "plt.legend(['train', 'test'], loc='upper left')\n", + "plt.show()\n", + "\n", + "print(config['train']['saved_weights_name'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Evaluación del Modelo" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "config_path = 'config_7_panel.json'\n", + "\n", + "with open(config_path) as config_buffer:\n", + " config = json.loads(config_buffer.read())\n", + "\n", + " \n", + "model_mode = 'training'\n", + "# TODO: Set the path to the `.h5` file of the model to be loaded.\n", + "model_path = config['train']['saved_weights_name']\n", + "\n", + "# We need to create an SSDLoss object in order to pass that to the model loader.\n", + "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + "K.clear_session() # Clear previous models from memory.\n", + "\n", + "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + " 'L2Normalization': L2Normalization,\n", + " 'DecodeDetections': DecodeDetections,\n", + " 'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + " \n", + "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "\n", + "# 2: Parse the image and label lists for the training and validation datasets. 
This can take a while.\n", + "\n", + "\n", + "\n", + "# The XML parser needs to now what object class names to look for and in which order to map them to integers.\n", + "classes = ['background' ] + labels\n", + "\n", + "train_dataset.parse_xml(images_dirs= [config['train']['train_image_folder']],\n", + " image_set_filenames=[config['train']['train_image_set_filename']],\n", + " annotations_dirs=[config['train']['train_annot_folder']],\n", + " classes=classes,\n", + " include_classes='all',\n", + " #classes = ['background', 'panel', 'cell'], \n", + " #include_classes=classes,\n", + " exclude_truncated=False,\n", + " exclude_difficult=False,\n", + " ret=False)\n", + "\n", + "val_dataset.parse_xml(images_dirs= [config['test']['test_image_folder']],\n", + " image_set_filenames=[config['test']['test_image_set_filename']],\n", + " annotations_dirs=[config['test']['test_annot_folder']],\n", + " classes=classes,\n", + " include_classes='all',\n", + " #classes = ['background', 'panel', 'cell'], \n", + " #include_classes=classes,\n", + " exclude_truncated=False,\n", + " exclude_difficult=False,\n", + " ret=False)\n", + "\n", + "#########################\n", + "# 3: Set the batch size.\n", + "#########################\n", + "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "evaluator = Evaluator(model=model,\n", + " n_classes=n_classes,\n", + " data_generator=val_dataset,\n", + " model_mode='training')\n", + "\n", + "results = evaluator(img_height=img_height,\n", + " img_width=img_width,\n", + " batch_size=4,\n", + " data_generator_mode='resize',\n", + " round_confidences=False,\n", + " matching_iou_threshold=0.5,\n", + " border_pixels='include',\n", + " sorting_algorithm='quicksort',\n", + " average_precision_mode='sample',\n", + " num_recall_points=11,\n", + " ignore_neutral_boxes=True,\n", + " return_precisions=True,\n", + " return_recalls=True,\n", + " return_average_precisions=True,\n", + " verbose=True)\n", + "\n", + "mean_average_precision, average_precisions, precisions, recalls = results\n", + "total_instances = []\n", + "precisions = []\n", + "\n", + "for i in range(1, len(average_precisions)):\n", + " \n", + " print('{:.0f} instances of class'.format(len(recalls[i])),\n", + " classes[i], 'with average precision: {:.4f}'.format(average_precisions[i]))\n", + " total_instances.append(len(recalls[i]))\n", + " precisions.append(average_precisions[i])\n", + "\n", + "if sum(total_instances) == 0:\n", + " \n", + " print('No test instances found.')\n", + "\n", + "else:\n", + "\n", + " print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))\n", + " print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))\n", + "\n", + " for i in range(1, len(average_precisions)):\n", + " print(\"{:<14}{:<6}{}\".format(classes[i], 'AP', round(average_precisions[i], 3)))\n", + " print()\n", + " print(\"{:<14}{:<6}{}\".format('','mAP', round(mean_average_precision, 3)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cargar nuevamente el modelo desde los pesos.\n", + "Predicción" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'panel': 1}\n", + "\n", + 
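For reference, the two mAP figures printed by the evaluation cell above are computed differently: the first weights each class's average precision by its number of test instances, the second is a plain mean over the classes that have any instances. A minimal sketch of that arithmetic; the instance counts echo the per-class summary that appears later in the notebook, while the AP values are invented for illustration:

```python
# Hypothetical per-class APs; the instance counts mirror the summary cell further below.
average_precisions = {'panel': 0.85, 'cell': 0.60}
total_instances = {'panel': 69, 'cell': 423}

# Instance-weighted mAP, as in the evaluation cell above.
weighted_map = (sum(average_precisions[c] * total_instances[c] for c in average_precisions)
                / sum(total_instances.values()))

# Plain mAP over classes that actually have test instances.
classes_with_instances = [c for c in average_precisions if total_instances[c] > 0]
plain_map = sum(average_precisions[c] for c in classes_with_instances) / len(classes_with_instances)

print('weighted mAP: {:.4f}'.format(weighted_map))  # (0.85*69 + 0.60*423) / 492 = 0.6351
print('mAP: {:.4f}'.format(plain_map))              # (0.85 + 0.60) / 2 = 0.7250
```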
"__________________________________________________________________________________________________\n", + "Layer (type) Output Shape Param # Connected to \n", + "==================================================================================================\n", + "input_1 (InputLayer) (None, 400, 400, 3) 0 \n", + "__________________________________________________________________________________________________\n", + "identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1 (Conv2D) (None, 400, 400, 32) 2432 identity_layer[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn1 (BatchNormalization) (None, 400, 400, 32) 128 conv1[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu1 (ELU) (None, 400, 400, 32) 0 bn1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool1 (MaxPooling2D) (None, 200, 200, 32) 0 elu1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2 (Conv2D) (None, 200, 200, 48) 13872 pool1[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn2 (BatchNormalization) (None, 200, 200, 48) 192 conv2[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu2 (ELU) (None, 200, 200, 48) 0 bn2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool2 (MaxPooling2D) (None, 100, 100, 48) 0 elu2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3 (Conv2D) (None, 100, 100, 64) 27712 pool2[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn3 (BatchNormalization) (None, 100, 100, 64) 256 conv3[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu3 (ELU) (None, 100, 100, 64) 0 bn3[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool3 (MaxPooling2D) (None, 50, 50, 64) 0 elu3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4 (Conv2D) (None, 50, 50, 64) 36928 pool3[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn4 (BatchNormalization) (None, 50, 50, 64) 256 conv4[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu4 (ELU) (None, 50, 50, 64) 0 bn4[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool4 (MaxPooling2D) (None, 25, 25, 64) 0 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5 (Conv2D) (None, 25, 25, 48) 27696 pool4[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn5 (BatchNormalization) (None, 25, 25, 48) 192 conv5[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "elu5 (ELU) (None, 25, 25, 48) 0 bn5[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool5 (MaxPooling2D) (None, 12, 12, 48) 0 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6 (Conv2D) (None, 12, 12, 48) 20784 pool5[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn6 (BatchNormalization) (None, 12, 12, 48) 192 conv6[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu6 (ELU) (None, 12, 12, 48) 0 bn6[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool6 (MaxPooling2D) (None, 6, 6, 48) 0 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7 (Conv2D) (None, 6, 6, 32) 13856 pool6[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn7 (BatchNormalization) (None, 6, 6, 32) 128 conv7[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu7 (ELU) (None, 6, 6, 32) 0 bn7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4 (Conv2D) (None, 50, 50, 8) 4616 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5 (Conv2D) (None, 25, 25, 8) 3464 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6 (Conv2D) (None, 12, 12, 8) 3464 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7 (Conv2D) (None, 6, 6, 8) 2312 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4 (Conv2D) (None, 50, 50, 16) 9232 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5 (Conv2D) (None, 25, 25, 16) 6928 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6 (Conv2D) (None, 12, 12, 16) 6928 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7 (Conv2D) (None, 6, 6, 16) 4624 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4_reshape (Reshape) (None, 10000, 2) 0 classes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5_reshape (Reshape) (None, 2500, 2) 0 classes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6_reshape (Reshape) (None, 576, 2) 0 classes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7_reshape (Reshape) (None, 144, 2) 0 classes7[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "anchors4 (AnchorBoxes) (None, 50, 50, 4, 8) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5 (AnchorBoxes) (None, 25, 25, 4, 8) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6 (AnchorBoxes) (None, 12, 12, 4, 8) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7 (AnchorBoxes) (None, 6, 6, 4, 8) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_concat (Concatenate) (None, 13220, 2) 0 classes4_reshape[0][0] \n", + " classes5_reshape[0][0] \n", + " classes6_reshape[0][0] \n", + " classes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4_reshape (Reshape) (None, 10000, 4) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5_reshape (Reshape) (None, 2500, 4) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6_reshape (Reshape) (None, 576, 4) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7_reshape (Reshape) (None, 144, 4) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors4_reshape (Reshape) (None, 10000, 8) 0 anchors4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5_reshape (Reshape) (None, 2500, 8) 0 anchors5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6_reshape (Reshape) (None, 576, 8) 0 anchors6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7_reshape (Reshape) (None, 144, 8) 0 anchors7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_softmax (Activation) (None, 13220, 2) 0 classes_concat[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes_concat (Concatenate) (None, 13220, 4) 0 boxes4_reshape[0][0] \n", + " boxes5_reshape[0][0] \n", + " boxes6_reshape[0][0] \n", + " boxes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors_concat (Concatenate) (None, 13220, 8) 0 anchors4_reshape[0][0] \n", + " anchors5_reshape[0][0] \n", + " anchors6_reshape[0][0] \n", + " anchors7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "predictions (Concatenate) (None, 13220, 14) 0 classes_softmax[0][0] \n", + " boxes_concat[0][0] \n", + " anchors_concat[0][0] \n", + "==================================================================================================\n", + "Total params: 186,192\n", + "Trainable params: 185,520\n", + "Non-trainable params: 672\n", + 
"__________________________________________________________________________________________________\n" + ] + } + ], + "source": [ + "#############################\n", + "####Prediction\n", + "#############################\n", + "\n", + "from imageio import imread\n", + "from keras.preprocessing import image\n", + "import time\n", + "\n", + "def makedirs(path):\n", + " try:\n", + " os.makedirs(path)\n", + " except OSError:\n", + " if not os.path.isdir(path):\n", + " raise\n", + "\n", + "\n", + "config_path = 'config_7_panel.json'\n", + "input_path = ['panel_jpg/Mision_1/', 'panel_jpg/Mision_2/']\n", + "output_path = 'result_ssd7_panel/'\n", + "\n", + "with open(config_path) as config_buffer:\n", + " config = json.loads(config_buffer.read())\n", + "\n", + "makedirs(output_path)\n", + "###############################\n", + "# Parse the annotations\n", + "###############################\n", + "score_threshold = 0.5\n", + "score_threshold_iou = 0.5\n", + "labels = config['model']['labels']\n", + "categories = {}\n", + "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} #la categoría 0 es la background\n", + "for i in range(len(labels)): categories[labels[i]] = i+1\n", + "print('\\nTraining on: \\t' + str(categories) + '\\n')\n", + "\n", + "img_height = config['model']['input'] # Height of the model input images\n", + "img_width = config['model']['input'] # Width of the model input images\n", + "img_channels = 3 # Number of color channels of the model input images\n", + "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n", + "classes = ['background'] + labels\n", + "\n", + "model_mode = 'training'\n", + "# TODO: Set the path to the `.h5` file of the model to be loaded.\n", + "model_path = config['train']['saved_weights_name']\n", + "\n", + "# We need to create an SSDLoss object in order to pass that to the model loader.\n", + "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + "K.clear_session() # Clear previous models from memory.\n", + "\n", + "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + " 'L2Normalization': L2Normalization,\n", + " 'DecodeDetections': DecodeDetections,\n", + " 'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "model.summary()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tiempo Total: 1.040\n", + "Tiempo promedio por imagen: 0.104\n", + "OK\n" + ] + } + ], + "source": [ + "image_paths = []\n", + "for inp in input_path:\n", + " if os.path.isdir(inp):\n", + " for inp_file in os.listdir(inp):\n", + " image_paths += [inp + inp_file]\n", + " else:\n", + " image_paths += [inp]\n", + "\n", + "image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]\n", + "times = []\n", + "\n", + "\n", + "for img_path in image_paths:\n", + " orig_images = [] # Store the images here.\n", + " input_images = [] # Store resized versions of the images here.\n", + " #print(img_path)\n", + "\n", + " # preprocess image for network\n", + " orig_images.append(imread(img_path))\n", + " img = image.load_img(img_path, target_size=(img_height, img_width))\n", + " img = image.img_to_array(img)\n", + " input_images.append(img)\n", + " input_images = np.array(input_images)\n", + " # process image\n", + " start = time.time()\n", + " y_pred = model.predict(input_images)\n", + " y_pred_decoded = 
decode_detections(y_pred,\n", + " confidence_thresh=score_threshold,\n", + " iou_threshold=score_threshold_iou,\n", + " top_k=200,\n", + " normalize_coords=True,\n", + " img_height=img_height,\n", + " img_width=img_width)\n", + "\n", + "\n", + " #print(\"processing time: \", time.time() - start)\n", + " times.append(time.time() - start)\n", + " # correct for image scale\n", + "\n", + " # visualize detections\n", + " # Set the colors for the bounding boxes\n", + " colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()\n", + "\n", + " plt.figure(figsize=(20,12))\n", + " plt.imshow(orig_images[0],cmap = 'gray')\n", + "\n", + " current_axis = plt.gca()\n", + " #print(y_pred)\n", + " for box in y_pred_decoded[0]:\n", + " # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.\n", + "\n", + " xmin = box[2] * orig_images[0].shape[1] / img_width\n", + " ymin = box[3] * orig_images[0].shape[0] / img_height\n", + " xmax = box[4] * orig_images[0].shape[1] / img_width\n", + " ymax = box[5] * orig_images[0].shape[0] / img_height\n", + "\n", + " color = colors[int(box[0])]\n", + " label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n", + " current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n", + " current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n", + "\n", + " #plt.figure(figsize=(15, 15))\n", + " #plt.axis('off')\n", + " save_path = output_path + img_path.split('/')[-1]\n", + " plt.savefig(save_path)\n", + " plt.close()\n", + " \n", + "file = open(output_path + 'time.txt','w')\n", + "\n", + "file.write('Tiempo promedio:' + str(np.mean(times)))\n", + "\n", + "file.close()\n", + "print('Tiempo Total: {:.3f}'.format(np.sum(times)))\n", + "print('Tiempo promedio por imagen: {:.3f}'.format(np.mean(times)))\n", + "print('OK')" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "panel : 69\n", + "cell : 423\n" + ] + } + ], + "source": [ + "\n", + "# Summary instance training\n", + "category_train_list = []\n", + "for image_label in train_dataset.labels:\n", + " category_train_list += [i[0] for i in train_dataset.labels[0]]\n", + "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n", + "for i in summary_category_training.keys():\n", + " print(i, ': {:.0f}'.format(summary_category_training[i]))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "__________________________________________________________________________________________________\n", + "Layer (type) Output Shape Param # Connected to \n", + "==================================================================================================\n", + "input_1 (InputLayer) (None, 400, 400, 3) 0 \n", + "__________________________________________________________________________________________________\n", + "identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1 (Conv2D) (None, 400, 400, 32) 2432 identity_layer[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn1 (BatchNormalization) (None, 400, 400, 32) 128 
conv1[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu1 (ELU) (None, 400, 400, 32) 0 bn1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool1 (MaxPooling2D) (None, 200, 200, 32) 0 elu1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2 (Conv2D) (None, 200, 200, 48) 13872 pool1[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn2 (BatchNormalization) (None, 200, 200, 48) 192 conv2[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu2 (ELU) (None, 200, 200, 48) 0 bn2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool2 (MaxPooling2D) (None, 100, 100, 48) 0 elu2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3 (Conv2D) (None, 100, 100, 64) 27712 pool2[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn3 (BatchNormalization) (None, 100, 100, 64) 256 conv3[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu3 (ELU) (None, 100, 100, 64) 0 bn3[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool3 (MaxPooling2D) (None, 50, 50, 64) 0 elu3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4 (Conv2D) (None, 50, 50, 64) 36928 pool3[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn4 (BatchNormalization) (None, 50, 50, 64) 256 conv4[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu4 (ELU) (None, 50, 50, 64) 0 bn4[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool4 (MaxPooling2D) (None, 25, 25, 64) 0 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5 (Conv2D) (None, 25, 25, 48) 27696 pool4[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn5 (BatchNormalization) (None, 25, 25, 48) 192 conv5[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu5 (ELU) (None, 25, 25, 48) 0 bn5[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool5 (MaxPooling2D) (None, 12, 12, 48) 0 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6 (Conv2D) (None, 12, 12, 48) 20784 pool5[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn6 (BatchNormalization) (None, 12, 12, 48) 192 conv6[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu6 (ELU) (None, 12, 12, 48) 0 bn6[0][0] \n", + 
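This second summary comes from the same SSD7 architecture but with one more positive class, which only changes the class-prediction heads: as the `classesN` rows just below show, each head now outputs `n_boxes * n_classes` channels with `n_classes` counting the background, i.e. 4 × 3 = 12 channels here versus 4 × 2 = 8 in the earlier summary. The parameter counts of those heads can be verified directly:

```python
# Parameters of a 3x3 class-prediction conv on a 64-channel feature map
# (classes4 sits on elu4, which has 64 channels in the summary).
n_boxes, in_ch = 4, 64
for n_classes_incl_bg in (2, 3):               # background+panel vs. background+panel+cell
    out_ch = n_boxes * n_classes_incl_bg
    params = 3 * 3 * in_ch * out_ch + out_ch   # kernel weights + biases
    print('{} classes -> {} channels, {} params'.format(n_classes_incl_bg, out_ch, params))
# 2 classes -> 8 channels, 4616 params   (the first summary's classes4)
# 3 classes -> 12 channels, 6924 params  (this summary's classes4)
```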
"__________________________________________________________________________________________________\n", + "pool6 (MaxPooling2D) (None, 6, 6, 48) 0 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7 (Conv2D) (None, 6, 6, 32) 13856 pool6[0][0] \n", + "__________________________________________________________________________________________________\n", + "bn7 (BatchNormalization) (None, 6, 6, 32) 128 conv7[0][0] \n", + "__________________________________________________________________________________________________\n", + "elu7 (ELU) (None, 6, 6, 32) 0 bn7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4 (Conv2D) (None, 50, 50, 12) 6924 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5 (Conv2D) (None, 25, 25, 12) 5196 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6 (Conv2D) (None, 12, 12, 12) 5196 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7 (Conv2D) (None, 6, 6, 12) 3468 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4 (Conv2D) (None, 50, 50, 16) 9232 elu4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5 (Conv2D) (None, 25, 25, 16) 6928 elu5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6 (Conv2D) (None, 12, 12, 16) 6928 elu6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7 (Conv2D) (None, 6, 6, 16) 4624 elu7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes4_reshape (Reshape) (None, 10000, 3) 0 classes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes5_reshape (Reshape) (None, 2500, 3) 0 classes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes6_reshape (Reshape) (None, 576, 3) 0 classes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes7_reshape (Reshape) (None, 144, 3) 0 classes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors4 (AnchorBoxes) (None, 50, 50, 4, 8) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5 (AnchorBoxes) (None, 25, 25, 4, 8) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6 (AnchorBoxes) (None, 12, 12, 4, 8) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7 (AnchorBoxes) (None, 6, 6, 4, 8) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_concat (Concatenate) (None, 13220, 3) 0 classes4_reshape[0][0] \n", + " 
classes5_reshape[0][0] \n", + " classes6_reshape[0][0] \n", + " classes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes4_reshape (Reshape) (None, 10000, 4) 0 boxes4[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes5_reshape (Reshape) (None, 2500, 4) 0 boxes5[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes6_reshape (Reshape) (None, 576, 4) 0 boxes6[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes7_reshape (Reshape) (None, 144, 4) 0 boxes7[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors4_reshape (Reshape) (None, 10000, 8) 0 anchors4[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors5_reshape (Reshape) (None, 2500, 8) 0 anchors5[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors6_reshape (Reshape) (None, 576, 8) 0 anchors6[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors7_reshape (Reshape) (None, 144, 8) 0 anchors7[0][0] \n", + "__________________________________________________________________________________________________\n", + "classes_softmax (Activation) (None, 13220, 3) 0 classes_concat[0][0] \n", + "__________________________________________________________________________________________________\n", + "boxes_concat (Concatenate) (None, 13220, 4) 0 boxes4_reshape[0][0] \n", + " boxes5_reshape[0][0] \n", + " boxes6_reshape[0][0] \n", + " boxes7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "anchors_concat (Concatenate) (None, 13220, 8) 0 anchors4_reshape[0][0] \n", + " anchors5_reshape[0][0] \n", + " anchors6_reshape[0][0] \n", + " anchors7_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "predictions (Concatenate) (None, 13220, 15) 0 classes_softmax[0][0] \n", + " boxes_concat[0][0] \n", + " anchors_concat[0][0] \n", + "==================================================================================================\n", + "Total params: 193,120\n", + "Trainable params: 192,448\n", + "Non-trainable params: 672\n", + "__________________________________________________________________________________________________\n" + ] + } + ], + "source": [ + "\n", + "\n", + "\n", + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
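Stepping back to the prediction loop above: `decode_detections` returns one row per detection in the form `[class_id, confidence, xmin, ymin, xmax, ymax]`, with the corners expressed in the model's 400×400 input frame, which is why the loop rescales them by `original_size / model_input_size` before drawing. A small worked example of that mapping; the box values and original image size here are hypothetical:

```python
# Rescale a decoded box from the 400x400 model input frame to the original image.
img_width = img_height = 400                 # model input size (config['model']['input'])
orig_h, orig_w = 512, 640                    # hypothetical original image height/width

box = [1, 0.92, 100.0, 80.0, 220.0, 180.0]   # [class_id, conf, xmin, ymin, xmax, ymax]

xmin = box[2] * orig_w / img_width           # 100 * 640/400 = 160.0
ymin = box[3] * orig_h / img_height          #  80 * 512/400 = 102.4
xmax = box[4] * orig_w / img_width           # 220 * 640/400 = 352.0
ymax = box[5] * orig_h / img_height          # 180 * 512/400 = 230.4
print(xmin, ymin, xmax, ymax)
```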
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Primer_reslutado_panel/config_7_panel.json b/Primer_reslutado_panel/config_7_panel.json new file mode 100644 index 0000000..9265f16 --- /dev/null +++ b/Primer_reslutado_panel/config_7_panel.json @@ -0,0 +1,28 @@ +{ + "model" : { + "backend": "ssd7", + "input": 400, + "labels": ["panel"] + }, + + "train": { + "train_image_folder": "Train&Test_A/images", + "train_annot_folder": "Train&Test_A/anns", + "train_image_set_filename": "Train&Test_A/train.txt", + + "train_times": 1, + "batch_size": 8, + "learning_rate": 1e-4, + "nb_epochs": 10, + "warmup_epochs": 3, + "saved_weights_name": "experimento_ssd7_panel.h5", + "debug": true + }, + + +"test": { + "test_image_folder": "Train&Test_A/images", + "test_annot_folder": "Train&Test_A/anns", + "test_image_set_filename": "Train&Test_A/test.txt" + } +} diff --git a/Primer_reslutado_panel/experimento_ssd7_panel.h5 b/Primer_reslutado_panel/experimento_ssd7_panel.h5 new file mode 100644 index 0000000..e05b49c Binary files /dev/null and b/Primer_reslutado_panel/experimento_ssd7_panel.h5 differ diff --git a/Primer_reslutado_panel/experimento_ssd7_panel_history.npy b/Primer_reslutado_panel/experimento_ssd7_panel_history.npy new file mode 100644 index 0000000..cf56166 Binary files /dev/null and b/Primer_reslutado_panel/experimento_ssd7_panel_history.npy differ diff --git a/Primer_resultado_fault_1/.gitignore b/Primer_resultado_fault_1/.gitignore new file mode 100644 index 0000000..503268f --- /dev/null +++ b/Primer_resultado_fault_1/.gitignore @@ -0,0 +1 @@ +experimento_ssd300_fault_1.h5 diff --git a/Primer_resultado_fault_1/.ipynb_checkpoints/Panel_Detector_Fault_1-checkpoint.ipynb b/Primer_resultado_fault_1/.ipynb_checkpoints/Panel_Detector_Fault_1-checkpoint.ipynb new file mode 100644 index 0000000..9abf0e9 --- /dev/null +++ b/Primer_resultado_fault_1/.ipynb_checkpoints/Panel_Detector_Fault_1-checkpoint.ipynb @@ -0,0 +1,4263 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cargar el modelo ssd7 \n", + "(https://github.com/pierluigiferrari/ssd_keras#how-to-fine-tune-one-of-the-trained-models-on-your-own-dataset)\n", + "\n", + "Training del SSD7 (modelo reducido de SSD). 
Parámetros en config_7.json y descargar VGG_ILSVRC_16_layers_fc_reduced.h5\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'1': 1}\n", + "\n", + "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Colocations handled automatically by placer.\n", + "OK create model\n", + "\n", + "Loading pretrained weights VGG.\n", + "\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "__________________________________________________________________________________________________\n", + "Layer (type) Output Shape Param # Connected to \n", + "==================================================================================================\n", + "input_1 (InputLayer) (None, 400, 400, 3) 0 \n", + "__________________________________________________________________________________________________\n", + "identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "input_mean_normalization (Lambd (None, 400, 400, 3) 0 identity_layer[0][0] \n", + "__________________________________________________________________________________________________\n", + "input_channel_swap (Lambda) (None, 400, 400, 3) 0 input_mean_normalization[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1_1 (Conv2D) (None, 400, 400, 64) 1792 input_channel_swap[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1_2 (Conv2D) (None, 400, 400, 64) 36928 conv1_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool1 (MaxPooling2D) (None, 200, 200, 64) 0 conv1_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2_1 (Conv2D) (None, 200, 200, 128 73856 pool1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2_2 (Conv2D) (None, 200, 200, 128 147584 conv2_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool2 (MaxPooling2D) (None, 100, 100, 128 0 conv2_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3_1 (Conv2D) (None, 100, 100, 256 295168 pool2[0][0] \n", + 
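The parameter counts in this VGG-16 backbone are easy to sanity-check: a 3×3 convolution with `c_in` input channels and `c_out` filters has `3*3*c_in*c_out` weights plus `c_out` biases. Checking the first few rows of the summary above:

```python
# Verify the VGG-16 conv parameter counts printed in the summary above.
def conv3x3_params(c_in, c_out):
    return 3 * 3 * c_in * c_out + c_out  # kernel weights + biases

print(conv3x3_params(3, 64))     # conv1_1: 1792
print(conv3x3_params(64, 64))    # conv1_2: 36928
print(conv3x3_params(64, 128))   # conv2_1: 73856
print(conv3x3_params(128, 128))  # conv2_2: 147584
print(conv3x3_params(128, 256))  # conv3_1: 295168
```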
"__________________________________________________________________________________________________\n", + "conv3_2 (Conv2D) (None, 100, 100, 256 590080 conv3_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3_3 (Conv2D) (None, 100, 100, 256 590080 conv3_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool3 (MaxPooling2D) (None, 50, 50, 256) 0 conv3_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_1 (Conv2D) (None, 50, 50, 512) 1180160 pool3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_2 (Conv2D) (None, 50, 50, 512) 2359808 conv4_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3 (Conv2D) (None, 50, 50, 512) 2359808 conv4_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool4 (MaxPooling2D) (None, 25, 25, 512) 0 conv4_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5_1 (Conv2D) (None, 25, 25, 512) 2359808 pool4[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5_2 (Conv2D) (None, 25, 25, 512) 2359808 conv5_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5_3 (Conv2D) (None, 25, 25, 512) 2359808 conv5_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool5 (MaxPooling2D) (None, 25, 25, 512) 0 conv5_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc6 (Conv2D) (None, 25, 25, 1024) 4719616 pool5[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7 (Conv2D) (None, 25, 25, 1024) 1049600 fc6[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_1 (Conv2D) (None, 25, 25, 256) 262400 fc7[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_padding (ZeroPadding2D) (None, 27, 27, 256) 0 conv6_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2 (Conv2D) (None, 13, 13, 512) 1180160 conv6_padding[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_1 (Conv2D) (None, 13, 13, 128) 65664 conv6_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_padding (ZeroPadding2D) (None, 15, 15, 128) 0 conv7_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2 (Conv2D) (None, 7, 7, 256) 295168 conv7_padding[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_1 (Conv2D) (None, 7, 7, 128) 32896 conv7_2[0][0] \n", + "__________________________________________________________________________________________________\n", 
+ "conv8_2 (Conv2D) (None, 5, 5, 256) 295168 conv8_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_1 (Conv2D) (None, 5, 5, 128) 32896 conv8_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm (L2Normalization) (None, 50, 50, 512) 512 conv4_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2 (Conv2D) (None, 3, 3, 256) 295168 conv9_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_conf (Conv2D) (None, 50, 50, 8) 36872 conv4_3_norm[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_conf (Conv2D) (None, 25, 25, 12) 110604 fc7[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_conf (Conv2D) (None, 13, 13, 12) 55308 conv6_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_conf (Conv2D) (None, 7, 7, 12) 27660 conv7_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_conf (Conv2D) (None, 5, 5, 8) 18440 conv8_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_conf (Conv2D) (None, 3, 3, 8) 18440 conv9_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_loc (Conv2D) (None, 50, 50, 16) 73744 conv4_3_norm[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_loc (Conv2D) (None, 25, 25, 24) 221208 fc7[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_loc (Conv2D) (None, 13, 13, 24) 110616 conv6_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_loc (Conv2D) (None, 7, 7, 24) 55320 conv7_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_loc (Conv2D) (None, 5, 5, 16) 36880 conv8_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_loc (Conv2D) (None, 3, 3, 16) 36880 conv9_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_conf_reshape (None, 10000, 2) 0 conv4_3_norm_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_conf_reshape (Reshape) (None, 3750, 2) 0 fc7_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_conf_reshape (Resh (None, 1014, 2) 0 conv6_2_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_conf_reshape (Resh (None, 294, 2) 0 conv7_2_mbox_conf[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "conv8_2_mbox_conf_reshape (Resh (None, 100, 2) 0 conv8_2_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_conf_reshape (Resh (None, 36, 2) 0 conv9_2_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_priorbox (Anc (None, 50, 50, 4, 8) 0 conv4_3_norm_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_priorbox (AnchorBoxes) (None, 25, 25, 6, 8) 0 fc7_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_priorbox (AnchorBo (None, 13, 13, 6, 8) 0 conv6_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_priorbox (AnchorBo (None, 7, 7, 6, 8) 0 conv7_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_priorbox (AnchorBo (None, 5, 5, 4, 8) 0 conv8_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_priorbox (AnchorBo (None, 3, 3, 4, 8) 0 conv9_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_conf (Concatenate) (None, 15194, 2) 0 conv4_3_norm_mbox_conf_reshape[0]\n", + " fc7_mbox_conf_reshape[0][0] \n", + " conv6_2_mbox_conf_reshape[0][0] \n", + " conv7_2_mbox_conf_reshape[0][0] \n", + " conv8_2_mbox_conf_reshape[0][0] \n", + " conv9_2_mbox_conf_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_loc_reshape ( (None, 10000, 4) 0 conv4_3_norm_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_loc_reshape (Reshape) (None, 3750, 4) 0 fc7_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_loc_reshape (Resha (None, 1014, 4) 0 conv6_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_loc_reshape (Resha (None, 294, 4) 0 conv7_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_loc_reshape (Resha (None, 100, 4) 0 conv8_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_loc_reshape (Resha (None, 36, 4) 0 conv9_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_priorbox_resh (None, 10000, 8) 0 conv4_3_norm_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_priorbox_reshape (Resh (None, 3750, 8) 0 fc7_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_priorbox_reshape ( 
(None, 1014, 8) 0 conv6_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_priorbox_reshape ( (None, 294, 8) 0 conv7_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_priorbox_reshape ( (None, 100, 8) 0 conv8_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_priorbox_reshape ( (None, 36, 8) 0 conv9_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_conf_softmax (Activation) (None, 15194, 2) 0 mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_loc (Concatenate) (None, 15194, 4) 0 conv4_3_norm_mbox_loc_reshape[0][\n", + " fc7_mbox_loc_reshape[0][0] \n", + " conv6_2_mbox_loc_reshape[0][0] \n", + " conv7_2_mbox_loc_reshape[0][0] \n", + " conv8_2_mbox_loc_reshape[0][0] \n", + " conv9_2_mbox_loc_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_priorbox (Concatenate) (None, 15194, 8) 0 conv4_3_norm_mbox_priorbox_reshap\n", + " fc7_mbox_priorbox_reshape[0][0] \n", + " conv6_2_mbox_priorbox_reshape[0][\n", + " conv7_2_mbox_priorbox_reshape[0][\n", + " conv8_2_mbox_priorbox_reshape[0][\n", + " conv9_2_mbox_priorbox_reshape[0][\n", + "__________________________________________________________________________________________________\n", + "predictions (Concatenate) (None, 15194, 14) 0 mbox_conf_softmax[0][0] \n", + " mbox_loc[0][0] \n", + " mbox_priorbox[0][0] \n", + "==================================================================================================\n", + "Total params: 23,745,908\n", + "Trainable params: 23,745,908\n", + "Non-trainable params: 0\n", + "__________________________________________________________________________________________________\n" + ] + } + ], + "source": [ + "from keras.optimizers import Adam, SGD\n", + "from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n", + "from keras import backend as K\n", + "from keras.models import load_model\n", + "from math import ceil\n", + "import numpy as np\n", + "from matplotlib import pyplot as plt\n", + "import os\n", + "import json\n", + "import xml.etree.cElementTree as ET\n", + "\n", + "import sys\n", + "sys.path += [os.path.abspath('../../ssd_keras-master')]\n", + "\n", + "from keras_loss_function.keras_ssd_loss import SSDLoss\n", + "from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\n", + "from keras_layers.keras_layer_DecodeDetections import DecodeDetections\n", + "from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n", + "from keras_layers.keras_layer_L2Normalization import L2Normalization\n", + "from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n", + "from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n", + "from data_generator.object_detection_2d_data_generator import DataGenerator\n", + "from data_generator.object_detection_2d_geometric_ops import Resize\n", + "from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\n", + "from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n", + 
"from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n", + "from eval_utils.average_precision_evaluator import Evaluator\n", + "from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\n", + "from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\n", + "\n", + "\n", + "def makedirs(path):\n", + " try:\n", + " os.makedirs(path)\n", + " except OSError:\n", + " if not os.path.isdir(path):\n", + " raise\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "K.tensorflow_backend._get_available_gpus()\n", + "\n", + "\n", + "def lr_schedule(epoch):\n", + " if epoch < 80:\n", + " return 0.001\n", + " elif epoch < 100:\n", + " return 0.0001\n", + " else:\n", + " return 0.00001\n", + "\n", + "config_path = 'config_300_fault_1.json'\n", + "\n", + "\n", + "with open(config_path) as config_buffer:\n", + " config = json.loads(config_buffer.read())\n", + "\n", + "###############################\n", + "# Parse the annotations\n", + "###############################\n", + "path_imgs_training = config['train']['train_image_folder']\n", + "path_anns_training = config['train']['train_annot_folder']\n", + "path_imgs_val = config['test']['test_image_folder']\n", + "path_anns_val = config['test']['test_annot_folder']\n", + "labels = config['model']['labels']\n", + "categories = {}\n", + "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} #la categoría 0 es la background\n", + "for i in range(len(labels)): categories[labels[i]] = i+1\n", + "print('\\nTraining on: \\t' + str(categories) + '\\n')\n", + "\n", + "####################################\n", + "# Parameters\n", + "###################################\n", + " #%%\n", + "img_height = config['model']['input'] # Height of the model input images\n", + "img_width = config['model']['input'] # Width of the model input images\n", + "img_channels = 3 # Number of color channels of the model input images\n", + "mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\n", + "swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\n", + "n_classes = len(labels) # Number of positive classes, e.g. 
+    "scales_pascal = [0.01, 0.05, 0.1, 0.2, 0.37, 0.54, 0.71] # Anchor box scaling factors; shifted down from the original SSD300 Pascal VOC values so the smallest anchors can match the small panel faults\n",
+    "#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
+    "scales = scales_pascal\n",
+    "aspect_ratios = [[1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\n",
+    "two_boxes_for_ar1 = True\n",
+    "steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\n",
+    "offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\n",
+    "clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\n",
+    "variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\n",
+    "normalize_coords = True\n",
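+    "\n",
+    "# Sanity check added for clarity: with two_boxes_for_ar1=True the aspect-ratio\n",
+    "# lists above yield 4, 6, 6, 6, 4, 4 boxes per cell on the six predictor grids\n",
+    "# (50x50, 25x25, 13x13, 7x7, 5x5, 3x3 for the 400x400 input used here), i.e.\n",
+    "# 50*50*4 + 25*25*6 + 13*13*6 + 7*7*6 + 5*5*4 + 3*3*4 = 15194 anchors in total,\n",
+    "# matching the (None, 15194, 14) 'predictions' shape in the model summary.\n",
+    "assert sum(g*g*n for g, n in zip([50, 25, 13, 7, 5, 3], [4, 6, 6, 6, 4, 4])) == 15194\n",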
+    "\n",
+    "K.clear_session() # Clear previous models from memory.\n",
+    "\n",
+    "model_path = config['train']['saved_weights_name']\n",
+    "# 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "# If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "# optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "if config['model']['backend'] == 'ssd7':\n",
+    "    #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
+    "    aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
+    "    two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
+    "    steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
+    "    offsets = None\n",
+    "\n",
+    "if os.path.exists(model_path):\n",
+    "    print(\"\\nLoading pretrained weights.\\n\")\n",
+    "    # We need to create an SSDLoss object in order to pass that to the model loader.\n",
+    "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "\n",
+    "    K.clear_session() # Clear previous models from memory.\n",
+    "    model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
+    "                                                   'L2Normalization': L2Normalization,\n",
+    "                                                   'compute_loss': ssd_loss.compute_loss})\n",
+    "\n",
+    "else:\n",
+    "    ###################################\n",
+    "    # Build the Keras model.\n",
+    "    ###################################\n",
+    "\n",
+    "    if config['model']['backend'] == 'ssd300':\n",
+    "        #weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'\n",
+    "        from models.keras_ssd300 import ssd_300 as ssd\n",
+    "\n",
+    "        model = ssd(image_size=(img_height, img_width, img_channels),\n",
+    "                    n_classes=n_classes,\n",
+    "                    mode='training',\n",
+    "                    l2_regularization=0.0005,\n",
+    "                    scales=scales,\n",
+    "                    aspect_ratios_per_layer=aspect_ratios,\n",
+    "                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                    steps=steps,\n",
+    "                    offsets=offsets,\n",
+    "                    clip_boxes=clip_boxes,\n",
+    "                    variances=variances,\n",
+    "                    normalize_coords=normalize_coords,\n",
+    "                    subtract_mean=mean_color,\n",
+    "                    swap_channels=swap_channels)\n",
+    "\n",
+    "    elif config['model']['backend'] == 'ssd7':\n",
+    "        #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "        from models.keras_ssd7 import build_model as ssd\n",
+    "        scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
+    "        aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
+    "        two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
+    "        steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
+    "        offsets = None\n",
+    "        model = ssd(image_size=(img_height, img_width, img_channels),\n",
+    "                    n_classes=n_classes,\n",
+    "                    mode='training',\n",
+    "                    l2_regularization=0.0005,\n",
+    "                    scales=scales,\n",
+    "                    aspect_ratios_global=aspect_ratios,\n",
+    "                    aspect_ratios_per_layer=None,\n",
+    "                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                    steps=steps,\n",
+    "                    offsets=offsets,\n",
+    "                    clip_boxes=clip_boxes,\n",
+    "                    variances=variances,\n",
+    "                    normalize_coords=normalize_coords,\n",
+    "                    subtract_mean=None,\n",
+    "                    divide_by_stddev=None)\n",
+    "\n",
+    "    else:\n",
+    "        raise ValueError('Unsupported backend: ' + config['model']['backend'])\n",
+    "\n",
+    "    print('OK create model')\n",
+    "    #sgd = SGD(lr=config['train']['learning_rate'], momentum=0.9, decay=0.0, nesterov=False)\n",
+    "\n",
+    "    # TODO: Set the path to the weights you want to load; only for ssd300 or ssd512\n",
+    "\n",
+    "    weights_path = '../ssd_keras-master/VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    print(\"\\nLoading pretrained weights VGG.\\n\")\n",
+    "    model.load_weights(weights_path, by_name=True)\n",
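+    "    # Annotation added for clarity: by_name=True copies weights only for the\n",
+    "    # layers whose names match those in the HDF5 file, so the VGG-16 base is\n",
+    "    # initialized from the fc-reduced ImageNet weights while the new SSD heads\n",
+    "    # (the *_mbox_* layers) keep their fresh random initialization.\n",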
+    "\n",
+    "    # 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "    # If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "    # optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "    #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    #sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\n",
+    "    optimizer = Adam(lr=config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "    model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)\n",
+    "\n",
+    "    model.summary()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Instantiate the data generators and train the model.\n",
+    "\n",
+    "*Change made so that both png and jpg images are read: keras-ssd-master/data_generator/object_detection_2d_data_generator.py, function parse_xml\n"
+   ]
+  },
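The cell that builds the data generators and launches training is not shown in this hunk; only its output is. Below is a minimal sketch of what it presumably contains, reconstructed from the ssd_keras classes imported above (`DataGenerator`, `SSDInputEncoder`, `SSDDataAugmentation`) and from the log that follows (100 steps per epoch, `Epoch 1/500`, best-`val_loss` checkpointing to the configured `saved_weights_name`, and the verbose `lr_schedule` messages). The image-set filenames, the batch size, and the CSV log filename are assumptions, not values confirmed by the diff; everything else reuses names defined in the cell above.

```python
# Sketch only: relies on the variables defined in the previous cell.
batch_size = 8  # assumption; the batch size is not shown in this hunk

train_dataset = DataGenerator(load_images_into_memory=False)
val_dataset = DataGenerator(load_images_into_memory=False)

# parse_xml was patched (see the markdown note above) to accept png as well as jpg.
train_dataset.parse_xml(images_dirs=[path_imgs_training],
                        image_set_filenames=[os.path.join(path_imgs_training, 'train.txt')],  # assumption
                        annotations_dirs=[path_anns_training],
                        classes=['background'] + labels,
                        include_classes='all')
val_dataset.parse_xml(images_dirs=[path_imgs_val],
                      image_set_filenames=[os.path.join(path_imgs_val, 'test.txt')],  # assumption
                      annotations_dirs=[path_anns_val],
                      classes=['background'] + labels,
                      include_classes='all')

# The input encoder needs the spatial sizes of the six predictor layers,
# which are read off the confidence heads named in the model summary above.
predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
                   model.get_layer('fc7_mbox_conf').output_shape[1:3],
                   model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
                   model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
                   model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
                   model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]

ssd_input_encoder = SSDInputEncoder(img_height=img_height, img_width=img_width,
                                    n_classes=n_classes, predictor_sizes=predictor_sizes,
                                    scales=scales, aspect_ratios_per_layer=aspect_ratios,
                                    two_boxes_for_ar1=two_boxes_for_ar1, steps=steps,
                                    offsets=offsets, clip_boxes=clip_boxes,
                                    variances=variances, matching_type='multi',
                                    pos_iou_threshold=0.5, neg_iou_limit=0.5,
                                    normalize_coords=normalize_coords)

train_generator = train_dataset.generate(batch_size=batch_size, shuffle=True,
                                         transformations=[SSDDataAugmentation(img_height=img_height,
                                                                              img_width=img_width,
                                                                              background=mean_color)],
                                         label_encoder=ssd_input_encoder,
                                         returns={'processed_images', 'encoded_labels'},
                                         keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size, shuffle=False,
                                     transformations=[ConvertTo3Channels(),
                                                      Resize(height=img_height, width=img_width)],
                                     label_encoder=ssd_input_encoder,
                                     returns={'processed_images', 'encoded_labels'},
                                     keep_images_without_gt=False)

# The log below shows exactly these callbacks at work: the checkpoint messages
# ("val_loss improved ... saving model to experimento_ssd300_fault_1.h5") and
# the per-epoch LearningRateScheduler lines driven by lr_schedule.
callbacks = [ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True),
             LearningRateScheduler(schedule=lr_schedule, verbose=1),
             TerminateOnNaN(),
             CSVLogger('training_log.csv')]  # filename is an assumption

model.fit_generator(generator=train_generator,
                    steps_per_epoch=100,  # matches the "100/100" progress bars below
                    epochs=500,           # matches "Epoch 1/500" below
                    callbacks=callbacks,
                    validation_data=val_generator,
                    validation_steps=ceil(val_dataset.get_dataset_size() / batch_size))
```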
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 88.77it/s]\n",
+      "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 61.92it/s]\n",
+      "1 : 444\n",
+      "Number of images in the training dataset:\t 33\n",
+      "Number of images in the validation dataset:\t 2\n",
+      "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+      "Instructions for updating:\n",
+      "Deprecated in favor of operator or tf.math.divide.\n",
+      "Epoch 1/500\n",
+      "\n",
+      "Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 95s 947ms/step - loss: 16.3709 - val_loss: 7.3757\n",
+      "\n",
+      "Epoch 00001: val_loss improved from inf to 7.37568, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 2/500\n",
+      "\n",
+      "Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 799ms/step - loss: 8.3424 - val_loss: 5.9648\n",
+      "\n",
+      "Epoch 00002: val_loss improved from 7.37568 to 5.96482, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 3/500\n",
+      "\n",
+      "Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 81s 814ms/step - loss: 6.9268 - val_loss: 5.5916\n",
+      "\n",
+      "Epoch 00003: val_loss improved from 5.96482 to 5.59162, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 4/500\n",
+      "\n",
+      "Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 81s 806ms/step - loss: 6.5707 - val_loss: 5.6131\n",
+      "\n",
+      "Epoch 00004: val_loss did not improve from 5.59162\n",
+      "Epoch 5/500\n",
+      "\n",
+      "Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 81s 809ms/step - loss: 6.2085 - val_loss: 5.8056\n",
+      "\n",
+      "Epoch 00005: val_loss did not improve from 5.59162\n",
+      "Epoch 6/500\n",
+      "\n",
+      "Epoch 00006: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 803ms/step - loss: 5.9796 - val_loss: 5.4107\n",
+      "\n",
+      "Epoch 00006: val_loss improved from 5.59162 to 5.41071, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 7/500\n",
+      "\n",
+      "Epoch 00007: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 82s 817ms/step - loss: 5.8464 - val_loss: 5.4046\n",
+      "\n",
+      "Epoch 00007: val_loss improved from 5.41071 to 5.40461, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 8/500\n",
+      "\n",
+      "Epoch 00008: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 82s 821ms/step - loss: 5.8391 - val_loss: 5.1717\n",
+      "\n",
+      "Epoch 00008: val_loss improved from 5.40461 to 5.17174, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 9/500\n",
+      "\n",
+      "Epoch 00009: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 81s 807ms/step - loss: 5.6631 - val_loss: 5.1447\n",
+      "\n",
+      "Epoch 00009: val_loss improved from 5.17174 to 5.14472, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 10/500\n",
+      "\n",
+      "Epoch 00010: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 802ms/step - loss: 5.6221 - val_loss: 5.3356\n",
+      "\n",
+      "Epoch 00010: val_loss did not improve from 5.14472\n",
+      "Epoch 11/500\n",
+      "\n",
+      "Epoch 00011: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 803ms/step - loss: 5.5115 - val_loss: 5.6827\n",
+      "\n",
+      "Epoch 00011: val_loss did not improve from 5.14472\n",
+      "Epoch 12/500\n",
+      "\n",
+      "Epoch 00012: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 802ms/step - loss: 5.4163 - val_loss: 5.0174\n",
+      "\n",
+      "Epoch 00012: val_loss improved from 5.14472 to 5.01743, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 13/500\n",
+      "\n",
+      "Epoch 00013: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 803ms/step - loss: 5.2737 - val_loss: 4.8928\n",
+      "\n",
+      "Epoch 00013: val_loss improved from 5.01743 to 4.89279, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 14/500\n",
+      "\n",
+      "Epoch 00014: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 801ms/step - loss: 5.1896 - val_loss: 4.6932\n",
+      "\n",
+      "Epoch 00014: val_loss improved from 4.89279 to 4.69325, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 15/500\n",
+      "\n",
+      "Epoch 00015: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 802ms/step - loss: 5.0712 - val_loss: 4.7150\n",
+      "\n",
+      "Epoch 00015: val_loss did not improve from 4.69325\n",
+      "Epoch 16/500\n",
+      "\n",
+      "Epoch 00016: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 803ms/step - loss: 5.0187 - val_loss: 4.7564\n",
+      "\n",
+      "Epoch 00016: val_loss did not improve from 4.69325\n",
+      "Epoch 17/500\n",
+      "\n",
+      "Epoch 00017: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 803ms/step - loss: 4.9779 - val_loss: 4.6682\n",
+      "\n",
+      "Epoch 00017: val_loss improved from 4.69325 to 4.66824, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 18/500\n",
+ "\n", + "Epoch 00018: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 804ms/step - loss: 5.0324 - val_loss: 4.3389\n", + "\n", + "Epoch 00018: val_loss improved from 4.66824 to 4.33889, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 19/500\n", + "\n", + "Epoch 00019: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 4.8554 - val_loss: 4.3513\n", + "\n", + "Epoch 00019: val_loss did not improve from 4.33889\n", + "Epoch 20/500\n", + "\n", + "Epoch 00020: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 4.7299 - val_loss: 4.2093\n", + "\n", + "Epoch 00020: val_loss improved from 4.33889 to 4.20925, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 21/500\n", + "\n", + "Epoch 00021: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 4.7058 - val_loss: 4.3614\n", + "\n", + "Epoch 00021: val_loss did not improve from 4.20925\n", + "Epoch 22/500\n", + "\n", + "Epoch 00022: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 4.6238 - val_loss: 4.1315\n", + "\n", + "Epoch 00022: val_loss improved from 4.20925 to 4.13152, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 23/500\n", + "\n", + "Epoch 00023: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 4.5439 - val_loss: 4.0052\n", + "\n", + "Epoch 00023: val_loss improved from 4.13152 to 4.00518, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 24/500\n", + "\n", + "Epoch 00024: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 4.4489 - val_loss: 4.1691\n", + "\n", + "Epoch 00024: val_loss did not improve from 4.00518\n", + "Epoch 25/500\n", + "\n", + "Epoch 00025: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 4.2741 - val_loss: 4.1397\n", + "\n", + "Epoch 00025: val_loss did not improve from 4.00518\n", + "Epoch 26/500\n", + "\n", + "Epoch 00026: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 4.2250 - val_loss: 3.9073\n", + "\n", + "Epoch 00026: val_loss improved from 4.00518 to 3.90726, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 27/500\n", + "\n", + "Epoch 00027: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 4.1448 - val_loss: 4.9886\n", + "\n", + "Epoch 00027: val_loss did not improve from 3.90726\n", + "Epoch 28/500\n", + "\n", + "Epoch 00028: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 4.0781 - val_loss: 3.9171\n", + "\n", + "Epoch 00028: val_loss did not improve from 3.90726\n", + "Epoch 29/500\n", + "\n", + "Epoch 00029: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 4.0313 - val_loss: 3.8165\n", + "\n", + "Epoch 00029: val_loss improved from 3.90726 to 3.81654, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 30/500\n", + "\n", + "Epoch 00030: 
LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.8771 - val_loss: 3.8606\n", + "\n", + "Epoch 00030: val_loss did not improve from 3.81654\n", + "Epoch 31/500\n", + "\n", + "Epoch 00031: LearningRateScheduler setting learning rate to 0.001.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 801ms/step - loss: 3.7454 - val_loss: 3.9101\n", + "\n", + "Epoch 00031: val_loss did not improve from 3.81654\n", + "Epoch 32/500\n", + "\n", + "Epoch 00032: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.7843 - val_loss: 3.7655\n", + "\n", + "Epoch 00032: val_loss improved from 3.81654 to 3.76554, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 33/500\n", + "\n", + "Epoch 00033: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.6930 - val_loss: 3.7563\n", + "\n", + "Epoch 00033: val_loss improved from 3.76554 to 3.75635, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 34/500\n", + "\n", + "Epoch 00034: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.7410 - val_loss: 3.6279\n", + "\n", + "Epoch 00034: val_loss improved from 3.75635 to 3.62786, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 35/500\n", + "\n", + "Epoch 00035: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.6264 - val_loss: 3.6042\n", + "\n", + "Epoch 00035: val_loss improved from 3.62786 to 3.60423, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 36/500\n", + "\n", + "Epoch 00036: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.7013 - val_loss: 3.6819\n", + "\n", + "Epoch 00036: val_loss did not improve from 3.60423\n", + "Epoch 37/500\n", + "\n", + "Epoch 00037: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.7254 - val_loss: 3.8854\n", + "\n", + "Epoch 00037: val_loss did not improve from 3.60423\n", + "Epoch 38/500\n", + "\n", + "Epoch 00038: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.7286 - val_loss: 3.7263\n", + "\n", + "Epoch 00038: val_loss did not improve from 3.60423\n", + "Epoch 39/500\n", + "\n", + "Epoch 00039: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.6215 - val_loss: 3.7384\n", + "\n", + "Epoch 00039: val_loss did not improve from 3.60423\n", + "Epoch 40/500\n", + "\n", + "Epoch 00040: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.5454 - val_loss: 3.6938\n", + "\n", + "Epoch 00040: val_loss did not improve from 3.60423\n", + "Epoch 41/500\n", + "\n", + "Epoch 00041: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.5633 - val_loss: 3.8448\n", + "\n", + "Epoch 00041: val_loss did not improve from 3.60423\n", + "Epoch 42/500\n", + "\n", + "Epoch 00042: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 
[==============================] - 80s 802ms/step - loss: 3.5717 - val_loss: 3.7542\n", + "\n", + "Epoch 00042: val_loss did not improve from 3.60423\n", + "Epoch 43/500\n", + "\n", + "Epoch 00043: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.4769 - val_loss: 3.5321\n", + "\n", + "Epoch 00043: val_loss improved from 3.60423 to 3.53213, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 44/500\n", + "\n", + "Epoch 00044: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.5226 - val_loss: 3.7346\n", + "\n", + "Epoch 00044: val_loss did not improve from 3.53213\n", + "Epoch 45/500\n", + "\n", + "Epoch 00045: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.4415 - val_loss: 3.6502\n", + "\n", + "Epoch 00045: val_loss did not improve from 3.53213\n", + "Epoch 46/500\n", + "\n", + "Epoch 00046: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 3.4931 - val_loss: 3.6032\n", + "\n", + "Epoch 00046: val_loss did not improve from 3.53213\n", + "Epoch 47/500\n", + "\n", + "Epoch 00047: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.4958 - val_loss: 3.6229\n", + "\n", + "Epoch 00047: val_loss did not improve from 3.53213\n", + "Epoch 48/500\n", + "\n", + "Epoch 00048: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.4587 - val_loss: 3.6163\n", + "\n", + "Epoch 00048: val_loss did not improve from 3.53213\n", + "Epoch 49/500\n", + "\n", + "Epoch 00049: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.4677 - val_loss: 3.7527\n", + "\n", + "Epoch 00049: val_loss did not improve from 3.53213\n", + "Epoch 50/500\n", + "\n", + "Epoch 00050: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.4107 - val_loss: 3.5594\n", + "\n", + "Epoch 00050: val_loss did not improve from 3.53213\n", + "Epoch 51/500\n", + "\n", + "Epoch 00051: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.4377 - val_loss: 3.5592\n", + "\n", + "Epoch 00051: val_loss did not improve from 3.53213\n", + "Epoch 52/500\n", + "\n", + "Epoch 00052: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 803ms/step - loss: 3.3956 - val_loss: 3.7218\n", + "\n", + "Epoch 00052: val_loss did not improve from 3.53213\n", + "Epoch 53/500\n", + "\n", + "Epoch 00053: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.4251 - val_loss: 3.5406\n", + "\n", + "Epoch 00053: val_loss did not improve from 3.53213\n", + "Epoch 54/500\n", + "\n", + "Epoch 00054: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.3444 - val_loss: 3.7238\n", + "\n", + "Epoch 00054: val_loss did not improve from 3.53213\n", + "Epoch 55/500\n", + "\n", + "Epoch 00055: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] 
- 80s 801ms/step - loss: 3.3245 - val_loss: 3.9998\n", + "\n", + "Epoch 00055: val_loss did not improve from 3.53213\n", + "Epoch 56/500\n", + "\n", + "Epoch 00056: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.3507 - val_loss: 3.7415\n", + "\n", + "Epoch 00056: val_loss did not improve from 3.53213\n", + "Epoch 57/500\n", + "\n", + "Epoch 00057: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.3039 - val_loss: 3.5360\n", + "\n", + "Epoch 00057: val_loss did not improve from 3.53213\n", + "Epoch 58/500\n", + "\n", + "Epoch 00058: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.3545 - val_loss: 3.6459\n", + "\n", + "Epoch 00058: val_loss did not improve from 3.53213\n", + "Epoch 59/500\n", + "\n", + "Epoch 00059: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.5069 - val_loss: 3.5454\n", + "\n", + "Epoch 00059: val_loss did not improve from 3.53213\n", + "Epoch 60/500\n", + "\n", + "Epoch 00060: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.3454 - val_loss: 3.5025\n", + "\n", + "Epoch 00060: val_loss improved from 3.53213 to 3.50248, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 61/500\n", + "\n", + "Epoch 00061: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.3401 - val_loss: 3.4923\n", + "\n", + "Epoch 00061: val_loss improved from 3.50248 to 3.49233, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 62/500\n", + "\n", + "Epoch 00062: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.2500 - val_loss: 3.3722\n", + "\n", + "Epoch 00062: val_loss improved from 3.49233 to 3.37216, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 63/500\n", + "\n", + "Epoch 00063: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.2749 - val_loss: 3.5324\n", + "\n", + "Epoch 00063: val_loss did not improve from 3.37216\n", + "Epoch 64/500\n", + "\n", + "Epoch 00064: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.2514 - val_loss: 3.3746\n", + "\n", + "Epoch 00064: val_loss did not improve from 3.37216\n", + "Epoch 65/500\n", + "\n", + "Epoch 00065: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.1946 - val_loss: 3.0985\n", + "\n", + "Epoch 00065: val_loss improved from 3.37216 to 3.09848, saving model to experimento_ssd300_fault_1.h5\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 66/500\n", + "\n", + "Epoch 00066: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.2282 - val_loss: 3.2196\n", + "\n", + "Epoch 00066: val_loss did not improve from 3.09848\n", + "Epoch 67/500\n", + "\n", + "Epoch 00067: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.1572 - val_loss: 3.2833\n", + "\n", + "Epoch 
00067: val_loss did not improve from 3.09848\n", + "Epoch 68/500\n", + "\n", + "Epoch 00068: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.1747 - val_loss: 3.3269\n", + "\n", + "Epoch 00068: val_loss did not improve from 3.09848\n", + "Epoch 69/500\n", + "\n", + "Epoch 00069: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.2022 - val_loss: 3.5159\n", + "\n", + "Epoch 00069: val_loss did not improve from 3.09848\n", + "Epoch 70/500\n", + "\n", + "Epoch 00070: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.2015 - val_loss: 3.0329\n", + "\n", + "Epoch 00070: val_loss improved from 3.09848 to 3.03288, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 71/500\n", + "\n", + "Epoch 00071: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.1563 - val_loss: 3.1785\n", + "\n", + "Epoch 00071: val_loss did not improve from 3.03288\n", + "Epoch 72/500\n", + "\n", + "Epoch 00072: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.0944 - val_loss: 3.3246\n", + "\n", + "Epoch 00072: val_loss did not improve from 3.03288\n", + "Epoch 73/500\n", + "\n", + "Epoch 00073: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.0834 - val_loss: 3.3990\n", + "\n", + "Epoch 00073: val_loss did not improve from 3.03288\n", + "Epoch 74/500\n", + "\n", + "Epoch 00074: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.0638 - val_loss: 3.2314\n", + "\n", + "Epoch 00074: val_loss did not improve from 3.03288\n", + "Epoch 75/500\n", + "\n", + "Epoch 00075: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.0576 - val_loss: 3.2828\n", + "\n", + "Epoch 00075: val_loss did not improve from 3.03288\n", + "Epoch 76/500\n", + "\n", + "Epoch 00076: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 3.1641 - val_loss: 3.1036\n", + "\n", + "Epoch 00076: val_loss did not improve from 3.03288\n", + "Epoch 77/500\n", + "\n", + "Epoch 00077: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.0128 - val_loss: 3.3556\n", + "\n", + "Epoch 00077: val_loss did not improve from 3.03288\n", + "Epoch 78/500\n", + "\n", + "Epoch 00078: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.0574 - val_loss: 3.1095\n", + "\n", + "Epoch 00078: val_loss did not improve from 3.03288\n", + "Epoch 79/500\n", + "\n", + "Epoch 00079: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 3.0328 - val_loss: 3.1693\n", + "\n", + "Epoch 00079: val_loss did not improve from 3.03288\n", + "Epoch 80/500\n", + "\n", + "Epoch 00080: LearningRateScheduler setting learning rate to 0.001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 3.0884 - val_loss: 2.9858\n", + "\n", + "Epoch 00080: val_loss improved from 
3.03288 to 2.98575, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 81/500\n", + "\n", + "Epoch 00081: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.8945 - val_loss: 3.1375\n", + "\n", + "Epoch 00081: val_loss did not improve from 2.98575\n", + "Epoch 82/500\n", + "\n", + "Epoch 00082: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.8242 - val_loss: 3.1283\n", + "\n", + "Epoch 00082: val_loss did not improve from 2.98575\n", + "Epoch 83/500\n", + "\n", + "Epoch 00083: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.8158 - val_loss: 3.0812\n", + "\n", + "Epoch 00083: val_loss did not improve from 2.98575\n", + "Epoch 84/500\n", + "\n", + "Epoch 00084: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.7252 - val_loss: 3.0028\n", + "\n", + "Epoch 00084: val_loss did not improve from 2.98575\n", + "Epoch 85/500\n", + "\n", + "Epoch 00085: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.7557 - val_loss: 3.0320\n", + "\n", + "Epoch 00085: val_loss did not improve from 2.98575\n", + "Epoch 86/500\n", + "\n", + "Epoch 00086: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.6858 - val_loss: 3.0034\n", + "\n", + "Epoch 00086: val_loss did not improve from 2.98575\n", + "Epoch 87/500\n", + "\n", + "Epoch 00087: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.6145 - val_loss: 3.0041\n", + "\n", + "Epoch 00087: val_loss did not improve from 2.98575\n", + "Epoch 88/500\n", + "\n", + "Epoch 00088: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.6769 - val_loss: 3.0108\n", + "\n", + "Epoch 00088: val_loss did not improve from 2.98575\n", + "Epoch 89/500\n", + "\n", + "Epoch 00089: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.6359 - val_loss: 2.9382\n", + "\n", + "Epoch 00089: val_loss improved from 2.98575 to 2.93818, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 90/500\n", + "\n", + "Epoch 00090: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.5922 - val_loss: 2.9442\n", + "\n", + "Epoch 00090: val_loss did not improve from 2.93818\n", + "Epoch 91/500\n", + "\n", + "Epoch 00091: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.6036 - val_loss: 2.9341\n", + "\n", + "Epoch 00091: val_loss improved from 2.93818 to 2.93406, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 92/500\n", + "\n", + "Epoch 00092: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.5844 - val_loss: 2.9017\n", + "\n", + "Epoch 00092: val_loss improved from 2.93406 to 2.90171, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 93/500\n", + "\n", + "Epoch 00093: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 
[==============================] - 80s 801ms/step - loss: 2.5528 - val_loss: 2.8696\n", + "\n", + "Epoch 00093: val_loss improved from 2.90171 to 2.86965, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 94/500\n", + "\n", + "Epoch 00094: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.5428 - val_loss: 2.9039\n", + "\n", + "Epoch 00094: val_loss did not improve from 2.86965\n", + "Epoch 95/500\n", + "\n", + "Epoch 00095: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.5477 - val_loss: 2.8995\n", + "\n", + "Epoch 00095: val_loss did not improve from 2.86965\n", + "Epoch 96/500\n", + "\n", + "Epoch 00096: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.5020 - val_loss: 2.9773\n", + "\n", + "Epoch 00096: val_loss did not improve from 2.86965\n", + "Epoch 97/500\n", + "\n", + "Epoch 00097: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.5610 - val_loss: 2.8112\n", + "\n", + "Epoch 00097: val_loss improved from 2.86965 to 2.81115, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 98/500\n", + "\n", + "Epoch 00098: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4317 - val_loss: 2.8764\n", + "\n", + "Epoch 00098: val_loss did not improve from 2.81115\n", + "Epoch 99/500\n", + "\n", + "Epoch 00099: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.5008 - val_loss: 2.8408\n", + "\n", + "Epoch 00099: val_loss did not improve from 2.81115\n", + "Epoch 100/500\n", + "\n", + "Epoch 00100: LearningRateScheduler setting learning rate to 0.0001.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.4662 - val_loss: 2.8257\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Epoch 00100: val_loss did not improve from 2.81115\n", + "Epoch 101/500\n", + "\n", + "Epoch 00101: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.4243 - val_loss: 2.7867\n", + "\n", + "Epoch 00101: val_loss improved from 2.81115 to 2.78665, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 102/500\n", + "\n", + "Epoch 00102: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.4661 - val_loss: 2.7834\n", + "\n", + "Epoch 00102: val_loss improved from 2.78665 to 2.78338, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 103/500\n", + "\n", + "Epoch 00103: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4118 - val_loss: 2.7545\n", + "\n", + "Epoch 00103: val_loss improved from 2.78338 to 2.75448, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 104/500\n", + "\n", + "Epoch 00104: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4510 - val_loss: 2.7466\n", + "\n", + "Epoch 00104: val_loss improved from 2.75448 to 2.74665, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 105/500\n", + "\n", + "Epoch 00105: LearningRateScheduler setting 
learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4811 - val_loss: 2.7390\n", + "\n", + "Epoch 00105: val_loss improved from 2.74665 to 2.73900, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 106/500\n", + "\n", + "Epoch 00106: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4332 - val_loss: 2.7359\n", + "\n", + "Epoch 00106: val_loss improved from 2.73900 to 2.73586, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 107/500\n", + "\n", + "Epoch 00107: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.4275 - val_loss: 2.7517\n", + "\n", + "Epoch 00107: val_loss did not improve from 2.73586\n", + "Epoch 108/500\n", + "\n", + "Epoch 00108: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3691 - val_loss: 2.7352\n", + "\n", + "Epoch 00108: val_loss improved from 2.73586 to 2.73520, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 109/500\n", + "\n", + "Epoch 00109: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4060 - val_loss: 2.7100\n", + "\n", + "Epoch 00109: val_loss improved from 2.73520 to 2.70995, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 110/500\n", + "\n", + "Epoch 00110: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.4380 - val_loss: 2.6994\n", + "\n", + "Epoch 00110: val_loss improved from 2.70995 to 2.69939, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 111/500\n", + "\n", + "Epoch 00111: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.4594 - val_loss: 2.7045\n", + "\n", + "Epoch 00111: val_loss did not improve from 2.69939\n", + "Epoch 112/500\n", + "\n", + "Epoch 00112: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.4270 - val_loss: 2.7049\n", + "\n", + "Epoch 00112: val_loss did not improve from 2.69939\n", + "Epoch 113/500\n", + "\n", + "Epoch 00113: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3366 - val_loss: 2.7099\n", + "\n", + "Epoch 00113: val_loss did not improve from 2.69939\n", + "Epoch 114/500\n", + "\n", + "Epoch 00114: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3707 - val_loss: 2.7035\n", + "\n", + "Epoch 00114: val_loss did not improve from 2.69939\n", + "Epoch 115/500\n", + "\n", + "Epoch 00115: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.4000 - val_loss: 2.7090\n", + "\n", + "Epoch 00115: val_loss did not improve from 2.69939\n", + "Epoch 116/500\n", + "\n", + "Epoch 00116: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3661 - val_loss: 2.7183\n", + "\n", + "Epoch 00116: val_loss did not improve from 2.69939\n", + "Epoch 117/500\n", + "\n", + "Epoch 00117: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 
801ms/step - loss: 2.4599 - val_loss: 2.7063\n", + "\n", + "Epoch 00117: val_loss did not improve from 2.69939\n", + "Epoch 118/500\n", + "\n", + "Epoch 00118: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3982 - val_loss: 2.7186\n", + "\n", + "Epoch 00118: val_loss did not improve from 2.69939\n", + "Epoch 119/500\n", + "\n", + "Epoch 00119: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.4185 - val_loss: 2.7158\n", + "\n", + "Epoch 00119: val_loss did not improve from 2.69939\n", + "Epoch 120/500\n", + "\n", + "Epoch 00120: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.4261 - val_loss: 2.7000\n", + "\n", + "Epoch 00120: val_loss did not improve from 2.69939\n", + "Epoch 121/500\n", + "\n", + "Epoch 00121: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3914 - val_loss: 2.7090\n", + "\n", + "Epoch 00121: val_loss did not improve from 2.69939\n", + "Epoch 122/500\n", + "\n", + "Epoch 00122: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4349 - val_loss: 2.6965\n", + "\n", + "Epoch 00122: val_loss improved from 2.69939 to 2.69654, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 123/500\n", + "\n", + "Epoch 00123: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.4154 - val_loss: 2.7004\n", + "\n", + "Epoch 00123: val_loss did not improve from 2.69654\n", + "Epoch 124/500\n", + "\n", + "Epoch 00124: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3835 - val_loss: 2.7080\n", + "\n", + "Epoch 00124: val_loss did not improve from 2.69654\n", + "Epoch 125/500\n", + "\n", + "Epoch 00125: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3307 - val_loss: 2.6899\n", + "\n", + "Epoch 00125: val_loss improved from 2.69654 to 2.68988, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 126/500\n", + "\n", + "Epoch 00126: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.3386 - val_loss: 2.6951\n", + "\n", + "Epoch 00126: val_loss did not improve from 2.68988\n", + "Epoch 127/500\n", + "\n", + "Epoch 00127: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.4267 - val_loss: 2.6741\n", + "\n", + "Epoch 00127: val_loss improved from 2.68988 to 2.67407, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 128/500\n", + "\n", + "Epoch 00128: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3711 - val_loss: 2.6878\n", + "\n", + "Epoch 00128: val_loss did not improve from 2.67407\n", + "Epoch 129/500\n", + "\n", + "Epoch 00129: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3586 - val_loss: 2.6804\n", + "\n", + "Epoch 00129: val_loss did not improve from 2.67407\n", + "Epoch 130/500\n", + "\n", + "Epoch 00130: LearningRateScheduler 
setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3444 - val_loss: 2.6767\n", + "\n", + "Epoch 00130: val_loss did not improve from 2.67407\n", + "Epoch 131/500\n", + "\n", + "Epoch 00131: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.4412 - val_loss: 2.6694\n", + "\n", + "Epoch 00131: val_loss improved from 2.67407 to 2.66941, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 132/500\n", + "\n", + "Epoch 00132: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3977 - val_loss: 2.6752\n", + "\n", + "Epoch 00132: val_loss did not improve from 2.66941\n", + "Epoch 133/500\n", + "\n", + "Epoch 00133: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3353 - val_loss: 2.6631\n", + "\n", + "Epoch 00133: val_loss improved from 2.66941 to 2.66305, saving model to experimento_ssd300_fault_1.h5\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 134/500\n", + "\n", + "Epoch 00134: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3689 - val_loss: 2.6725\n", + "\n", + "Epoch 00134: val_loss did not improve from 2.66305\n", + "Epoch 135/500\n", + "\n", + "Epoch 00135: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3339 - val_loss: 2.6928\n", + "\n", + "Epoch 00135: val_loss did not improve from 2.66305\n", + "Epoch 136/500\n", + "\n", + "Epoch 00136: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3850 - val_loss: 2.6774\n", + "\n", + "Epoch 00136: val_loss did not improve from 2.66305\n", + "Epoch 137/500\n", + "\n", + "Epoch 00137: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3342 - val_loss: 2.6772\n", + "\n", + "Epoch 00137: val_loss did not improve from 2.66305\n", + "Epoch 138/500\n", + "\n", + "Epoch 00138: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3865 - val_loss: 2.6818\n", + "\n", + "Epoch 00138: val_loss did not improve from 2.66305\n", + "Epoch 139/500\n", + "\n", + "Epoch 00139: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3988 - val_loss: 2.6868\n", + "\n", + "Epoch 00139: val_loss did not improve from 2.66305\n", + "Epoch 140/500\n", + "\n", + "Epoch 00140: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3424 - val_loss: 2.6794\n", + "\n", + "Epoch 00140: val_loss did not improve from 2.66305\n", + "Epoch 141/500\n", + "\n", + "Epoch 00141: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3614 - val_loss: 2.6603\n", + "\n", + "Epoch 00141: val_loss improved from 2.66305 to 2.66026, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 142/500\n", + "\n", + "Epoch 00142: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3719 - 
val_loss: 2.6549\n", + "\n", + "Epoch 00142: val_loss improved from 2.66026 to 2.65490, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 143/500\n", + "\n", + "Epoch 00143: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3043 - val_loss: 2.6432\n", + "\n", + "Epoch 00143: val_loss improved from 2.65490 to 2.64322, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 144/500\n", + "\n", + "Epoch 00144: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2873 - val_loss: 2.6532\n", + "\n", + "Epoch 00144: val_loss did not improve from 2.64322\n", + "Epoch 145/500\n", + "\n", + "Epoch 00145: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3059 - val_loss: 2.6704\n", + "\n", + "Epoch 00145: val_loss did not improve from 2.64322\n", + "Epoch 146/500\n", + "\n", + "Epoch 00146: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3863 - val_loss: 2.6564\n", + "\n", + "Epoch 00146: val_loss did not improve from 2.64322\n", + "Epoch 147/500\n", + "\n", + "Epoch 00147: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2575 - val_loss: 2.6529\n", + "\n", + "Epoch 00147: val_loss did not improve from 2.64322\n", + "Epoch 148/500\n", + "\n", + "Epoch 00148: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3390 - val_loss: 2.6654\n", + "\n", + "Epoch 00148: val_loss did not improve from 2.64322\n", + "Epoch 149/500\n", + "\n", + "Epoch 00149: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3935 - val_loss: 2.6721\n", + "\n", + "Epoch 00149: val_loss did not improve from 2.64322\n", + "Epoch 150/500\n", + "\n", + "Epoch 00150: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3100 - val_loss: 2.6558\n", + "\n", + "Epoch 00150: val_loss did not improve from 2.64322\n", + "Epoch 151/500\n", + "\n", + "Epoch 00151: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3418 - val_loss: 2.6429\n", + "\n", + "Epoch 00151: val_loss improved from 2.64322 to 2.64291, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 152/500\n", + "\n", + "Epoch 00152: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3659 - val_loss: 2.6681\n", + "\n", + "Epoch 00152: val_loss did not improve from 2.64291\n", + "Epoch 153/500\n", + "\n", + "Epoch 00153: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3580 - val_loss: 2.6447\n", + "\n", + "Epoch 00153: val_loss did not improve from 2.64291\n", + "Epoch 154/500\n", + "\n", + "Epoch 00154: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3295 - val_loss: 2.6318\n", + "\n", + "Epoch 00154: val_loss improved from 2.64291 to 2.63178, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 155/500\n", + "\n", + "Epoch 00155: 
LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.2901 - val_loss: 2.6443\n", + "\n", + "Epoch 00155: val_loss did not improve from 2.63178\n", + "Epoch 156/500\n", + "\n", + "Epoch 00156: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3641 - val_loss: 2.6441\n", + "\n", + "Epoch 00156: val_loss did not improve from 2.63178\n", + "Epoch 157/500\n", + "\n", + "Epoch 00157: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2748 - val_loss: 2.6224\n", + "\n", + "Epoch 00157: val_loss improved from 2.63178 to 2.62240, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 158/500\n", + "\n", + "Epoch 00158: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3301 - val_loss: 2.6146\n", + "\n", + "Epoch 00158: val_loss improved from 2.62240 to 2.61456, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 159/500\n", + "\n", + "Epoch 00159: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3196 - val_loss: 2.6206\n", + "\n", + "Epoch 00159: val_loss did not improve from 2.61456\n", + "Epoch 160/500\n", + "\n", + "Epoch 00160: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3206 - val_loss: 2.6014\n", + "\n", + "Epoch 00160: val_loss improved from 2.61456 to 2.60142, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 161/500\n", + "\n", + "Epoch 00161: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3785 - val_loss: 2.6181\n", + "\n", + "Epoch 00161: val_loss did not improve from 2.60142\n", + "Epoch 162/500\n", + "\n", + "Epoch 00162: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.3454 - val_loss: 2.6311\n", + "\n", + "Epoch 00162: val_loss did not improve from 2.60142\n", + "Epoch 163/500\n", + "\n", + "Epoch 00163: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3812 - val_loss: 2.6259\n", + "\n", + "Epoch 00163: val_loss did not improve from 2.60142\n", + "Epoch 164/500\n", + "\n", + "Epoch 00164: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3041 - val_loss: 2.5997\n", + "\n", + "Epoch 00164: val_loss improved from 2.60142 to 2.59965, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 165/500\n", + "\n", + "Epoch 00165: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3461 - val_loss: 2.5921\n", + "\n", + "Epoch 00165: val_loss improved from 2.59965 to 2.59209, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 166/500\n", + "\n", + "Epoch 00166: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2970 - val_loss: 2.5947\n", + "\n", + "Epoch 00166: val_loss did not improve from 2.59209\n", + "Epoch 167/500\n", + "\n", + "Epoch 00167: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 80s 799ms/step - loss: 2.3181 - val_loss: 2.5825\n", + "\n", + "Epoch 00167: val_loss improved from 2.59209 to 2.58251, saving model to experimento_ssd300_fault_1.h5\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 168/500\n", + "\n", + "Epoch 00168: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3404 - val_loss: 2.5698\n", + "\n", + "Epoch 00168: val_loss improved from 2.58251 to 2.56985, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 169/500\n", + "\n", + "Epoch 00169: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3116 - val_loss: 2.5853\n", + "\n", + "Epoch 00169: val_loss did not improve from 2.56985\n", + "Epoch 170/500\n", + "\n", + "Epoch 00170: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3500 - val_loss: 2.5701\n", + "\n", + "Epoch 00170: val_loss did not improve from 2.56985\n", + "Epoch 171/500\n", + "\n", + "Epoch 00171: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2736 - val_loss: 2.5792\n", + "\n", + "Epoch 00171: val_loss did not improve from 2.56985\n", + "Epoch 172/500\n", + "\n", + "Epoch 00172: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3543 - val_loss: 2.5719\n", + "\n", + "Epoch 00172: val_loss did not improve from 2.56985\n", + "Epoch 173/500\n", + "\n", + "Epoch 00173: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2950 - val_loss: 2.5573\n", + "\n", + "Epoch 00173: val_loss improved from 2.56985 to 2.55734, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 174/500\n", + "\n", + "Epoch 00174: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2876 - val_loss: 2.5647\n", + "\n", + "Epoch 00174: val_loss did not improve from 2.55734\n", + "Epoch 175/500\n", + "\n", + "Epoch 00175: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3045 - val_loss: 2.5684\n", + "\n", + "Epoch 00175: val_loss did not improve from 2.55734\n", + "Epoch 176/500\n", + "\n", + "Epoch 00176: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3047 - val_loss: 2.5709\n", + "\n", + "Epoch 00176: val_loss did not improve from 2.55734\n", + "Epoch 177/500\n", + "\n", + "Epoch 00177: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3187 - val_loss: 2.5369\n", + "\n", + "Epoch 00177: val_loss improved from 2.55734 to 2.53691, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 178/500\n", + "\n", + "Epoch 00178: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3003 - val_loss: 2.5418\n", + "\n", + "Epoch 00178: val_loss did not improve from 2.53691\n", + "Epoch 179/500\n", + "\n", + "Epoch 00179: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 
2.2856 - val_loss: 2.5507\n", + "\n", + "Epoch 00179: val_loss did not improve from 2.53691\n", + "Epoch 180/500\n", + "\n", + "Epoch 00180: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2895 - val_loss: 2.5314\n", + "\n", + "Epoch 00180: val_loss improved from 2.53691 to 2.53143, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 181/500\n", + "\n", + "Epoch 00181: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.2750 - val_loss: 2.5598\n", + "\n", + "Epoch 00181: val_loss did not improve from 2.53143\n", + "Epoch 182/500\n", + "\n", + "Epoch 00182: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2768 - val_loss: 2.5310\n", + "\n", + "Epoch 00182: val_loss improved from 2.53143 to 2.53100, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 183/500\n", + "\n", + "Epoch 00183: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3176 - val_loss: 2.5475\n", + "\n", + "Epoch 00183: val_loss did not improve from 2.53100\n", + "Epoch 184/500\n", + "\n", + "Epoch 00184: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2267 - val_loss: 2.5595\n", + "\n", + "Epoch 00184: val_loss did not improve from 2.53100\n", + "Epoch 185/500\n", + "\n", + "Epoch 00185: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3264 - val_loss: 2.5471\n", + "\n", + "Epoch 00185: val_loss did not improve from 2.53100\n", + "Epoch 186/500\n", + "\n", + "Epoch 00186: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3468 - val_loss: 2.5730\n", + "\n", + "Epoch 00186: val_loss did not improve from 2.53100\n", + "Epoch 187/500\n", + "\n", + "Epoch 00187: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.3092 - val_loss: 2.5463\n", + "\n", + "Epoch 00187: val_loss did not improve from 2.53100\n", + "Epoch 188/500\n", + "\n", + "Epoch 00188: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2971 - val_loss: 2.5499\n", + "\n", + "Epoch 00188: val_loss did not improve from 2.53100\n", + "Epoch 189/500\n", + "\n", + "Epoch 00189: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2848 - val_loss: 2.5662\n", + "\n", + "Epoch 00189: val_loss did not improve from 2.53100\n", + "Epoch 190/500\n", + "\n", + "Epoch 00190: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2599 - val_loss: 2.5368\n", + "\n", + "Epoch 00190: val_loss did not improve from 2.53100\n", + "Epoch 191/500\n", + "\n", + "Epoch 00191: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2911 - val_loss: 2.5382\n", + "\n", + "Epoch 00191: val_loss did not improve from 2.53100\n", + "Epoch 192/500\n", + "\n", + "Epoch 00192: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 80s 802ms/step - loss: 2.2675 - val_loss: 2.5432\n", + "\n", + "Epoch 00192: val_loss did not improve from 2.53100\n", + "Epoch 193/500\n", + "\n", + "Epoch 00193: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2750 - val_loss: 2.5171\n", + "\n", + "Epoch 00193: val_loss improved from 2.53100 to 2.51713, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 194/500\n", + "\n", + "Epoch 00194: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2869 - val_loss: 2.5373\n", + "\n", + "Epoch 00194: val_loss did not improve from 2.51713\n", + "Epoch 195/500\n", + "\n", + "Epoch 00195: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2793 - val_loss: 2.5192\n", + "\n", + "Epoch 00195: val_loss did not improve from 2.51713\n", + "Epoch 196/500\n", + "\n", + "Epoch 00196: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2531 - val_loss: 2.5084\n", + "\n", + "Epoch 00196: val_loss improved from 2.51713 to 2.50840, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 197/500\n", + "\n", + "Epoch 00197: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.3398 - val_loss: 2.5500\n", + "\n", + "Epoch 00197: val_loss did not improve from 2.50840\n", + "Epoch 198/500\n", + "\n", + "Epoch 00198: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2146 - val_loss: 2.4976\n", + "\n", + "Epoch 00198: val_loss improved from 2.50840 to 2.49763, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 199/500\n", + "\n", + "Epoch 00199: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.2694 - val_loss: 2.5370\n", + "\n", + "Epoch 00199: val_loss did not improve from 2.49763\n", + "Epoch 200/500\n", + "\n", + "Epoch 00200: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 804ms/step - loss: 2.2821 - val_loss: 2.5114\n", + "\n", + "Epoch 00200: val_loss did not improve from 2.49763\n", + "Epoch 201/500\n", + "\n", + "Epoch 00201: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 805ms/step - loss: 2.2925 - val_loss: 2.4810\n", + "\n", + "Epoch 00201: val_loss improved from 2.49763 to 2.48098, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 202/500\n", + "\n", + "Epoch 00202: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 804ms/step - loss: 2.2775 - val_loss: 2.5117\n", + "\n", + "Epoch 00202: val_loss did not improve from 2.48098\n", + "Epoch 203/500\n", + "\n", + "Epoch 00203: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 810ms/step - loss: 2.2070 - val_loss: 2.5340\n", + "\n", + "Epoch 00203: val_loss did not improve from 2.48098\n", + "Epoch 204/500\n", + "\n", + "Epoch 00204: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 811ms/step - loss: 
2.3192 - val_loss: 2.5374\n", + "\n", + "Epoch 00204: val_loss did not improve from 2.48098\n", + "Epoch 205/500\n", + "\n", + "Epoch 00205: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 807ms/step - loss: 2.2391 - val_loss: 2.4816\n", + "\n", + "Epoch 00205: val_loss did not improve from 2.48098\n", + "Epoch 206/500\n", + "\n", + "Epoch 00206: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 815ms/step - loss: 2.2264 - val_loss: 2.5246\n", + "\n", + "Epoch 00206: val_loss did not improve from 2.48098\n", + "Epoch 207/500\n", + "\n", + "Epoch 00207: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 811ms/step - loss: 2.2346 - val_loss: 2.4970\n", + "\n", + "Epoch 00207: val_loss did not improve from 2.48098\n", + "Epoch 208/500\n", + "\n", + "Epoch 00208: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 82s 825ms/step - loss: 2.3264 - val_loss: 2.5283\n", + "\n", + "Epoch 00208: val_loss did not improve from 2.48098\n", + "Epoch 209/500\n", + "\n", + "Epoch 00209: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 811ms/step - loss: 2.1975 - val_loss: 2.5046\n", + "\n", + "Epoch 00209: val_loss did not improve from 2.48098\n", + "Epoch 210/500\n", + "\n", + "Epoch 00210: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2417 - val_loss: 2.5188\n", + "\n", + "Epoch 00210: val_loss did not improve from 2.48098\n", + "Epoch 211/500\n", + "\n", + "Epoch 00211: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.3256 - val_loss: 2.5239\n", + "\n", + "Epoch 00211: val_loss did not improve from 2.48098\n", + "Epoch 212/500\n", + "\n", + "Epoch 00212: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.3239 - val_loss: 2.5052\n", + "\n", + "Epoch 00212: val_loss did not improve from 2.48098\n", + "Epoch 213/500\n", + "\n", + "Epoch 00213: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.2680 - val_loss: 2.5129\n", + "\n", + "Epoch 00213: val_loss did not improve from 2.48098\n", + "Epoch 214/500\n", + "\n", + "Epoch 00214: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.2134 - val_loss: 2.5480\n", + "\n", + "Epoch 00214: val_loss did not improve from 2.48098\n", + "Epoch 215/500\n", + "\n", + "Epoch 00215: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.2342 - val_loss: 2.5272\n", + "\n", + "Epoch 00215: val_loss did not improve from 2.48098\n", + "Epoch 216/500\n", + "\n", + "Epoch 00216: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 813ms/step - loss: 2.2669 - val_loss: 2.5219\n", + "\n", + "Epoch 00216: val_loss did not improve from 2.48098\n", + "Epoch 217/500\n", + "\n", + "Epoch 00217: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 825ms/step - loss: 2.2314 - val_loss: 2.5375\n", + "\n", + "Epoch 00217: val_loss did not 
improve from 2.48098\n", + "Epoch 218/500\n", + "\n", + "Epoch 00218: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 826ms/step - loss: 2.2534 - val_loss: 2.5319\n", + "\n", + "Epoch 00218: val_loss did not improve from 2.48098\n", + "Epoch 219/500\n", + "\n", + "Epoch 00219: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 82s 819ms/step - loss: 2.2478 - val_loss: 2.5195\n", + "\n", + "Epoch 00219: val_loss did not improve from 2.48098\n", + "Epoch 220/500\n", + "\n", + "Epoch 00220: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 84s 837ms/step - loss: 2.2300 - val_loss: 2.5264\n", + "\n", + "Epoch 00220: val_loss did not improve from 2.48098\n", + "Epoch 221/500\n", + "\n", + "Epoch 00221: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 84s 837ms/step - loss: 2.2123 - val_loss: 2.5198\n", + "\n", + "Epoch 00221: val_loss did not improve from 2.48098\n", + "Epoch 222/500\n", + "\n", + "Epoch 00222: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 84s 840ms/step - loss: 2.2166 - val_loss: 2.5085\n", + "\n", + "Epoch 00222: val_loss did not improve from 2.48098\n", + "Epoch 223/500\n", + "\n", + "Epoch 00223: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 84s 840ms/step - loss: 2.2422 - val_loss: 2.4828\n", + "\n", + "Epoch 00223: val_loss did not improve from 2.48098\n", + "Epoch 224/500\n", + "\n", + "Epoch 00224: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 84s 837ms/step - loss: 2.2560 - val_loss: 2.5009\n", + "\n", + "Epoch 00224: val_loss did not improve from 2.48098\n", + "Epoch 225/500\n", + "\n", + "Epoch 00225: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 807ms/step - loss: 2.2109 - val_loss: 2.4980\n", + "\n", + "Epoch 00225: val_loss did not improve from 2.48098\n", + "Epoch 226/500\n", + "\n", + "Epoch 00226: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 82s 818ms/step - loss: 2.2310 - val_loss: 2.4782\n", + "\n", + "Epoch 00226: val_loss improved from 2.48098 to 2.47816, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 227/500\n", + "\n", + "Epoch 00227: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 832ms/step - loss: 2.2518 - val_loss: 2.4627\n", + "\n", + "Epoch 00227: val_loss improved from 2.47816 to 2.46269, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 228/500\n", + "\n", + "Epoch 00228: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 834ms/step - loss: 2.2025 - val_loss: 2.4646\n", + "\n", + "Epoch 00228: val_loss did not improve from 2.46269\n", + "Epoch 229/500\n", + "\n", + "Epoch 00229: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 833ms/step - loss: 2.2603 - val_loss: 2.4577\n", + "\n", + "Epoch 00229: val_loss improved from 2.46269 to 2.45775, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 230/500\n", + "\n", + "Epoch 00230: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 
833ms/step - loss: 2.2676 - val_loss: 2.4634\n", + "\n", + "Epoch 00230: val_loss did not improve from 2.45775\n", + "Epoch 231/500\n", + "\n", + "Epoch 00231: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 834ms/step - loss: 2.1934 - val_loss: 2.4829\n", + "\n", + "Epoch 00231: val_loss did not improve from 2.45775\n", + "Epoch 232/500\n", + "\n", + "Epoch 00232: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 834ms/step - loss: 2.1895 - val_loss: 2.4623\n", + "\n", + "Epoch 00232: val_loss did not improve from 2.45775\n", + "Epoch 233/500\n", + "\n", + "Epoch 00233: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 832ms/step - loss: 2.1922 - val_loss: 2.4841\n", + "\n", + "Epoch 00233: val_loss did not improve from 2.45775\n", + "Epoch 234/500\n", + "\n", + "Epoch 00234: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 833ms/step - loss: 2.2130 - val_loss: 2.4436\n", + "\n", + "Epoch 00234: val_loss improved from 2.45775 to 2.44358, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 235/500\n", + "\n", + "Epoch 00235: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 834ms/step - loss: 2.1743 - val_loss: 2.4906\n", + "\n", + "Epoch 00235: val_loss did not improve from 2.44358\n", + "Epoch 236/500\n", + "\n", + "Epoch 00236: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 83s 832ms/step - loss: 2.2005 - val_loss: 2.4778\n", + "\n", + "Epoch 00236: val_loss did not improve from 2.44358\n", + "Epoch 237/500\n", + "\n", + "Epoch 00237: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 82s 818ms/step - loss: 2.2146 - val_loss: 2.4629\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Epoch 00237: val_loss did not improve from 2.44358\n", + "Epoch 238/500\n", + "\n", + "Epoch 00238: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2378 - val_loss: 2.4258\n", + "\n", + "Epoch 00238: val_loss improved from 2.44358 to 2.42576, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 239/500\n", + "\n", + "Epoch 00239: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.2562 - val_loss: 2.4473\n", + "\n", + "Epoch 00239: val_loss did not improve from 2.42576\n", + "Epoch 240/500\n", + "\n", + "Epoch 00240: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1777 - val_loss: 2.4431\n", + "\n", + "Epoch 00240: val_loss did not improve from 2.42576\n", + "Epoch 241/500\n", + "\n", + "Epoch 00241: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2274 - val_loss: 2.4602\n", + "\n", + "Epoch 00241: val_loss did not improve from 2.42576\n", + "Epoch 242/500\n", + "\n", + "Epoch 00242: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 81s 805ms/step - loss: 2.2486 - val_loss: 2.4562\n", + "\n", + "Epoch 00242: val_loss did not improve from 2.42576\n", + "Epoch 243/500\n", + "\n", + "Epoch 00243: 
LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 82s 817ms/step - loss: 2.1823 - val_loss: 2.4786\n", + "\n", + "Epoch 00243: val_loss did not improve from 2.42576\n", + "Epoch 244/500\n", + "\n", + "Epoch 00244: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 87s 870ms/step - loss: 2.2288 - val_loss: 2.4849\n", + "\n", + "Epoch 00244: val_loss did not improve from 2.42576\n", + "Epoch 245/500\n", + "\n", + "Epoch 00245: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 85s 852ms/step - loss: 2.1660 - val_loss: 2.4766\n", + "\n", + "Epoch 00245: val_loss did not improve from 2.42576\n", + "Epoch 246/500\n", + "\n", + "Epoch 00246: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2301 - val_loss: 2.5027\n", + "\n", + "Epoch 00246: val_loss did not improve from 2.42576\n", + "Epoch 247/500\n", + "\n", + "Epoch 00247: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2135 - val_loss: 2.4650\n", + "\n", + "Epoch 00247: val_loss did not improve from 2.42576\n", + "Epoch 248/500\n", + "\n", + "Epoch 00248: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2765 - val_loss: 2.4521\n", + "\n", + "Epoch 00248: val_loss did not improve from 2.42576\n", + "Epoch 249/500\n", + "\n", + "Epoch 00249: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2387 - val_loss: 2.4847\n", + "\n", + "Epoch 00249: val_loss did not improve from 2.42576\n", + "Epoch 250/500\n", + "\n", + "Epoch 00250: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.1875 - val_loss: 2.4815\n", + "\n", + "Epoch 00250: val_loss did not improve from 2.42576\n", + "Epoch 251/500\n", + "\n", + "Epoch 00251: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1833 - val_loss: 2.4569\n", + "\n", + "Epoch 00251: val_loss did not improve from 2.42576\n", + "Epoch 252/500\n", + "\n", + "Epoch 00252: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2238 - val_loss: 2.4827\n", + "\n", + "Epoch 00252: val_loss did not improve from 2.42576\n", + "Epoch 253/500\n", + "\n", + "Epoch 00253: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.2501 - val_loss: 2.4717\n", + "\n", + "Epoch 00253: val_loss did not improve from 2.42576\n", + "Epoch 254/500\n", + "\n", + "Epoch 00254: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1308 - val_loss: 2.4734\n", + "\n", + "Epoch 00254: val_loss did not improve from 2.42576\n", + "Epoch 255/500\n", + "\n", + "Epoch 00255: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 802ms/step - loss: 2.1565 - val_loss: 2.4662\n", + "\n", + "Epoch 00255: val_loss did not improve from 2.42576\n", + "Epoch 256/500\n", + "\n", + "Epoch 00256: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 80s 801ms/step - loss: 2.2195 - val_loss: 2.4509\n", + "\n", + "Epoch 00256: val_loss did not improve from 2.42576\n", + "Epoch 257/500\n", + "\n", + "Epoch 00257: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2483 - val_loss: 2.4529\n", + "\n", + "Epoch 00257: val_loss did not improve from 2.42576\n", + "Epoch 258/500\n", + "\n", + "Epoch 00258: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.1620 - val_loss: 2.4285\n", + "\n", + "Epoch 00258: val_loss did not improve from 2.42576\n", + "Epoch 259/500\n", + "\n", + "Epoch 00259: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 801ms/step - loss: 2.1740 - val_loss: 2.4242\n", + "\n", + "Epoch 00259: val_loss improved from 2.42576 to 2.42424, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 260/500\n", + "\n", + "Epoch 00260: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1539 - val_loss: 2.4161\n", + "\n", + "Epoch 00260: val_loss improved from 2.42424 to 2.41611, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 261/500\n", + "\n", + "Epoch 00261: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1664 - val_loss: 2.4259\n", + "\n", + "Epoch 00261: val_loss did not improve from 2.41611\n", + "Epoch 262/500\n", + "\n", + "Epoch 00262: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.2136 - val_loss: 2.4453\n", + "\n", + "Epoch 00262: val_loss did not improve from 2.41611\n", + "Epoch 263/500\n", + "\n", + "Epoch 00263: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1044 - val_loss: 2.4816\n", + "\n", + "Epoch 00263: val_loss did not improve from 2.41611\n", + "Epoch 264/500\n", + "\n", + "Epoch 00264: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1978 - val_loss: 2.4537\n", + "\n", + "Epoch 00264: val_loss did not improve from 2.41611\n", + "Epoch 265/500\n", + "\n", + "Epoch 00265: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1747 - val_loss: 2.4479\n", + "\n", + "Epoch 00265: val_loss did not improve from 2.41611\n", + "Epoch 266/500\n", + "\n", + "Epoch 00266: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1925 - val_loss: 2.4660\n", + "\n", + "Epoch 00266: val_loss did not improve from 2.41611\n", + "Epoch 267/500\n", + "\n", + "Epoch 00267: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1858 - val_loss: 2.4388\n", + "\n", + "Epoch 00267: val_loss did not improve from 2.41611\n", + "Epoch 268/500\n", + "\n", + "Epoch 00268: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.2165 - val_loss: 2.4604\n", + "\n", + "Epoch 00268: val_loss did not improve from 2.41611\n", + "Epoch 269/500\n", + "\n", + "Epoch 00269: LearningRateScheduler setting 
learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.2140 - val_loss: 2.4490\n", + "\n", + "Epoch 00269: val_loss did not improve from 2.41611\n", + "Epoch 270/500\n", + "\n", + "Epoch 00270: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1678 - val_loss: 2.4427\n", + "\n", + "Epoch 00270: val_loss did not improve from 2.41611\n", + "Epoch 271/500\n", + "\n", + "Epoch 00271: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2064 - val_loss: 2.4158\n", + "\n", + "Epoch 00271: val_loss improved from 2.41611 to 2.41575, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 272/500\n", + "\n", + "Epoch 00272: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1161 - val_loss: 2.4396\n", + "\n", + "Epoch 00272: val_loss did not improve from 2.41575\n", + "Epoch 273/500\n", + "\n", + "Epoch 00273: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 799ms/step - loss: 2.1986 - val_loss: 2.4483\n", + "\n", + "Epoch 00273: val_loss did not improve from 2.41575\n", + "Epoch 274/500\n", + "\n", + "Epoch 00274: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1965 - val_loss: 2.4307\n", + "\n", + "Epoch 00274: val_loss did not improve from 2.41575\n", + "Epoch 275/500\n", + "\n", + "Epoch 00275: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.2041 - val_loss: 2.4547\n", + "\n", + "Epoch 00275: val_loss did not improve from 2.41575\n", + "Epoch 276/500\n", + "\n", + "Epoch 00276: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.2368 - val_loss: 2.4124\n", + "\n", + "Epoch 00276: val_loss improved from 2.41575 to 2.41239, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 277/500\n", + "\n", + "Epoch 00277: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1827 - val_loss: 2.4110\n", + "\n", + "Epoch 00277: val_loss improved from 2.41239 to 2.41095, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 278/500\n", + "\n", + "Epoch 00278: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1829 - val_loss: 2.4286\n", + "\n", + "Epoch 00278: val_loss did not improve from 2.41095\n", + "Epoch 279/500\n", + "\n", + "Epoch 00279: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.2048 - val_loss: 2.4266\n", + "\n", + "Epoch 00279: val_loss did not improve from 2.41095\n", + "Epoch 280/500\n", + "\n", + "Epoch 00280: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1765 - val_loss: 2.4449\n", + "\n", + "Epoch 00280: val_loss did not improve from 2.41095\n", + "Epoch 281/500\n", + "\n", + "Epoch 00281: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1752 - 
val_loss: 2.4267\n", + "\n", + "Epoch 00281: val_loss did not improve from 2.41095\n", + "Epoch 282/500\n", + "\n", + "Epoch 00282: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1497 - val_loss: 2.4363\n", + "\n", + "Epoch 00282: val_loss did not improve from 2.41095\n", + "Epoch 283/500\n", + "\n", + "Epoch 00283: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1504 - val_loss: 2.4205\n", + "\n", + "Epoch 00283: val_loss did not improve from 2.41095\n", + "Epoch 284/500\n", + "\n", + "Epoch 00284: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1331 - val_loss: 2.4749\n", + "\n", + "Epoch 00284: val_loss did not improve from 2.41095\n", + "Epoch 285/500\n", + "\n", + "Epoch 00285: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1746 - val_loss: 2.4471\n", + "\n", + "Epoch 00285: val_loss did not improve from 2.41095\n", + "Epoch 286/500\n", + "\n", + "Epoch 00286: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1463 - val_loss: 2.4463\n", + "\n", + "Epoch 00286: val_loss did not improve from 2.41095\n", + "Epoch 287/500\n", + "\n", + "Epoch 00287: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1568 - val_loss: 2.4754\n", + "\n", + "Epoch 00287: val_loss did not improve from 2.41095\n", + "Epoch 288/500\n", + "\n", + "Epoch 00288: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1094 - val_loss: 2.4033\n", + "\n", + "Epoch 00288: val_loss improved from 2.41095 to 2.40333, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 289/500\n", + "\n", + "Epoch 00289: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1294 - val_loss: 2.4347\n", + "\n", + "Epoch 00289: val_loss did not improve from 2.40333\n", + "Epoch 290/500\n", + "\n", + "Epoch 00290: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1655 - val_loss: 2.4334\n", + "\n", + "Epoch 00290: val_loss did not improve from 2.40333\n", + "Epoch 291/500\n", + "\n", + "Epoch 00291: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1800 - val_loss: 2.4345\n", + "\n", + "Epoch 00291: val_loss did not improve from 2.40333\n", + "Epoch 292/500\n", + "\n", + "Epoch 00292: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1725 - val_loss: 2.4484\n", + "\n", + "Epoch 00292: val_loss did not improve from 2.40333\n", + "Epoch 293/500\n", + "\n", + "Epoch 00293: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.2019 - val_loss: 2.4093\n", + "\n", + "Epoch 00293: val_loss did not improve from 2.40333\n", + "Epoch 294/500\n", + "\n", + "Epoch 00294: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1663 - val_loss: 
2.4833\n", + "\n", + "Epoch 00294: val_loss did not improve from 2.40333\n", + "Epoch 295/500\n", + "\n", + "Epoch 00295: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1420 - val_loss: 2.4724\n", + "\n", + "Epoch 00295: val_loss did not improve from 2.40333\n", + "Epoch 296/500\n", + "\n", + "Epoch 00296: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.0923 - val_loss: 2.4685\n", + "\n", + "Epoch 00296: val_loss did not improve from 2.40333\n", + "Epoch 297/500\n", + "\n", + "Epoch 00297: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1342 - val_loss: 2.4261\n", + "\n", + "Epoch 00297: val_loss did not improve from 2.40333\n", + "Epoch 298/500\n", + "\n", + "Epoch 00298: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1697 - val_loss: 2.4211\n", + "\n", + "Epoch 00298: val_loss did not improve from 2.40333\n", + "Epoch 299/500\n", + "\n", + "Epoch 00299: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1354 - val_loss: 2.4090\n", + "\n", + "Epoch 00299: val_loss did not improve from 2.40333\n", + "Epoch 300/500\n", + "\n", + "Epoch 00300: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1326 - val_loss: 2.4623\n", + "\n", + "Epoch 00300: val_loss did not improve from 2.40333\n", + "Epoch 301/500\n", + "\n", + "Epoch 00301: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1773 - val_loss: 2.4599\n", + "\n", + "Epoch 00301: val_loss did not improve from 2.40333\n", + "Epoch 302/500\n", + "\n", + "Epoch 00302: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1958 - val_loss: 2.4428\n", + "\n", + "Epoch 00302: val_loss did not improve from 2.40333\n", + "Epoch 303/500\n", + "\n", + "Epoch 00303: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0970 - val_loss: 2.4402\n", + "\n", + "Epoch 00303: val_loss did not improve from 2.40333\n", + "Epoch 304/500\n", + "\n", + "Epoch 00304: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1320 - val_loss: 2.4646\n", + "\n", + "Epoch 00304: val_loss did not improve from 2.40333\n", + "Epoch 305/500\n", + "\n", + "Epoch 00305: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1519 - val_loss: 2.4214\n", + "\n", + "Epoch 00305: val_loss did not improve from 2.40333\n", + "Epoch 306/500\n", + "\n", + "Epoch 00306: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1758 - val_loss: 2.4445\n", + "\n", + "Epoch 00306: val_loss did not improve from 2.40333\n", + "Epoch 307/500\n", + "\n", + "Epoch 00307: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1346 - val_loss: 2.4386\n", + "\n", + "Epoch 00307: val_loss did not improve from 
2.40333\n", + "Epoch 308/500\n", + "\n", + "Epoch 00308: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0994 - val_loss: 2.4430\n", + "\n", + "Epoch 00308: val_loss did not improve from 2.40333\n", + "Epoch 309/500\n", + "\n", + "Epoch 00309: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 798ms/step - loss: 2.0948 - val_loss: 2.4618\n", + "\n", + "Epoch 00309: val_loss did not improve from 2.40333\n", + "Epoch 310/500\n", + "\n", + "Epoch 00310: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1108 - val_loss: 2.4125\n", + "\n", + "Epoch 00310: val_loss did not improve from 2.40333\n", + "Epoch 311/500\n", + "\n", + "Epoch 00311: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1230 - val_loss: 2.4152\n", + "\n", + "Epoch 00311: val_loss did not improve from 2.40333\n", + "Epoch 312/500\n", + "\n", + "Epoch 00312: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1885 - val_loss: 2.3989\n", + "\n", + "Epoch 00312: val_loss improved from 2.40333 to 2.39892, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 313/500\n", + "\n", + "Epoch 00313: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1826 - val_loss: 2.4245\n", + "\n", + "Epoch 00313: val_loss did not improve from 2.39892\n", + "Epoch 314/500\n", + "\n", + "Epoch 00314: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1490 - val_loss: 2.4514\n", + "\n", + "Epoch 00314: val_loss did not improve from 2.39892\n", + "Epoch 315/500\n", + "\n", + "Epoch 00315: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1385 - val_loss: 2.4354\n", + "\n", + "Epoch 00315: val_loss did not improve from 2.39892\n", + "Epoch 316/500\n", + "\n", + "Epoch 00316: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1267 - val_loss: 2.4204\n", + "\n", + "Epoch 00316: val_loss did not improve from 2.39892\n", + "Epoch 317/500\n", + "\n", + "Epoch 00317: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1377 - val_loss: 2.3919\n", + "\n", + "Epoch 00317: val_loss improved from 2.39892 to 2.39192, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 318/500\n", + "\n", + "Epoch 00318: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1597 - val_loss: 2.4013\n", + "\n", + "Epoch 00318: val_loss did not improve from 2.39192\n", + "Epoch 319/500\n", + "\n", + "Epoch 00319: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1371 - val_loss: 2.4382\n", + "\n", + "Epoch 00319: val_loss did not improve from 2.39192\n", + "Epoch 320/500\n", + "\n", + "Epoch 00320: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 
798ms/step - loss: 2.1655 - val_loss: 2.3971\n", + "\n", + "Epoch 00320: val_loss did not improve from 2.39192\n", + "Epoch 321/500\n", + "\n", + "Epoch 00321: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1480 - val_loss: 2.3843\n", + "\n", + "Epoch 00321: val_loss improved from 2.39192 to 2.38430, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 322/500\n", + "\n", + "Epoch 00322: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1041 - val_loss: 2.4188\n", + "\n", + "Epoch 00322: val_loss did not improve from 2.38430\n", + "Epoch 323/500\n", + "\n", + "Epoch 00323: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1651 - val_loss: 2.3858\n", + "\n", + "Epoch 00323: val_loss did not improve from 2.38430\n", + "Epoch 324/500\n", + "\n", + "Epoch 00324: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1550 - val_loss: 2.3975\n", + "\n", + "Epoch 00324: val_loss did not improve from 2.38430\n", + "Epoch 325/500\n", + "\n", + "Epoch 00325: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1363 - val_loss: 2.4203\n", + "\n", + "Epoch 00325: val_loss did not improve from 2.38430\n", + "Epoch 326/500\n", + "\n", + "Epoch 00326: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1205 - val_loss: 2.4071\n", + "\n", + "Epoch 00326: val_loss did not improve from 2.38430\n", + "Epoch 327/500\n", + "\n", + "Epoch 00327: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0460 - val_loss: 2.4163\n", + "\n", + "Epoch 00327: val_loss did not improve from 2.38430\n", + "Epoch 328/500\n", + "\n", + "Epoch 00328: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1368 - val_loss: 2.4043\n", + "\n", + "Epoch 00328: val_loss did not improve from 2.38430\n", + "Epoch 329/500\n", + "\n", + "Epoch 00329: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1165 - val_loss: 2.4102\n", + "\n", + "Epoch 00329: val_loss did not improve from 2.38430\n", + "Epoch 330/500\n", + "\n", + "Epoch 00330: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0698 - val_loss: 2.4503\n", + "\n", + "Epoch 00330: val_loss did not improve from 2.38430\n", + "Epoch 331/500\n", + "\n", + "Epoch 00331: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1698 - val_loss: 2.3874\n", + "\n", + "Epoch 00331: val_loss did not improve from 2.38430\n", + "Epoch 332/500\n", + "\n", + "Epoch 00332: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0500 - val_loss: 2.4484\n", + "\n", + "Epoch 00332: val_loss did not improve from 2.38430\n", + "Epoch 333/500\n", + "\n", + "Epoch 00333: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 
2.1160 - val_loss: 2.4091\n",
+      "\n",
+      "Epoch 00333: val_loss did not improve from 2.38430\n",
+      "[Epochs 334-489 condensed: every epoch repeats 'LearningRateScheduler setting learning rate to 1e-05.' and runs 100/100 steps in ~80s (~799ms/step); training loss drifts from ~2.15 down to ~2.0. val_loss improves, saving the model to experimento_ssd300_fault_1.h5, at epoch 362 (2.38430 -> 2.38138), 363 (-> 2.36234), 385 (-> 2.35402), 406 (-> 2.34146), 454 (-> 2.33563) and 470 (-> 2.33556); every other epoch logs 'val_loss did not improve'.]\n",
+      "[Epochs 490-500 condensed: one further improvement at epoch 491 (val_loss 2.33556 -> 2.33299, model saved to experimento_ssd300_fault_1.h5); training ends at epoch 500 with loss 2.0526, val_loss 2.3589 and 'Epoch 00500: val_loss did not improve from 2.33299'.]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# MODEL TRAINING\n",
+    "#####################################################################\n",
+    "# 1: Instantiate two `DataGenerator` objects: one for training, one for validation.\n",
+    "#####################################################################\n",
+    "# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n",
+    "\n",
+    "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
+    "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
+    "\n",
+    "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n",
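+    "# NOTE (editorial sketch, not from the original notebook; folder names below are illustrative,\n",
+    "# the real ones come from the config): in ssd_keras, parse_xml() reads Pascal-VOC style inputs:\n",
+    "#   config['train']['train_image_folder']        images (JPEG/PNG)\n",
+    "#   config['train']['train_annot_folder']        one Pascal-VOC XML annotation per image\n",
+    "#   config['train']['train_image_set_filename']  text file listing image ids, one per line\n",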
+    "\n",
+    "# The XML parser needs to know what object class names to look for and in which order to map them to integers.\n",
+    "classes = ['background'] + labels\n",
+    "\n",
+    "train_dataset.parse_xml(images_dirs=[config['train']['train_image_folder']],\n",
+    "                        image_set_filenames=[config['train']['train_image_set_filename']],\n",
+    "                        annotations_dirs=[config['train']['train_annot_folder']],\n",
+    "                        classes=classes,\n",
+    "                        include_classes='all',\n",
+    "                        exclude_truncated=False,\n",
+    "                        exclude_difficult=False,\n",
+    "                        ret=False)\n",
+    "\n",
+    "val_dataset.parse_xml(images_dirs=[config['test']['test_image_folder']],\n",
+    "                      image_set_filenames=[config['test']['test_image_set_filename']],\n",
+    "                      annotations_dirs=[config['test']['test_annot_folder']],\n",
+    "                      classes=classes,\n",
+    "                      include_classes='all',\n",
+    "                      exclude_truncated=False,\n",
+    "                      exclude_difficult=False,\n",
+    "                      ret=False)\n",
+    "\n",
+    "#########################\n",
+    "# 3: Set the batch size.\n",
+    "#########################\n",
+    "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n",
+    "\n",
+    "#########################\n",
+    "# 4: Set the image transformations for pre-processing and data augmentation options.\n",
+    "#########################\n",
+    "# For the training generator, the augmentation chain is defined below, after the encoder.\n",
+    "\n",
+    "# For the validation generator:\n",
+    "convert_to_3_channels = ConvertTo3Channels()\n",
+    "resize = Resize(height=img_height, width=img_width)\n",
+    "\n",
+    "#########################\n",
+    "# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n",
+    "#########################\n",
+    "# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\n",
+    "if config['model']['backend'] == 'ssd300':\n",
+    "    predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('fc7_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n",
+    "                       model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n",
+    "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
+    "                                        img_width=img_width,\n",
+    "                                        n_classes=n_classes,\n",
+    "                                        predictor_sizes=predictor_sizes,\n",
+    "                                        scales=scales,\n",
+    "                                        aspect_ratios_per_layer=aspect_ratios,\n",
+    "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                                        steps=steps,\n",
+    "                                        offsets=offsets,\n",
+    "                                        clip_boxes=clip_boxes,\n",
+    "                                        variances=variances,\n",
+    "                                        matching_type='multi',\n",
+    "                                        pos_iou_threshold=0.5,\n",
+    "                                        neg_iou_limit=0.5,\n",
+    "                                        normalize_coords=normalize_coords)\n",
+    "\n",
+    "elif config['model']['backend'] == 'ssd7':\n",
+    "    predictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n",
+    "                       model.get_layer('classes5').output_shape[1:3],\n",
+    "                       model.get_layer('classes6').output_shape[1:3],\n",
+    "                       model.get_layer('classes7').output_shape[1:3]]\n",
+    "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
+    "                                        img_width=img_width,\n",
+    "                                        n_classes=n_classes,\n",
+    "                                        predictor_sizes=predictor_sizes,\n",
+    "                                        scales=scales,\n",
+    "                                        aspect_ratios_global=aspect_ratios,\n",
+    "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                                        steps=steps,\n",
+    "                                        offsets=offsets,\n",
+    "                                        clip_boxes=clip_boxes,\n",
+    "                                        variances=variances,\n",
+    "                                        matching_type='multi',\n",
+    "                                        pos_iou_threshold=0.5,\n",
+    "                                        neg_iou_limit=0.3,\n",
+    "                                        normalize_coords=normalize_coords)\n",
+    "\n",
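+    "# NOTE (editorial sketch, not from the original notebook): SSDInputEncoder builds one anchor-box\n",
+    "# grid per entry in predictor_sizes, and in ssd_keras `scales` must hold len(predictor_sizes)+1\n",
+    "# values (7 for the six ssd300 predictor layers above, 5 for ssd7). If the config supplies no\n",
+    "# explicit list, a hedged equivalent would be:\n",
+    "#   scales = np.linspace(min_scale, max_scale, len(predictor_sizes) + 1)\n",
+    "\n",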
+    "data_augmentation_chain = DataAugmentationVariableInputSize(resize_height=img_height,\n",
+    "                                                             resize_width=img_width,\n",
+    "                                                             random_brightness=(-48, 48, 0.5),\n",
+    "                                                             random_contrast=(0.5, 1.8, 0.5),\n",
+    "                                                             random_saturation=(0.5, 1.8, 0.5),\n",
+    "                                                             random_hue=(18, 0.5),\n",
+    "                                                             random_flip=0.5,\n",
+    "                                                             n_trials_max=3,\n",
+    "                                                             clip_boxes=True,\n",
+    "                                                             overlap_criterion='area',\n",
+    "                                                             bounds_box_filter=(0.3, 1.0),\n",
+    "                                                             bounds_validator=(0.5, 1.0),\n",
+    "                                                             n_boxes_min=1,\n",
+    "                                                             background=(0,0,0))\n",
+    "\n",
+    "#########################\n",
+    "# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n",
+    "#########################\n",
+    "train_generator = train_dataset.generate(batch_size=batch_size,\n",
+    "                                         shuffle=True,\n",
+    "                                         transformations=[data_augmentation_chain],\n",
+    "                                         label_encoder=ssd_input_encoder,\n",
+    "                                         returns={'processed_images',\n",
+    "                                                  'encoded_labels'},\n",
+    "                                         keep_images_without_gt=False)\n",
+    "\n",
+    "val_generator = val_dataset.generate(batch_size=batch_size,\n",
+    "                                     shuffle=False,\n",
+    "                                     transformations=[convert_to_3_channels,\n",
+    "                                                      resize],\n",
+    "                                     label_encoder=ssd_input_encoder,\n",
+    "                                     returns={'processed_images',\n",
+    "                                              'encoded_labels'},\n",
+    "                                     keep_images_without_gt=False)\n",
+    "\n",
+    "# Summary of training instances per class.\n",
+    "category_train_list = []\n",
+    "for image_label in train_dataset.labels:\n",
+    "    category_train_list += [i[0] for i in image_label]\n",
+    "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n",
+    "for i in summary_category_training.keys():\n",
+    "    print(i, ': {:.0f}'.format(summary_category_training[i]))\n",
+    "\n",
+    "# Get the number of samples in the training and validation datasets.\n",
+    "train_dataset_size = train_dataset.get_dataset_size()\n",
+    "val_dataset_size = val_dataset.get_dataset_size()\n",
+    "\n",
+    "print(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\n",
+    "print(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n",
+    "\n",
+    "#########################\n",
+    "# Define model callbacks.\n",
+    "#########################\n",
+    "model_checkpoint = ModelCheckpoint(filepath=config['train']['saved_weights_name'],\n",
+    "                                   monitor='val_loss',\n",
+    "                                   verbose=1,\n",
+    "                                   save_best_only=True,\n",
+    "                                   save_weights_only=False,\n",
+    "                                   mode='auto',\n",
+    "                                   period=1)\n",
+    "\n",
+    "csv_logger = CSVLogger(filename='log.csv',\n",
+    "                       separator=',',\n",
+    "                       append=True)\n",
+    "\n",
+    "learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n",
+    "                                                verbose=1)\n",
+    "\n",
+    "terminate_on_nan = TerminateOnNaN()\n",
+    "\n",
+    "callbacks = [model_checkpoint,\n",
+    "             csv_logger,\n",
+    "             learning_rate_scheduler,\n",
+    "             terminate_on_nan]\n",
+    "\n",
+    "# Draw one batch up front to make sure the training generator works before the long fit.\n",
+    "batch_images, batch_labels = next(train_generator)\n",
+    "\n",
+    "initial_epoch = 0\n",
+    "final_epoch = 500  # config['train']['nb_epochs']\n",
+    "steps_per_epoch = 100\n",
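+    "# NOTE (editorial, hedged): steps_per_epoch is pinned at 100 batches instead of being derived from\n",
+    "# the data; an equivalent data-driven sketch would be ceil(train_dataset_size / batch_size).\n",
+    "# Likewise, validation_steps below evaluates ceil(val_dataset_size/batch_size*10), i.e. roughly ten\n",
+    "# passes over the validation generator per epoch as written; drop the *10 for a single pass.\n",
+ 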
"\n", + "history = model.fit_generator(generator=train_generator,\n", + " steps_per_epoch=steps_per_epoch,\n", + " epochs=final_epoch,\n", + " callbacks=callbacks,\n", + " validation_data=val_generator,\n", + " validation_steps=ceil(val_dataset_size/batch_size*10),\n", + " initial_epoch=initial_epoch,\n", + " verbose = 1 if config['train']['debug'] else 2)\n", + "\n", + "history_path = config['train']['saved_weights_name'].split('.')[0] + '_history'\n", + "\n", + "np.save(history_path, history.history)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['background', '1']" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "classes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dict_keys(['val_loss', 'loss', 'lr'])\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xl8nFXd///XZyaTfV+bJmmb7jvdKK2UHVpaFFSQTVCRn8Vbb0VvQUFuQf2qcLsgoghWVgUR2axAgZalbIWWbkD3dG/SNkmzNHsyy/n9ca4sTZM0LZ1MMvN5Ph55zMx1XTPXOdPpvOecc13nEmMMSimlIpcr1AVQSikVWhoESikV4TQIlFIqwmkQKKVUhNMgUEqpCKdBoJRSEU6DQKkeiMijIvKLXm67W0TO/7Svo1Rf0yBQSqkIp0GglFIRToNADXhOl8zNIvKxiNSLyEMikiMiL4tIrYi8JiJpHba/WEQ2iki1iCwXkXEd1k0VkbXO854CYjvt67Mist557goRmXyCZf6GiGwXkUoR+Y+IDHaWi4j8XkTKROSwU6eJzroFIrLJKVuJiNx0Qm+YUp1oEKhwcSlwATAa+BzwMvBjIBP7Of8ugIiMBp4EvgdkAUuAF0QkWkSigX8DfwfSgaed18V57jTgYeAGIAP4C/AfEYk5noKKyLnAncDlQC6wB/ins3oucKZTj1TgCqDCWfcQcIMxJgmYCLxxPPtVqjsaBCpc/NEYU2qMKQHeAVYaY9YZY5qB54GpznZXAC8ZY5YZY7zAb4E44DPALMAD3GOM8RpjngE+7LCPbwB/McasNMb4jTGPAc3O847Hl4GHjTFrnfLdCswWkWGAF0gCxgJijNlsjDngPM8LjBeRZGNMlTFm7XHuV6kuaRCocFHa4X5jF48TnfuDsb/AATDGBIB9QJ6zrsQcORPjng73hwI/cLqFqkWkGihwnnc8OpehDvurP88Y8wbwJ+A+oFREFolIsrPppcACYI+IvCUis49zv0p1SYNARZr92C90wPbJY7/MS4ADQJ6zrNWQDvf3Ab80xqR2+Is3xjz5KcuQgO1qKgEwxtxrjJkOTMB2Ed3sLP/QGHMJkI3twvrXce5XqS5pEKhI8y/gIhE5T0Q8wA+w3TsrgPcBH/BdEYkSkS8CMzs896/AN0XkNGdQN0FELhKRpOMswz+A60RkijO+8CtsV9ZuETnVeX0PUA80AX5nDOPLIpLidGnVAP5P8T4o1UaDQEUUY8xW4Brgj8Ah7MDy54wxLcaYFuCLwNeAKux4wnMdnrsaO07wJ2f9dmfb4y3D68BPgGexrZARwJXO6mRs4FRhu48qsOMYANcCu0WkBvimUw+lPjXRC9MopVRk0xaBUkpFuKAFgYg87JwUs6HDsnQRWSYiRc5tWk+voZRSKviC2SJ4FLiw07JbgNeNMaOA153HSimlQiioYwTOCTIvGmNaT5HfCpxtjDkgIrnAcmPMmKAVQCml1DFF9fH+clrPknTCILu7DUVkIbAQICEhYfrYsWOPe2cNLT52lNdTmJlAYkxfV1UppUJrzZo1h4wxWcfart9+OxpjFgGLAGbMmGFWr1593K+xencllz3wPou+PpMzRx/zvVBKqbAiInuOvVXfHzVU6nQJ4dyWBXNnreeH6gGySinVvb4Ogv8AX3XufxVYHNzd2STQcyWUUqp7wTx89EnsKftjRKRYRK4H7gIuEJEi7JTBdwVr/7YM9lZjQCmluhe0MQJjzFXdrDrvZLy+1+uluLiYpqambrdx+wL89eJcUpvL2Ly5otvt+rPY2Fjy8/PxeDyhLopSKkz128HiYykuLiYpKYlhw4Zx5GSR7RpafEhZHcMyEkiOG3hfpMYYKioqKC4uprCwMNTFUUqFqQE7xURTUxMZGRndhkA4EBEyMjJ6bPUopdSnNWCDAAjrEGgVCXVUSoXWgA4CpZRSn15YB0Hrb+lgHDVUXV3Nn//85+N+3oIFC6iurg5CiZRS6sSEdRAEU3dB4Pf3fNGoJUuWkJqaGqxiKaXUcRuwRw2F2i233MKOHTuYMmUKHo+HxMREcnNzWb9+PZs2beLzn/88+/bto6mpiRtvvJGFCxcCMGzYMFavXk1dXR3z589nzpw5rFixgry8PBYvXkxcXFyIa6aUijRhEQQ/e2Ejm/bXHLU8YAyNLX5iPW7cruMbdB0/OJk7Pjeh2/V33XUXGzZsYP369SxfvpyLLrqIDRs2tB3m+fDDD5Oenk5jYyOnnnoql156KRkZGUe8RlFREU8++SR//etfufzyy3n22We55hq9+qBSqm+FRRD0BzNnzjziWP97772X559/HoB9+/ZRVFR0VBAUFhYyZcoUAKZPn87u3bv7rLxKKdUqLIKgu1/ujV4/RaW1DE2PJyU+OqhlS
EhIaLu/fPlyXnvtNd5//33i4+M5++yzuzwXICYmpu2+2+2msbExqGVUSqmuhPVgcTCPGkpKSqK2trbLdYcPHyYtLY34+Hi2bNnCBx98EIQSKKXUyREWLYJQyMjI4PTTT2fixInExcWRk5PTtu7CCy/kgQceYPLkyYwZM4ZZs2aFsKRKKdWzoF6q8mTp6sI0mzdvZty4cT0+r8nrZ1tpLUPS40kNctdQMPWmrkop1ZmIrDHGzDjWdmHdNaSUUurYNAiUUirCaRAopVSE0yBQSqkIF9ZBEMzDR5VSKlyEdRAopZQ6tsgIgiA0CU50GmqAe+65h4aGhpNcIqWUOjHhHQRBvLiXBoFSKlxExJnFwRgj6DgN9QUXXEB2djb/+te/aG5u5gtf+AI/+9nPqK+v5/LLL6e4uBi/389PfvITSktL2b9/P+eccw6ZmZm8+eabQSidUkr1XngEwcu3wMFPjlrsMYbhLX5iPC5wHWfjZ9AkmH9Xt6s7TkO9dOlSnnnmGVatWoUxhosvvpi3336b8vJyBg8ezEsvvQTYOYhSUlK4++67efPNN8nMzDy+MimlVBCEd9dQH1m6dClLly5l6tSpTJs2jS1btlBUVMSkSZN47bXX+NGPfsQ777xDSkpKqIuqlFJHCY8WQTe/3H0+PzsP1pKfFk96QvDmGjLGcOutt3LDDTcctW7NmjUsWbKEW2+9lblz53L77bcHrRxKKXUiwrxFELwzCTpOQz1v3jwefvhh6urqACgpKaGsrIz9+/cTHx/PNddcw0033cTatWuPeq5SSoVaeLQIQqDjNNTz58/n6quvZvbs2QAkJiby+OOPs337dm6++WZcLhcej4f7778fgIULFzJ//nxyc3N1sFgpFXJhPQ11iy/AloM15KXFkZEQ0+O2/ZlOQ62UOhE6DTVBPY1AKaXCRlgHQZv+3+hRSqmQGdBBcMxurTBoEgyErjul1MA2YIMgNjaWioqKHr8oB/rso8YYKioqiI2NDXVRlFJhbMAeNZSfn09xcTHl5eXdbhMwhtLqJprKPZTHDsyqxsbGkp+fH+piKKXC2MD8dgQ8Hg+FhYU9btPk9XPRT17hhxeO4VtTR/ZRyZRSamAZsF1DvRHlsp1DPv9A7RxSSqngC0kQiMj3RWSjiGwQkSdFJCid4O62IAgE4+WVUios9HkQiEge8F1ghjFmIuAGrgzSvvC4BW9AWwRKKdWdUHUNRQFxIhIFxAP7g7Yjl0tbBEop1YM+DwJjTAnwW2AvcAA4bIxZ2nk7EVkoIqtFZHVPRwYdS5Rb8OoYgVJKdSsUXUNpwCVAITAYSBCRazpvZ4xZZIyZYYyZkZWVdcL787hdeLVFoJRS3QpF19D5wC5jTLkxxgs8B3wmWDuLcokeNaSUUj0IRRDsBWaJSLyICHAesDlYO/O4XXgD2iJQSqnuhGKMYCXwDLAW+MQpw6Jg7c/j1haBUkr1JCRnFhtj7gDu6It9Rbld+LRFoJRS3QrrM4vBjhHoUUNKKdW9sA8Cj1vPI1BKqZ6EfRBEuQWfnlmslFLdCvsg8Lj0PAKllOpJ2AdBlB41pJRSPYqAIHDppHNKKdWD8A6CPe8zuXmdDhYrpVQPwjsI3vkdl1U/pF1DSinVg/AOAreHKHw6xYRSSvUgvIPAFYXb+LVFoJRSPQjvIHB7iMKrYwRKKdWDkMw11Gfc0UQZP16jLQKllOpOeLcIXFG48WmLQCmlehDeQeD24DY+HSNQSqkehHcQuDy4jJ8WbREopVS3wjsIWlsEemaxUkp1K7yDwBWFy/jwBwxGB4yVUqpL4R0ETosAoL7FH+LCKKVU/xTeQeDyIBhcBKiqbwl1aZRSql8K7yBw29MkPPio1CBQSqkuhXcQuDwAROGnskGDQCmluhLeQeC2QeDBR7UGgVJKdSm8g8DV2jXkp7LeG+LCKKVU/xTeQeCOBiBa/DpYrJRS3QjzILBdQxlxLqq0a0gppboU3kHgDBanx4kGgVJKdSO8g8A5fDQ9VvTwUaWU6kZ4B4HTIkiLFap0sFgppboU3kHg7hAE2jWklFJdCu8gcA4fTXWCQCeeU0qpo4V3EDgtgtRowes31DX7QlwgpZTqf8I7CJwxguRoe2EaHSdQSqmjhXcQuFuDwD7U+YaUUupo4R0EzhhBWqyt5sHDTaEsjVJK9UshCQIRSRWRZ0Rki4hsFpHZQdmRM8VERpwAUFLdGJTdKKXUQBYVov3+AXjFGHOZiEQD8UHZi9M1lBBliI92U1KlQaCUUp31eRCISDJwJvA1AGNMCxCcznuna0gCPganxlFS3RCU3Sil1EAWiq6h4UA58IiIrBORB0UkofNGIrJQRFaLyOry8vIT25PTIsDvJS81jmJtESil1FFCEQRRwDTgfmPMVKAeuKXzRsaYRcaYGcaYGVlZWSe2J4/T4+RtYMygJIrK6mj26UXslVKqo1AEQTFQbIxZ6Tx+BhsMJ19sCiDQUMnUglRafAE27a8Jyq6UUmqg6vMgMMYcBPaJyBhn0XnApqDszOWGuDRoqGBalj2p7LbnN+DzB4KyO6WUGohCdR7Bd4AnRORjYArwq6DtKT4d1j9BzgPjueOUGjYdqGHXofqg7U4ppQaakASBMWa90/8/2RjzeWNMVdB2FpcOPnsi2bysSgC2ltYGbXdKKTXQhPeZxQDxGW13s1KTcQmU79oAO94IYaGUUqr/iIAgSG+76/HWMCIrkevWfQn+/oUQFkoppfqP8A+C6MT2+41VfOucEaEri1JK9UPhHwTDz4bM0fZ+YxUXTRrcvk4vVKOUUhEQBGMXwH9/COkjoLGK6KgOVfY1h65cSinVT4R/ELSKS4OGyiMWFZdVhKgwSinVf0ROEMSnw843oWpP26IPi4pDWCCllOofIicIhs2xt+/e3bZoe0lpiAqjlFL9R+QEwek3QkIWlKxtW6RdQ0opFUlBAJBSAAc/bntYXlGFV+cdUkpFuF4FgYjcKCLJYj0kImtFZG6wC3fSpRYc8TA60Mjq3cGb3UIppQaC3rYIvm6MqQHmAlnAdcBdQStVsKQcGQTJbi9LPjkQosIopVT/0NsgEOd2AfCIMeajDssGjkGTjnh4So6Hd4pO8OpnSikVJnp7zeI1IrIUKARuFZEkYOB1rk++Amr223MKXvweY9Ld7P64gdKaJnKSY0NdOqWUConetgiux15O8lRjTAPgwXYPDSwicMb/wKTLABiRZqv/7NpiXt9citEpJ5RSEai3QTAb2GqMqRaRa4D/BQ4Hr1hB5lzLeFB0M2eMyuTXr2zl+sdWs7HzZSwPfAzVe0NQQKWU6ju9DYL7gQYROQX4IbAH+FvQShVsLjdkjkZKN/Cziye0Ld5RXnfkdn85A+6ZhFJKhbPeBoHP2H6TS4A/GGP+ACQFr1h9oGAm7FvJ8MwEXv3emQBsL6s7xpOUUir89DYIakXkVuBa4CURcWPHCQauvBnQWAXVexgzKInCzISjWwRKKRUBehsEVwDN2PMJDgJ5wG+CVqq+0HqNgsqdAIzOSWRT5zECpZSKAL0KAufL/wkgRUQ+
[... base64 image/png payload truncated: the rendered output is the model-loss plot (training vs. validation loss per epoch) drawn by the cell below ...]\n",
+      "text/plain": [
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "experimento_ssd300_fault_1.h5\n" + ] + } + ], + "source": [ + "#Graficar aprendizaje\n", + "\n", + "history_path =config['train']['saved_weights_name'].split('.')[0] + '_history'\n", + "\n", + "hist_load = np.load(history_path + '.npy',allow_pickle=True).item()\n", + "\n", + "print(hist_load.keys())\n", + "\n", + "# summarize history for loss\n", + "plt.plot(hist_load['loss'])\n", + "plt.plot(hist_load['val_loss'])\n", + "plt.title('model loss')\n", + "plt.ylabel('loss')\n", + "plt.xlabel('epoch')\n", + "plt.legend(['train', 'test'], loc='upper left')\n", + "plt.ylim((0, 10)) \n", + "plt.show()\n", + "\n", + "print(config['train']['saved_weights_name'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Evaluación del Modelo" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 112.45it/s]\n", + "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 57.78it/s]\n", + "Number of images in the evaluation dataset: 2\n", + "\n", + "Producing predictions batch-wise: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s]\n", + "Matching predictions to ground truth, class 1/1.: 100%|██████████| 400/400 [00:00<00:00, 9288.89it/s]\n", + "Computing precisions and recalls, class 1/1\n", + "Computing average precision, class 1/1\n", + "400 instances of class 1 with average precision: 0.7948\n", + "mAP using the weighted average of precisions among classes: 0.7948\n", + "mAP: 0.7948\n", + "1 AP 0.795\n", + "\n", + " mAP 0.795\n" + ] + } + ], + "source": [ + "\n", + "config_path = 'config_300_fault_1.json'\n", + "\n", + "with open(config_path) as config_buffer:\n", + " config = json.loads(config_buffer.read())\n", + "\n", + " \n", + "model_mode = 'training'\n", + "# TODO: Set the path to the `.h5` file of the model to be loaded.\n", + "model_path = config['train']['saved_weights_name']\n", + "\n", + "# We need to create an SSDLoss object in order to pass that to the model loader.\n", + "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + "K.clear_session() # Clear previous models from memory.\n", + "\n", + "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + " 'L2Normalization': L2Normalization,\n", + " 'DecodeDetections': DecodeDetections,\n", + " 'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + " \n", + "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "\n", + "# 2: Parse the image and label lists for the training and validation datasets. 
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Model Evaluation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 112.45it/s]\n",
+      "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 57.78it/s]\n",
+      "Number of images in the evaluation dataset: 2\n",
+      "\n",
+      "Producing predictions batch-wise: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s]\n",
+      "Matching predictions to ground truth, class 1/1.: 100%|██████████| 400/400 [00:00<00:00, 9288.89it/s]\n",
+      "Computing precisions and recalls, class 1/1\n",
+      "Computing average precision, class 1/1\n",
+      "400 instances of class 1 with average precision: 0.7948\n",
+      "mAP using the weighted average of precisions among classes: 0.7948\n",
+      "mAP: 0.7948\n",
+      "1 AP 0.795\n",
+      "\n",
+      " mAP 0.795\n"
+     ]
+    }
+   ],
+   "source": [
+    "config_path = 'config_300_fault_1.json'\n",
+    "\n",
+    "with open(config_path) as config_buffer:\n",
+    "    config = json.loads(config_buffer.read())\n",
+    "\n",
+    "model_mode = 'training'\n",
+    "# TODO: Set the path to the `.h5` file of the model to be loaded.\n",
+    "model_path = config['train']['saved_weights_name']\n",
+    "\n",
+    "# We need to create an SSDLoss object in order to pass that to the model loader.\n",
+    "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "\n",
+    "K.clear_session() # Clear previous models from memory.\n",
+    "\n",
+    "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
+    "                                               'L2Normalization': L2Normalization,\n",
+    "                                               'DecodeDetections': DecodeDetections,\n",
+    "                                               'compute_loss': ssd_loss.compute_loss})\n",
+    "\n",
+    "\n",
+    "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
+    "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
+    "\n",
+    "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n",
+    "\n",
+    "# The XML parser needs to know what object class names to look for and in which order to map them to integers.\n",
+    "classes = ['background'] + labels\n",
+    "\n",
+    "train_dataset.parse_xml(images_dirs=[config['train']['train_image_folder']],\n",
+    "                        image_set_filenames=[config['train']['train_image_set_filename']],\n",
+    "                        annotations_dirs=[config['train']['train_annot_folder']],\n",
+    "                        classes=classes,\n",
+    "                        include_classes='all',\n",
+    "                        #classes = ['background', 'panel', 'cell'],\n",
+    "                        #include_classes=classes,\n",
+    "                        exclude_truncated=False,\n",
+    "                        exclude_difficult=False,\n",
+    "                        ret=False)\n",
+    "\n",
+    "val_dataset.parse_xml(images_dirs=[config['test']['test_image_folder']],\n",
+    "                      image_set_filenames=[config['test']['test_image_set_filename']],\n",
+    "                      annotations_dirs=[config['test']['test_annot_folder']],\n",
+    "                      classes=classes,\n",
+    "                      include_classes='all',\n",
+    "                      #classes = ['background', 'panel', 'cell'],\n",
+    "                      #include_classes=classes,\n",
+    "                      exclude_truncated=False,\n",
+    "                      exclude_difficult=False,\n",
+    "                      ret=False)\n",
+    "\n",
+    "#########################\n",
+    "# 3: Set the batch size.\n",
+    "#########################\n",
+    "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n",
+    "\n",
+    "\n",
+    "evaluator = Evaluator(model=model,\n",
+    "                      n_classes=n_classes,\n",
+    "                      data_generator=val_dataset,\n",
+    "                      model_mode='training')\n",
+    "\n",
+    "results = evaluator(img_height=img_height,\n",
+    "                    img_width=img_width,\n",
+    "                    batch_size=4,\n",
+    "                    data_generator_mode='resize',\n",
+    "                    round_confidences=False,\n",
+    "                    matching_iou_threshold=0.5,\n",
+    "                    border_pixels='include',\n",
+    "                    sorting_algorithm='quicksort',\n",
+    "                    average_precision_mode='sample',\n",
+    "                    num_recall_points=11,\n",
+    "                    ignore_neutral_boxes=True,\n",
+    "                    return_precisions=True,\n",
+    "                    return_recalls=True,\n",
+    "                    return_average_precisions=True,\n",
+    "                    verbose=True)\n",
+    "\n",
+    "mean_average_precision, average_precisions, precisions, recalls = results\n",
+    "total_instances = []\n",
+    "precisions = []  # re-used below to collect the per-class average precisions\n",
+    "\n",
+    "for i in range(1, len(average_precisions)):\n",
+    "\n",
+    "    print('{:.0f} instances of class'.format(len(recalls[i])),\n",
+    "          classes[i], 'with average precision: {:.4f}'.format(average_precisions[i]))\n",
+    "    total_instances.append(len(recalls[i]))\n",
+    "    precisions.append(average_precisions[i])\n",
+    "\n",
+    "if sum(total_instances) == 0:\n",
+    "\n",
+    "    print('No test instances found.')\n",
+    "\n",
+    "else:\n",
+    "\n",
+    "    print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))\n",
+    "    print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))\n",
+    "\n",
+    "    for i in range(1, len(average_precisions)):\n",
+    "        print(\"{:<14}{:<6}{}\".format(classes[i], 'AP', round(average_precisions[i], 3)))\n",
+    "    print()\n",
+    "    print(\"{:<14}{:<6}{}\".format('', 'mAP', round(mean_average_precision, 3)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "1"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ceil(val_dataset_size / batch_size)"
+   ]
+  },
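+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# (Added note, not part of the original run.) The 'weighted' mAP printed by the\n",
+    "# evaluation cell is sum(n_i * AP_i) / sum(n_i) over the positive classes i,\n",
+    "# where n_i is the number of ground-truth instances of class i. With a single\n",
+    "# class (400 instances, AP 0.7948) it necessarily equals the plain mAP:\n",
+    "instances = [400]\n",
+    "aps = [0.7948]\n",
+    "weighted_map = sum(n * ap for n, ap in zip(instances, aps)) / sum(instances)\n",
+    "print('{:.4f}'.format(weighted_map))  # 0.7948\n"
+   ]
+  },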
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Reload the model from the saved weights.\n",
+    "Prediction"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "Training on: \t{'1': 1}\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "from imageio import imread\n",
+    "from keras.preprocessing import image\n",
+    "import time\n",
+    "\n",
+    "config_path = 'config_300_fault_1.json'\n",
+    "input_path = ['fault_jpg_1/']\n",
+    "output_path = 'result_ssd300_fault_1/'\n",
+    "\n",
+    "with open(config_path) as config_buffer:\n",
+    "    config = json.loads(config_buffer.read())\n",
+    "\n",
+    "makedirs(output_path)\n",
+    "###############################\n",
+    "# Parse the annotations\n",
+    "###############################\n",
+    "score_threshold = 0.25\n",
+    "score_threshold_iou = 0.5\n",
+    "labels = config['model']['labels']\n",
+    "categories = {}\n",
+    "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} # category 0 is the background\n",
+    "for i in range(len(labels)): categories[labels[i]] = i+1\n",
+    "print('\\nTraining on: \\t' + str(categories) + '\\n')\n",
+    "\n",
+    "img_height = config['model']['input'] # Height of the model input images\n",
+    "img_width = config['model']['input'] # Width of the model input images\n",
+    "img_channels = 3 # Number of color channels of the model input images\n",
+    "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
+    "classes = ['background'] + labels\n",
+    "\n",
+    "model_mode = 'training'\n",
+    "# TODO: Set the path to the `.h5` file of the model to be loaded.\n",
+    "model_path = config['train']['saved_weights_name']\n",
+    "\n",
+    "# We need to create an SSDLoss object in order to pass that to the model loader.\n",
+    "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "\n",
+    "K.clear_session() # Clear previous models from memory.\n",
+    "\n",
+    "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
+    "                                               'L2Normalization': L2Normalization,\n",
+    "                                               'DecodeDetections': DecodeDetections,\n",
+    "                                               'compute_loss': ssd_loss.compute_loss})\n",
+    "\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
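+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# (Added note, not part of the original run.) decode_detections returns, per\n",
+    "# image, an array of rows [class_id, confidence, xmin, ymin, xmax, ymax] in\n",
+    "# model-input pixels, so the cell below rescales each coordinate by\n",
+    "# original_size / input_size. E.g. xmin = 150 on a 300-px-wide input maps to\n",
+    "# 150 * 4000 / 300 = 2000 on a 4000-px-wide original (4000 is only an\n",
+    "# illustrative width, not a value from this dataset).\n",
+    "print(150 * 4000 / 300)  # 2000.0\n"
+   ]
+  },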
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Total time: 1.982\n",
+      "Average time per image: 0.079\n",
+      "OK\n"
+     ]
+    }
+   ],
+   "source": [
+    "image_paths = []\n",
+    "for inp in input_path:\n",
+    "    if os.path.isdir(inp):\n",
+    "        for inp_file in os.listdir(inp):\n",
+    "            image_paths += [inp + inp_file]\n",
+    "    else:\n",
+    "        image_paths += [inp]\n",
+    "\n",
+    "image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]\n",
+    "times = []\n",
+    "\n",
+    "\n",
+    "for img_path in image_paths:\n",
+    "    orig_images = [] # Store the images here.\n",
+    "    input_images = [] # Store resized versions of the images here.\n",
+    "    #print(img_path)\n",
+    "\n",
+    "    # preprocess image for network\n",
+    "    orig_images.append(imread(img_path))\n",
+    "    img = image.load_img(img_path, target_size=(img_height, img_width))\n",
+    "    img = image.img_to_array(img)\n",
+    "    input_images.append(img)\n",
+    "    input_images = np.array(input_images)\n",
+    "    # process image\n",
+    "    start = time.time()\n",
+    "    y_pred = model.predict(input_images)\n",
+    "    y_pred_decoded = decode_detections(y_pred,\n",
+    "                                       confidence_thresh=score_threshold,\n",
+    "                                       iou_threshold=score_threshold_iou,\n",
+    "                                       top_k=200,\n",
+    "                                       normalize_coords=True,\n",
+    "                                       img_height=img_height,\n",
+    "                                       img_width=img_width)\n",
+    "\n",
+    "    #print(\"processing time: \", time.time() - start)\n",
+    "    times.append(time.time() - start)\n",
+    "    # correct for image scale\n",
+    "\n",
+    "    # visualize detections\n",
+    "    # Set the colors for the bounding boxes\n",
+    "    colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()\n",
+    "\n",
+    "    plt.figure(figsize=(20,12))\n",
+    "    plt.imshow(orig_images[0], cmap='gray')\n",
+    "\n",
+    "    current_axis = plt.gca()\n",
+    "    #print(y_pred)\n",
+    "    for box in y_pred_decoded[0]:\n",
+    "        # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.\n",
+    "\n",
+    "        xmin = box[2] * orig_images[0].shape[1] / img_width\n",
+    "        ymin = box[3] * orig_images[0].shape[0] / img_height\n",
+    "        xmax = box[4] * orig_images[0].shape[1] / img_width\n",
+    "        ymax = box[5] * orig_images[0].shape[0] / img_height\n",
+    "\n",
+    "        color = colors[int(box[0])]\n",
+    "        label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n",
+    "        current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n",
+    "        current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n",
+    "\n",
+    "    #plt.figure(figsize=(15, 15))\n",
+    "    #plt.axis('off')\n",
+    "    save_path = output_path + img_path.split('/')[-1]\n",
+    "    plt.savefig(save_path)\n",
+    "    plt.close()\n",
+    "\n",
+    "file = open(output_path + 'time.txt', 'w')\n",
+    "\n",
+    "file.write('Average time: ' + str(np.mean(times)))\n",
+    "\n",
+    "file.close()\n",
+    "print('Total time: {:.3f}'.format(np.sum(times)))\n",
+    "print('Average time per image: {:.3f}'.format(np.mean(times)))\n",
+    "print('OK')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 : 99\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "# Summary of training instances per class\n",
+    "category_train_list = []\n",
+    "for image_label in train_dataset.labels:\n",
+    "    # fixed: iterate over each image's labels (the original used train_dataset.labels[0],\n",
+    "    # which re-counted only the first image's boxes on every iteration)\n",
+    "    category_train_list += [i[0] for i in image_label]\n",
+    "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n",
+    "for i in summary_category_training.keys():\n",
+    "    print(i, ': {:.0f}'.format(summary_category_training[i]))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 : 99\n"
+     ]
+    }
+   ],
+   "source": [
+    "for i in summary_category_training.keys():\n",
+    "    print(i, ': {:.0f}'.format(summary_category_training[i]))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Primer_resultado_fault_1/Panel_Detector_Fault_1.ipynb b/Primer_resultado_fault_1/Panel_Detector_Fault_1.ipynb new file mode 100644 index 0000000..9abf0e9 --- /dev/null +++ b/Primer_resultado_fault_1/Panel_Detector_Fault_1.ipynb @@ -0,0 +1,4263 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cargar el modelo ssd7 \n", + "(https://github.com/pierluigiferrari/ssd_keras#how-to-fine-tune-one-of-the-trained-models-on-your-own-dataset)\n", + "\n", + "Training del SSD7 (modelo reducido de SSD). Parámetros en config_7.json y descargar VGG_ILSVRC_16_layers_fc_reduced.h5\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'1': 1}\n", + "\n", + "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Colocations handled automatically by placer.\n", + "OK create model\n", + "\n", + "Loading pretrained weights VGG.\n", + "\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "__________________________________________________________________________________________________\n", + "Layer (type) Output Shape Param # Connected to \n", + "==================================================================================================\n", + "input_1 (InputLayer) (None, 400, 400, 3) 0 \n", + "__________________________________________________________________________________________________\n", + "identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "input_mean_normalization (Lambd (None, 400, 400, 3) 0 identity_layer[0][0] \n", + "__________________________________________________________________________________________________\n", + "input_channel_swap (Lambda) (None, 400, 400, 3) 0 input_mean_normalization[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1_1 (Conv2D) (None, 400, 400, 64) 1792 input_channel_swap[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv1_2 (Conv2D) (None, 400, 400, 64) 36928 conv1_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool1 (MaxPooling2D) (None, 200, 200, 64) 0 conv1_2[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "conv2_1 (Conv2D) (None, 200, 200, 128 73856 pool1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2_2 (Conv2D) (None, 200, 200, 128 147584 conv2_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool2 (MaxPooling2D) (None, 100, 100, 128 0 conv2_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3_1 (Conv2D) (None, 100, 100, 256 295168 pool2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3_2 (Conv2D) (None, 100, 100, 256 590080 conv3_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv3_3 (Conv2D) (None, 100, 100, 256 590080 conv3_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool3 (MaxPooling2D) (None, 50, 50, 256) 0 conv3_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_1 (Conv2D) (None, 50, 50, 512) 1180160 pool3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_2 (Conv2D) (None, 50, 50, 512) 2359808 conv4_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3 (Conv2D) (None, 50, 50, 512) 2359808 conv4_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool4 (MaxPooling2D) (None, 25, 25, 512) 0 conv4_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5_1 (Conv2D) (None, 25, 25, 512) 2359808 pool4[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5_2 (Conv2D) (None, 25, 25, 512) 2359808 conv5_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv5_3 (Conv2D) (None, 25, 25, 512) 2359808 conv5_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "pool5 (MaxPooling2D) (None, 25, 25, 512) 0 conv5_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc6 (Conv2D) (None, 25, 25, 1024) 4719616 pool5[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7 (Conv2D) (None, 25, 25, 1024) 1049600 fc6[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_1 (Conv2D) (None, 25, 25, 256) 262400 fc7[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_padding (ZeroPadding2D) (None, 27, 27, 256) 0 conv6_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2 (Conv2D) (None, 13, 13, 512) 1180160 conv6_padding[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_1 
(Conv2D) (None, 13, 13, 128) 65664 conv6_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_padding (ZeroPadding2D) (None, 15, 15, 128) 0 conv7_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2 (Conv2D) (None, 7, 7, 256) 295168 conv7_padding[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_1 (Conv2D) (None, 7, 7, 128) 32896 conv7_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2 (Conv2D) (None, 5, 5, 256) 295168 conv8_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_1 (Conv2D) (None, 5, 5, 128) 32896 conv8_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm (L2Normalization) (None, 50, 50, 512) 512 conv4_3[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2 (Conv2D) (None, 3, 3, 256) 295168 conv9_1[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_conf (Conv2D) (None, 50, 50, 8) 36872 conv4_3_norm[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_conf (Conv2D) (None, 25, 25, 12) 110604 fc7[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_conf (Conv2D) (None, 13, 13, 12) 55308 conv6_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_conf (Conv2D) (None, 7, 7, 12) 27660 conv7_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_conf (Conv2D) (None, 5, 5, 8) 18440 conv8_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_conf (Conv2D) (None, 3, 3, 8) 18440 conv9_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_loc (Conv2D) (None, 50, 50, 16) 73744 conv4_3_norm[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_loc (Conv2D) (None, 25, 25, 24) 221208 fc7[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_loc (Conv2D) (None, 13, 13, 24) 110616 conv6_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_loc (Conv2D) (None, 7, 7, 24) 55320 conv7_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_loc (Conv2D) (None, 5, 5, 16) 36880 conv8_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_loc (Conv2D) (None, 3, 3, 16) 36880 conv9_2[0][0] \n", + "__________________________________________________________________________________________________\n", + 
"conv4_3_norm_mbox_conf_reshape (None, 10000, 2) 0 conv4_3_norm_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_conf_reshape (Reshape) (None, 3750, 2) 0 fc7_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_conf_reshape (Resh (None, 1014, 2) 0 conv6_2_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_conf_reshape (Resh (None, 294, 2) 0 conv7_2_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_conf_reshape (Resh (None, 100, 2) 0 conv8_2_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_conf_reshape (Resh (None, 36, 2) 0 conv9_2_mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_priorbox (Anc (None, 50, 50, 4, 8) 0 conv4_3_norm_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_priorbox (AnchorBoxes) (None, 25, 25, 6, 8) 0 fc7_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_priorbox (AnchorBo (None, 13, 13, 6, 8) 0 conv6_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_priorbox (AnchorBo (None, 7, 7, 6, 8) 0 conv7_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_priorbox (AnchorBo (None, 5, 5, 4, 8) 0 conv8_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_priorbox (AnchorBo (None, 3, 3, 4, 8) 0 conv9_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_conf (Concatenate) (None, 15194, 2) 0 conv4_3_norm_mbox_conf_reshape[0]\n", + " fc7_mbox_conf_reshape[0][0] \n", + " conv6_2_mbox_conf_reshape[0][0] \n", + " conv7_2_mbox_conf_reshape[0][0] \n", + " conv8_2_mbox_conf_reshape[0][0] \n", + " conv9_2_mbox_conf_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_loc_reshape ( (None, 10000, 4) 0 conv4_3_norm_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_loc_reshape (Reshape) (None, 3750, 4) 0 fc7_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_loc_reshape (Resha (None, 1014, 4) 0 conv6_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_loc_reshape (Resha (None, 294, 4) 0 conv7_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_loc_reshape (Resha (None, 100, 4) 0 conv8_2_mbox_loc[0][0] \n", + 
"__________________________________________________________________________________________________\n", + "conv9_2_mbox_loc_reshape (Resha (None, 36, 4) 0 conv9_2_mbox_loc[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv4_3_norm_mbox_priorbox_resh (None, 10000, 8) 0 conv4_3_norm_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "fc7_mbox_priorbox_reshape (Resh (None, 3750, 8) 0 fc7_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv6_2_mbox_priorbox_reshape ( (None, 1014, 8) 0 conv6_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv7_2_mbox_priorbox_reshape ( (None, 294, 8) 0 conv7_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv8_2_mbox_priorbox_reshape ( (None, 100, 8) 0 conv8_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv9_2_mbox_priorbox_reshape ( (None, 36, 8) 0 conv9_2_mbox_priorbox[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_conf_softmax (Activation) (None, 15194, 2) 0 mbox_conf[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_loc (Concatenate) (None, 15194, 4) 0 conv4_3_norm_mbox_loc_reshape[0][\n", + " fc7_mbox_loc_reshape[0][0] \n", + " conv6_2_mbox_loc_reshape[0][0] \n", + " conv7_2_mbox_loc_reshape[0][0] \n", + " conv8_2_mbox_loc_reshape[0][0] \n", + " conv9_2_mbox_loc_reshape[0][0] \n", + "__________________________________________________________________________________________________\n", + "mbox_priorbox (Concatenate) (None, 15194, 8) 0 conv4_3_norm_mbox_priorbox_reshap\n", + " fc7_mbox_priorbox_reshape[0][0] \n", + " conv6_2_mbox_priorbox_reshape[0][\n", + " conv7_2_mbox_priorbox_reshape[0][\n", + " conv8_2_mbox_priorbox_reshape[0][\n", + " conv9_2_mbox_priorbox_reshape[0][\n", + "__________________________________________________________________________________________________\n", + "predictions (Concatenate) (None, 15194, 14) 0 mbox_conf_softmax[0][0] \n", + " mbox_loc[0][0] \n", + " mbox_priorbox[0][0] \n", + "==================================================================================================\n", + "Total params: 23,745,908\n", + "Trainable params: 23,745,908\n", + "Non-trainable params: 0\n", + "__________________________________________________________________________________________________\n" + ] + } + ], + "source": [ + "from keras.optimizers import Adam, SGD\n", + "from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n", + "from keras import backend as K\n", + "from keras.models import load_model\n", + "from math import ceil\n", + "import numpy as np\n", + "from matplotlib import pyplot as plt\n", + "import os\n", + "import json\n", + "import xml.etree.cElementTree as ET\n", + "\n", + "import sys\n", + "sys.path += [os.path.abspath('../../ssd_keras-master')]\n", + "\n", + "from keras_loss_function.keras_ssd_loss import SSDLoss\n", + "from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\n", + "from 
keras_layers.keras_layer_DecodeDetections import DecodeDetections\n",
+    "from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n",
+    "from keras_layers.keras_layer_L2Normalization import L2Normalization\n",
+    "from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n",
+    "from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n",
+    "from data_generator.object_detection_2d_data_generator import DataGenerator\n",
+    "from data_generator.object_detection_2d_geometric_ops import Resize\n",
+    "from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\n",
+    "from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n",
+    "from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n",
+    "from eval_utils.average_precision_evaluator import Evaluator\n",
+    "from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\n",
+    "from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\n",
+    "\n",
+    "\n",
+    "def makedirs(path):\n",
+    "    try:\n",
+    "        os.makedirs(path)\n",
+    "    except OSError:\n",
+    "        if not os.path.isdir(path):\n",
+    "            raise\n",
+    "\n",
+    "\n",
+    "K.tensorflow_backend._get_available_gpus()\n",
+    "\n",
+    "\n",
+    "def lr_schedule(epoch):\n",
+    "    if epoch < 80:\n",
+    "        return 0.001\n",
+    "    elif epoch < 100:\n",
+    "        return 0.0001\n",
+    "    else:\n",
+    "        return 0.00001\n",
+    "\n",
+    "config_path = 'config_300_fault_1.json'\n",
+    "\n",
+    "\n",
+    "with open(config_path) as config_buffer:\n",
+    "    config = json.loads(config_buffer.read())\n",
+    "\n",
+    "###############################\n",
+    "# Parse the annotations\n",
+    "###############################\n",
+    "path_imgs_training = config['train']['train_image_folder']\n",
+    "path_anns_training = config['train']['train_annot_folder']\n",
+    "path_imgs_val = config['test']['test_image_folder']\n",
+    "path_anns_val = config['test']['test_annot_folder']\n",
+    "labels = config['model']['labels']\n",
+    "categories = {}\n",
+    "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} # category 0 is the background\n",
+    "for i in range(len(labels)): categories[labels[i]] = i+1\n",
+    "print('\\nTraining on: \\t' + str(categories) + '\\n')\n",
+    "\n",
+    "####################################\n",
+    "# Parameters\n",
+    "###################################\n",
+    " #%%\n",
+    "img_height = config['model']['input'] # Height of the model input images\n",
+    "img_width = config['model']['input'] # Width of the model input images\n",
+    "img_channels = 3 # Number of color channels of the model input images\n",
+    "mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\n",
+    "swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\n",
+    "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
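+    "# (Added note.) SSD300 has six predictor layers, so `scales` must hold\n",
+    "# n_predictor_layers + 1 = 7 entries; the extra last value is only used to\n",
+    "# derive the second aspect-ratio-1 box of the final predictor layer.\n",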
+    "scales_pascal = [0.01, 0.05, 0.1, 0.2, 0.37, 0.54, 0.71] # Anchor box scaling factors, adapted from the original SSD300 Pascal VOC scales toward the small objects in this dataset\n",
+    "#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
+    "scales = scales_pascal\n",
+    "aspect_ratios = [[1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
+    "                 [1.0, 2.0, 0.5],\n",
+    "                 [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\n",
+    "two_boxes_for_ar1 = True\n",
+    "steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\n",
+    "offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\n",
+    "clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\n",
+    "variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\n",
+    "normalize_coords = True\n",
+    "\n",
+    "K.clear_session() # Clear previous models from memory.\n",
+    "\n",
+    "\n",
+    "model_path = config['train']['saved_weights_name']\n",
+    "# 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "# If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "# optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "\n",
+    "if config['model']['backend'] == 'ssd7':\n",
+    "    #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
+    "    aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
+    "    two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
+    "    steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
+    "    offsets = None\n",
+    "\n",
+    "if os.path.exists(model_path):\n",
+    "    print(\"\\nLoading pretrained weights.\\n\")\n",
+    "    # We need to create an SSDLoss object in order to pass that to the model loader.\n",
+    "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "\n",
+    "    K.clear_session() # Clear previous models from memory.\n",
+    "    model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
+    "                                                   'L2Normalization': L2Normalization,\n",
+    "                                                   'compute_loss': ssd_loss.compute_loss})\n",
+    "\n",
+    "\n",
+    "else:\n",
+    "    ####################################\n",
+    "    # Build the Keras model.\n",
+    "    ###################################\n",
+    "\n",
+    "    if config['model']['backend'] == 'ssd300':\n",
+    "        #weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'\n",
+    "        from models.keras_ssd300 import ssd_300 as ssd\n",
+    "\n",
+    "        model = ssd(image_size=(img_height, img_width, img_channels),\n",
+    "                    n_classes=n_classes,\n",
+    "                    mode='training',\n",
+    "                    l2_regularization=0.0005,\n",
+    "                    scales=scales,\n",
+    "                    aspect_ratios_per_layer=aspect_ratios,\n",
+    "                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                    steps=steps,\n",
+    "                    offsets=offsets,\n",
+    "                    clip_boxes=clip_boxes,\n",
+    "                    variances=variances,\n",
+    "                    normalize_coords=normalize_coords,\n",
+    "                    subtract_mean=mean_color,\n",
+    "                    swap_channels=swap_channels)\n",
+    "\n",
+    "\n",
+    "    elif config['model']['backend'] == 'ssd7':\n",
+    "        #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "        from models.keras_ssd7 import build_model as ssd\n",
+    "        scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
+    "        aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
+    "        two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
+    "        steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
+    "        offsets = None\n",
+    "        model = ssd(image_size=(img_height, img_width, img_channels),\n",
+    "                    n_classes=n_classes,\n",
+    "                    mode='training',\n",
+    "                    l2_regularization=0.0005,\n",
+    "                    scales=scales,\n",
+    "                    aspect_ratios_global=aspect_ratios,\n",
+    "                    aspect_ratios_per_layer=None,\n",
+    "                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
+    "                    steps=steps,\n",
+    "                    offsets=offsets,\n",
+    "                    clip_boxes=clip_boxes,\n",
+    "                    variances=variances,\n",
+    "                    normalize_coords=normalize_coords,\n",
+    "                    subtract_mean=None,\n",
+    "                    divide_by_stddev=None)\n",
+    "\n",
+    "    else:\n",
+    "        print('Wrong Backend')\n",
+    "\n",
+    "\n",
+    "    print('OK create model')\n",
+    "    #sgd = SGD(lr=config['train']['learning_rate'], momentum=0.9, decay=0.0, nesterov=False)\n",
+    "\n",
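+    "    # (Added note.) `by_name=True` below loads only the layers whose names match\n",
+    "    # entries in the VGG-16 weight file, so the convolutional base starts from\n",
+    "    # pretrained weights while the new SSD predictor heads keep their random\n",
+    "    # initialization.\n",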
+    "    # TODO: Set the path to the weights you want to load. Only for ssd300 or ssd512.\n",
+    "\n",
+    "    weights_path = '../ssd_keras-master/VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
+    "    print(\"\\nLoading pretrained weights VGG.\\n\")\n",
+    "    model.load_weights(weights_path, by_name=True)\n",
+    "\n",
+    "    # 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
+    "    # If you want to follow the original Caffe implementation, use the preset SGD\n",
+    "    # optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n",
+    "\n",
+    "\n",
+    "    #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    #sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\n",
+    "    optimizer = Adam(lr=config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
+    "    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
+    "    model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)\n",
+    "\n",
+    "    model.summary()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Instantiate the data generators and train the model.\n",
+    "\n",
+    "*Change made so that both png and jpg are read: keras-ssd-master/data_generator/object_detection_2d_data_generator.py, function parse_xml\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 88.77it/s]\n",
+      "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 61.92it/s]\n",
+      "1 : 444\n",
+      "Number of images in the training dataset:\t 33\n",
+      "Number of images in the validation dataset:\t 2\n",
+      "WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+      "Instructions for updating:\n",
+      "Deprecated in favor of operator or tf.math.divide.\n",
+      "Epoch 1/500\n",
+      "\n",
+      "Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 95s 947ms/step - loss: 16.3709 - val_loss: 7.3757\n",
+      "\n",
+      "Epoch 00001: val_loss improved from inf to 7.37568, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 2/500\n",
+      "\n",
+      "Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 80s 799ms/step - loss: 8.3424 - val_loss: 5.9648\n",
+      "\n",
+      "Epoch 00002: val_loss improved from 7.37568 to 5.96482, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 3/500\n",
+      "\n",
+      "Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 81s 814ms/step - loss: 6.9268 - val_loss: 5.5916\n",
+      "\n",
+      "Epoch 00003: val_loss improved from 5.96482 to 5.59162, saving model to experimento_ssd300_fault_1.h5\n",
+      "Epoch 4/500\n",
+      "\n",
+      "Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 81s 806ms/step - loss: 6.5707 - val_loss: 5.6131\n",
+      "\n",
+      "Epoch 00004: val_loss did not improve from 5.59162\n",
+      "Epoch 5/500\n",
+      "\n",
+      "Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n",
+      "100/100 [==============================] - 81s 809ms/step - loss: 6.2085 - val_loss: 5.8056\n",
+      "\n",
+      "Epoch 00005: val_loss did not improve from 5.59162\n",
+      "Epoch 6/500\n",
+      "\n",
+      "Epoch 00006: LearningRateScheduler setting 
learning rate to 0.001.\n", + "[training log condensed -- epochs 6-80, lr 1e-3, 100 steps/epoch at ~80 s/epoch (~800 ms/step); training loss fell from 5.98 to ~3.1. val_loss improvements (epoch: val_loss), each saving the model to experimento_ssd300_fault_1.h5: 6: 5.41071, 7: 5.40461, 8: 5.17174, 9: 5.14472, 12: 5.01743, 13: 4.89279, 14: 4.69325, 17: 4.66824, 18: 4.33889, 20: 4.20925, 22: 4.13152, 23: 4.00518, 26: 3.90726, 29: 3.81654, 32: 3.76554, 33: 3.75635, 34: 3.62786, 35: 3.60423, 43: 3.53213, 60: 3.50248, 61: 3.49233, 62: 3.37216, 65: 3.09848, 70: 3.03288, 80: 2.98575; all other epochs in this range reported 'val_loss did not improve'.]\n",
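A note for readers skimming the log: the interleaved "LearningRateScheduler setting learning rate to ..." and "val_loss improved ..., saving model to experimento_ssd300_fault_1.h5" lines are the verbose output of two stock Keras callbacks. Below is a minimal sketch of a callback setup that would produce this kind of log; the schedule thresholds are read off the epochs at which the printed rate changes (81 and 101) and are not taken from the notebook source:

from keras.callbacks import LearningRateScheduler, ModelCheckpoint

# Keras passes a zero-based epoch index, so `epoch < 80` covers the
# displayed epochs 1-80, matching the rate drop printed at epoch 81.
def lr_schedule(epoch):
    if epoch < 80:
        return 0.001
    elif epoch < 100:
        return 0.0001
    else:
        return 0.00001

callbacks = [
    # verbose=1 prints "Epoch NNNNN: LearningRateScheduler setting learning rate to ..."
    LearningRateScheduler(schedule=lr_schedule, verbose=1),
    # save_best_only=True keeps only checkpoints that improve val_loss;
    # verbose=1 prints the "val_loss improved from X to Y, saving model to ..." lines.
    ModelCheckpoint(filepath='experimento_ssd300_fault_1.h5',
                    monitor='val_loss',
                    save_best_only=True,
                    verbose=1),
]

A list like this would then be passed to model.fit_generator(..., epochs=500, callbacks=callbacks).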
+ "[training log condensed -- epochs 81-100, lr 1e-4: training loss fell from 2.89 to ~2.47. val_loss improvements, each saving the model to experimento_ssd300_fault_1.h5: 89: 2.93818, 91: 2.93406, 92: 2.90171, 93: 2.86965, 97: 2.81115; the remaining epochs in this range did not improve on the best checkpoint.]\n",
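Since the rest of the log repeats the same fixed message format, the checkpoint milestones can also be recovered mechanically rather than by eye. A small illustrative helper, assuming the raw console output has been captured to a text file (nothing like this exists in the notebook itself):

import re

# Hypothetical capture of the raw Keras console output shown in this log.
log_text = open('training_log.txt').read()

# ModelCheckpoint's verbose=1 message has a fixed shape we can match.
improved = re.compile(r"Epoch 0*(\d+): val_loss improved from [\d.]+ to ([\d.]+)")
milestones = [(int(epoch), float(loss)) for epoch, loss in improved.findall(log_text)]

print(milestones[0])   # first improvement in the captured log
print(milestones[-1])  # best val_loss so far, e.g. (260, 2.41611) for this run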
+ "[training log condensed -- epochs 101-268, lr 1e-5: training loss settled around 2.2-2.4, with per-epoch time at ~80 s (rising briefly to ~84 s near epochs 217-236). val_loss improvements, each saving the model to experimento_ssd300_fault_1.h5: 101: 2.78665, 102: 2.78338, 103: 2.75448, 104: 2.74665, 105: 2.73900, 106: 2.73586, 108: 2.73520, 109: 2.70995, 110: 2.69939, 122: 2.69654, 125: 2.68988, 127: 2.67407, 131: 2.66941, 133: 2.66305, 141: 2.66026, 142: 2.65490, 143: 2.64322, 151: 2.64291, 154: 2.63178, 157: 2.62240, 158: 2.61456, 160: 2.60142, 164: 2.59965, 165: 2.59209, 167: 2.58251, 168: 2.56985, 173: 2.55734, 177: 2.53691, 180: 2.53143, 182: 2.53100, 193: 2.51713, 196: 2.50840, 198: 2.49763, 201: 2.48098, 226: 2.47816, 227: 2.46269, 229: 2.45775, 234: 2.44358, 238: 2.42576, 259: 2.42424, 260: 2.41611; all other epochs in this range reported 'val_loss did not improve'.]\n", + "Epoch 269/500\n", + "\n", + "Epoch 00269: LearningRateScheduler setting
learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.2140 - val_loss: 2.4490\n", + "\n", + "Epoch 00269: val_loss did not improve from 2.41611\n", + "Epoch 270/500\n", + "\n", + "Epoch 00270: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1678 - val_loss: 2.4427\n", + "\n", + "Epoch 00270: val_loss did not improve from 2.41611\n", + "Epoch 271/500\n", + "\n", + "Epoch 00271: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.2064 - val_loss: 2.4158\n", + "\n", + "Epoch 00271: val_loss improved from 2.41611 to 2.41575, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 272/500\n", + "\n", + "Epoch 00272: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1161 - val_loss: 2.4396\n", + "\n", + "Epoch 00272: val_loss did not improve from 2.41575\n", + "Epoch 273/500\n", + "\n", + "Epoch 00273: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 799ms/step - loss: 2.1986 - val_loss: 2.4483\n", + "\n", + "Epoch 00273: val_loss did not improve from 2.41575\n", + "Epoch 274/500\n", + "\n", + "Epoch 00274: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1965 - val_loss: 2.4307\n", + "\n", + "Epoch 00274: val_loss did not improve from 2.41575\n", + "Epoch 275/500\n", + "\n", + "Epoch 00275: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.2041 - val_loss: 2.4547\n", + "\n", + "Epoch 00275: val_loss did not improve from 2.41575\n", + "Epoch 276/500\n", + "\n", + "Epoch 00276: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.2368 - val_loss: 2.4124\n", + "\n", + "Epoch 00276: val_loss improved from 2.41575 to 2.41239, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 277/500\n", + "\n", + "Epoch 00277: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1827 - val_loss: 2.4110\n", + "\n", + "Epoch 00277: val_loss improved from 2.41239 to 2.41095, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 278/500\n", + "\n", + "Epoch 00278: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1829 - val_loss: 2.4286\n", + "\n", + "Epoch 00278: val_loss did not improve from 2.41095\n", + "Epoch 279/500\n", + "\n", + "Epoch 00279: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.2048 - val_loss: 2.4266\n", + "\n", + "Epoch 00279: val_loss did not improve from 2.41095\n", + "Epoch 280/500\n", + "\n", + "Epoch 00280: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1765 - val_loss: 2.4449\n", + "\n", + "Epoch 00280: val_loss did not improve from 2.41095\n", + "Epoch 281/500\n", + "\n", + "Epoch 00281: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1752 - 
val_loss: 2.4267\n", + "\n", + "Epoch 00281: val_loss did not improve from 2.41095\n", + "Epoch 282/500\n", + "\n", + "Epoch 00282: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1497 - val_loss: 2.4363\n", + "\n", + "Epoch 00282: val_loss did not improve from 2.41095\n", + "Epoch 283/500\n", + "\n", + "Epoch 00283: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1504 - val_loss: 2.4205\n", + "\n", + "Epoch 00283: val_loss did not improve from 2.41095\n", + "Epoch 284/500\n", + "\n", + "Epoch 00284: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1331 - val_loss: 2.4749\n", + "\n", + "Epoch 00284: val_loss did not improve from 2.41095\n", + "Epoch 285/500\n", + "\n", + "Epoch 00285: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1746 - val_loss: 2.4471\n", + "\n", + "Epoch 00285: val_loss did not improve from 2.41095\n", + "Epoch 286/500\n", + "\n", + "Epoch 00286: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1463 - val_loss: 2.4463\n", + "\n", + "Epoch 00286: val_loss did not improve from 2.41095\n", + "Epoch 287/500\n", + "\n", + "Epoch 00287: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1568 - val_loss: 2.4754\n", + "\n", + "Epoch 00287: val_loss did not improve from 2.41095\n", + "Epoch 288/500\n", + "\n", + "Epoch 00288: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1094 - val_loss: 2.4033\n", + "\n", + "Epoch 00288: val_loss improved from 2.41095 to 2.40333, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 289/500\n", + "\n", + "Epoch 00289: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1294 - val_loss: 2.4347\n", + "\n", + "Epoch 00289: val_loss did not improve from 2.40333\n", + "Epoch 290/500\n", + "\n", + "Epoch 00290: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.1655 - val_loss: 2.4334\n", + "\n", + "Epoch 00290: val_loss did not improve from 2.40333\n", + "Epoch 291/500\n", + "\n", + "Epoch 00291: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1800 - val_loss: 2.4345\n", + "\n", + "Epoch 00291: val_loss did not improve from 2.40333\n", + "Epoch 292/500\n", + "\n", + "Epoch 00292: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1725 - val_loss: 2.4484\n", + "\n", + "Epoch 00292: val_loss did not improve from 2.40333\n", + "Epoch 293/500\n", + "\n", + "Epoch 00293: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.2019 - val_loss: 2.4093\n", + "\n", + "Epoch 00293: val_loss did not improve from 2.40333\n", + "Epoch 294/500\n", + "\n", + "Epoch 00294: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1663 - val_loss: 
2.4833\n", + "\n", + "Epoch 00294: val_loss did not improve from 2.40333\n", + "Epoch 295/500\n", + "\n", + "Epoch 00295: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1420 - val_loss: 2.4724\n", + "\n", + "Epoch 00295: val_loss did not improve from 2.40333\n", + "Epoch 296/500\n", + "\n", + "Epoch 00296: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.0923 - val_loss: 2.4685\n", + "\n", + "Epoch 00296: val_loss did not improve from 2.40333\n", + "Epoch 297/500\n", + "\n", + "Epoch 00297: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1342 - val_loss: 2.4261\n", + "\n", + "Epoch 00297: val_loss did not improve from 2.40333\n", + "Epoch 298/500\n", + "\n", + "Epoch 00298: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1697 - val_loss: 2.4211\n", + "\n", + "Epoch 00298: val_loss did not improve from 2.40333\n", + "Epoch 299/500\n", + "\n", + "Epoch 00299: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1354 - val_loss: 2.4090\n", + "\n", + "Epoch 00299: val_loss did not improve from 2.40333\n", + "Epoch 300/500\n", + "\n", + "Epoch 00300: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1326 - val_loss: 2.4623\n", + "\n", + "Epoch 00300: val_loss did not improve from 2.40333\n", + "Epoch 301/500\n", + "\n", + "Epoch 00301: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1773 - val_loss: 2.4599\n", + "\n", + "Epoch 00301: val_loss did not improve from 2.40333\n", + "Epoch 302/500\n", + "\n", + "Epoch 00302: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1958 - val_loss: 2.4428\n", + "\n", + "Epoch 00302: val_loss did not improve from 2.40333\n", + "Epoch 303/500\n", + "\n", + "Epoch 00303: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0970 - val_loss: 2.4402\n", + "\n", + "Epoch 00303: val_loss did not improve from 2.40333\n", + "Epoch 304/500\n", + "\n", + "Epoch 00304: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1320 - val_loss: 2.4646\n", + "\n", + "Epoch 00304: val_loss did not improve from 2.40333\n", + "Epoch 305/500\n", + "\n", + "Epoch 00305: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1519 - val_loss: 2.4214\n", + "\n", + "Epoch 00305: val_loss did not improve from 2.40333\n", + "Epoch 306/500\n", + "\n", + "Epoch 00306: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1758 - val_loss: 2.4445\n", + "\n", + "Epoch 00306: val_loss did not improve from 2.40333\n", + "Epoch 307/500\n", + "\n", + "Epoch 00307: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1346 - val_loss: 2.4386\n", + "\n", + "Epoch 00307: val_loss did not improve from 
2.40333\n", + "Epoch 308/500\n", + "\n", + "Epoch 00308: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0994 - val_loss: 2.4430\n", + "\n", + "Epoch 00308: val_loss did not improve from 2.40333\n", + "Epoch 309/500\n", + "\n", + "Epoch 00309: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 798ms/step - loss: 2.0948 - val_loss: 2.4618\n", + "\n", + "Epoch 00309: val_loss did not improve from 2.40333\n", + "Epoch 310/500\n", + "\n", + "Epoch 00310: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1108 - val_loss: 2.4125\n", + "\n", + "Epoch 00310: val_loss did not improve from 2.40333\n", + "Epoch 311/500\n", + "\n", + "Epoch 00311: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1230 - val_loss: 2.4152\n", + "\n", + "Epoch 00311: val_loss did not improve from 2.40333\n", + "Epoch 312/500\n", + "\n", + "Epoch 00312: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1885 - val_loss: 2.3989\n", + "\n", + "Epoch 00312: val_loss improved from 2.40333 to 2.39892, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 313/500\n", + "\n", + "Epoch 00313: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1826 - val_loss: 2.4245\n", + "\n", + "Epoch 00313: val_loss did not improve from 2.39892\n", + "Epoch 314/500\n", + "\n", + "Epoch 00314: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1490 - val_loss: 2.4514\n", + "\n", + "Epoch 00314: val_loss did not improve from 2.39892\n", + "Epoch 315/500\n", + "\n", + "Epoch 00315: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1385 - val_loss: 2.4354\n", + "\n", + "Epoch 00315: val_loss did not improve from 2.39892\n", + "Epoch 316/500\n", + "\n", + "Epoch 00316: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1267 - val_loss: 2.4204\n", + "\n", + "Epoch 00316: val_loss did not improve from 2.39892\n", + "Epoch 317/500\n", + "\n", + "Epoch 00317: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1377 - val_loss: 2.3919\n", + "\n", + "Epoch 00317: val_loss improved from 2.39892 to 2.39192, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 318/500\n", + "\n", + "Epoch 00318: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1597 - val_loss: 2.4013\n", + "\n", + "Epoch 00318: val_loss did not improve from 2.39192\n", + "Epoch 319/500\n", + "\n", + "Epoch 00319: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1371 - val_loss: 2.4382\n", + "\n", + "Epoch 00319: val_loss did not improve from 2.39192\n", + "Epoch 320/500\n", + "\n", + "Epoch 00320: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 
798ms/step - loss: 2.1655 - val_loss: 2.3971\n", + "\n", + "Epoch 00320: val_loss did not improve from 2.39192\n", + "Epoch 321/500\n", + "\n", + "Epoch 00321: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1480 - val_loss: 2.3843\n", + "\n", + "Epoch 00321: val_loss improved from 2.39192 to 2.38430, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 322/500\n", + "\n", + "Epoch 00322: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1041 - val_loss: 2.4188\n", + "\n", + "Epoch 00322: val_loss did not improve from 2.38430\n", + "Epoch 323/500\n", + "\n", + "Epoch 00323: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1651 - val_loss: 2.3858\n", + "\n", + "Epoch 00323: val_loss did not improve from 2.38430\n", + "Epoch 324/500\n", + "\n", + "Epoch 00324: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1550 - val_loss: 2.3975\n", + "\n", + "Epoch 00324: val_loss did not improve from 2.38430\n", + "Epoch 325/500\n", + "\n", + "Epoch 00325: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1363 - val_loss: 2.4203\n", + "\n", + "Epoch 00325: val_loss did not improve from 2.38430\n", + "Epoch 326/500\n", + "\n", + "Epoch 00326: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1205 - val_loss: 2.4071\n", + "\n", + "Epoch 00326: val_loss did not improve from 2.38430\n", + "Epoch 327/500\n", + "\n", + "Epoch 00327: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0460 - val_loss: 2.4163\n", + "\n", + "Epoch 00327: val_loss did not improve from 2.38430\n", + "Epoch 328/500\n", + "\n", + "Epoch 00328: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1368 - val_loss: 2.4043\n", + "\n", + "Epoch 00328: val_loss did not improve from 2.38430\n", + "Epoch 329/500\n", + "\n", + "Epoch 00329: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1165 - val_loss: 2.4102\n", + "\n", + "Epoch 00329: val_loss did not improve from 2.38430\n", + "Epoch 330/500\n", + "\n", + "Epoch 00330: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0698 - val_loss: 2.4503\n", + "\n", + "Epoch 00330: val_loss did not improve from 2.38430\n", + "Epoch 331/500\n", + "\n", + "Epoch 00331: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1698 - val_loss: 2.3874\n", + "\n", + "Epoch 00331: val_loss did not improve from 2.38430\n", + "Epoch 332/500\n", + "\n", + "Epoch 00332: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0500 - val_loss: 2.4484\n", + "\n", + "Epoch 00332: val_loss did not improve from 2.38430\n", + "Epoch 333/500\n", + "\n", + "Epoch 00333: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 
2.1160 - val_loss: 2.4091\n", + "\n", + "Epoch 00333: val_loss did not improve from 2.38430\n", + "Epoch 334/500\n", + "\n", + "Epoch 00334: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1460 - val_loss: 2.4595\n", + "\n", + "Epoch 00334: val_loss did not improve from 2.38430\n", + "Epoch 335/500\n", + "\n", + "Epoch 00335: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1175 - val_loss: 2.4163\n", + "\n", + "Epoch 00335: val_loss did not improve from 2.38430\n", + "Epoch 336/500\n", + "\n", + "Epoch 00336: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1029 - val_loss: 2.4175\n", + "\n", + "Epoch 00336: val_loss did not improve from 2.38430\n", + "Epoch 337/500\n", + "\n", + "Epoch 00337: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1533 - val_loss: 2.4147\n", + "\n", + "Epoch 00337: val_loss did not improve from 2.38430\n", + "Epoch 338/500\n", + "\n", + "Epoch 00338: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1118 - val_loss: 2.4363\n", + "\n", + "Epoch 00338: val_loss did not improve from 2.38430\n", + "Epoch 339/500\n", + "\n", + "Epoch 00339: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1367 - val_loss: 2.4138\n", + "\n", + "Epoch 00339: val_loss did not improve from 2.38430\n", + "Epoch 340/500\n", + "\n", + "Epoch 00340: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0941 - val_loss: 2.4105\n", + "\n", + "Epoch 00340: val_loss did not improve from 2.38430\n", + "Epoch 341/500\n", + "\n", + "Epoch 00341: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0905 - val_loss: 2.4222\n", + "\n", + "Epoch 00341: val_loss did not improve from 2.38430\n", + "Epoch 342/500\n", + "\n", + "Epoch 00342: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0956 - val_loss: 2.4531\n", + "\n", + "Epoch 00342: val_loss did not improve from 2.38430\n", + "Epoch 343/500\n", + "\n", + "Epoch 00343: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1527 - val_loss: 2.4406\n", + "\n", + "Epoch 00343: val_loss did not improve from 2.38430\n", + "Epoch 344/500\n", + "\n", + "Epoch 00344: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1195 - val_loss: 2.4184\n", + "\n", + "Epoch 00344: val_loss did not improve from 2.38430\n", + "Epoch 345/500\n", + "\n", + "Epoch 00345: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 799ms/step - loss: 2.1247 - val_loss: 2.3930\n", + "\n", + "Epoch 00345: val_loss did not improve from 2.38430\n", + "Epoch 346/500\n", + "\n", + "Epoch 00346: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 
2.0853 - val_loss: 2.4446\n", + "\n", + "Epoch 00346: val_loss did not improve from 2.38430\n", + "Epoch 347/500\n", + "\n", + "Epoch 00347: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0962 - val_loss: 2.3987\n", + "\n", + "Epoch 00347: val_loss did not improve from 2.38430\n", + "Epoch 348/500\n", + "\n", + "Epoch 00348: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0868 - val_loss: 2.4302\n", + "\n", + "Epoch 00348: val_loss did not improve from 2.38430\n", + "Epoch 349/500\n", + "\n", + "Epoch 00349: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1354 - val_loss: 2.4145\n", + "\n", + "Epoch 00349: val_loss did not improve from 2.38430\n", + "Epoch 350/500\n", + "\n", + "Epoch 00350: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0808 - val_loss: 2.4016\n", + "\n", + "Epoch 00350: val_loss did not improve from 2.38430\n", + "Epoch 351/500\n", + "\n", + "Epoch 00351: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0929 - val_loss: 2.4298\n", + "\n", + "Epoch 00351: val_loss did not improve from 2.38430\n", + "Epoch 352/500\n", + "\n", + "Epoch 00352: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1178 - val_loss: 2.3901\n", + "\n", + "Epoch 00352: val_loss did not improve from 2.38430\n", + "Epoch 353/500\n", + "\n", + "Epoch 00353: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0798 - val_loss: 2.4365\n", + "\n", + "Epoch 00353: val_loss did not improve from 2.38430\n", + "Epoch 354/500\n", + "\n", + "Epoch 00354: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0993 - val_loss: 2.4296\n", + "\n", + "Epoch 00354: val_loss did not improve from 2.38430\n", + "Epoch 355/500\n", + "\n", + "Epoch 00355: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0661 - val_loss: 2.4054\n", + "\n", + "Epoch 00355: val_loss did not improve from 2.38430\n", + "Epoch 356/500\n", + "\n", + "Epoch 00356: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0964 - val_loss: 2.4211\n", + "\n", + "Epoch 00356: val_loss did not improve from 2.38430\n", + "Epoch 357/500\n", + "\n", + "Epoch 00357: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1526 - val_loss: 2.4048\n", + "\n", + "Epoch 00357: val_loss did not improve from 2.38430\n", + "Epoch 358/500\n", + "\n", + "Epoch 00358: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0620 - val_loss: 2.4042\n", + "\n", + "Epoch 00358: val_loss did not improve from 2.38430\n", + "Epoch 359/500\n", + "\n", + "Epoch 00359: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0800 - val_loss: 2.4167\n", + "\n", + "Epoch 00359: val_loss did not 
improve from 2.38430\n", + "Epoch 360/500\n", + "\n", + "Epoch 00360: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1386 - val_loss: 2.4166\n", + "\n", + "Epoch 00360: val_loss did not improve from 2.38430\n", + "Epoch 361/500\n", + "\n", + "Epoch 00361: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0717 - val_loss: 2.3937\n", + "\n", + "Epoch 00361: val_loss did not improve from 2.38430\n", + "Epoch 362/500\n", + "\n", + "Epoch 00362: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1056 - val_loss: 2.3814\n", + "\n", + "Epoch 00362: val_loss improved from 2.38430 to 2.38138, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 363/500\n", + "\n", + "Epoch 00363: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0884 - val_loss: 2.3623\n", + "\n", + "Epoch 00363: val_loss improved from 2.38138 to 2.36234, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 364/500\n", + "\n", + "Epoch 00364: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0678 - val_loss: 2.3943\n", + "\n", + "Epoch 00364: val_loss did not improve from 2.36234\n", + "Epoch 365/500\n", + "\n", + "Epoch 00365: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0853 - val_loss: 2.4385\n", + "\n", + "Epoch 00365: val_loss did not improve from 2.36234\n", + "Epoch 366/500\n", + "\n", + "Epoch 00366: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1190 - val_loss: 2.4279\n", + "\n", + "Epoch 00366: val_loss did not improve from 2.36234\n", + "Epoch 367/500\n", + "\n", + "Epoch 00367: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1034 - val_loss: 2.3857\n", + "\n", + "Epoch 00367: val_loss did not improve from 2.36234\n", + "Epoch 368/500\n", + "\n", + "Epoch 00368: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0699 - val_loss: 2.4007\n", + "\n", + "Epoch 00368: val_loss did not improve from 2.36234\n", + "Epoch 369/500\n", + "\n", + "Epoch 00369: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1649 - val_loss: 2.3795\n", + "\n", + "Epoch 00369: val_loss did not improve from 2.36234\n", + "Epoch 370/500\n", + "\n", + "Epoch 00370: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0986 - val_loss: 2.3778\n", + "\n", + "Epoch 00370: val_loss did not improve from 2.36234\n", + "Epoch 371/500\n", + "\n", + "Epoch 00371: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1006 - val_loss: 2.4018\n", + "\n", + "Epoch 00371: val_loss did not improve from 2.36234\n", + "Epoch 372/500\n", + "\n", + "Epoch 00372: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0778 - val_loss: 2.3997\n", + "\n", + 
"Epoch 00372: val_loss did not improve from 2.36234\n", + "Epoch 373/500\n", + "\n", + "Epoch 00373: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0916 - val_loss: 2.4090\n", + "\n", + "Epoch 00373: val_loss did not improve from 2.36234\n", + "Epoch 374/500\n", + "\n", + "Epoch 00374: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0937 - val_loss: 2.4017\n", + "\n", + "Epoch 00374: val_loss did not improve from 2.36234\n", + "Epoch 375/500\n", + "\n", + "Epoch 00375: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0926 - val_loss: 2.3936\n", + "\n", + "Epoch 00375: val_loss did not improve from 2.36234\n", + "Epoch 376/500\n", + "\n", + "Epoch 00376: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1085 - val_loss: 2.4056\n", + "\n", + "Epoch 00376: val_loss did not improve from 2.36234\n", + "Epoch 377/500\n", + "\n", + "Epoch 00377: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0807 - val_loss: 2.4472\n", + "\n", + "Epoch 00377: val_loss did not improve from 2.36234\n", + "Epoch 378/500\n", + "\n", + "Epoch 00378: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0767 - val_loss: 2.3985\n", + "\n", + "Epoch 00378: val_loss did not improve from 2.36234\n", + "Epoch 379/500\n", + "\n", + "Epoch 00379: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1006 - val_loss: 2.3763\n", + "\n", + "Epoch 00379: val_loss did not improve from 2.36234\n", + "Epoch 380/500\n", + "\n", + "Epoch 00380: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0999 - val_loss: 2.3724\n", + "\n", + "Epoch 00380: val_loss did not improve from 2.36234\n", + "Epoch 381/500\n", + "\n", + "Epoch 00381: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 799ms/step - loss: 2.0636 - val_loss: 2.3865\n", + "\n", + "Epoch 00381: val_loss did not improve from 2.36234\n", + "Epoch 382/500\n", + "\n", + "Epoch 00382: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9868 - val_loss: 2.3687\n", + "\n", + "Epoch 00382: val_loss did not improve from 2.36234\n", + "Epoch 383/500\n", + "\n", + "Epoch 00383: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0264 - val_loss: 2.3919\n", + "\n", + "Epoch 00383: val_loss did not improve from 2.36234\n", + "Epoch 384/500\n", + "\n", + "Epoch 00384: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1080 - val_loss: 2.3729\n", + "\n", + "Epoch 00384: val_loss did not improve from 2.36234\n", + "Epoch 385/500\n", + "\n", + "Epoch 00385: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1055 - val_loss: 2.3540\n", + "\n", + 
"Epoch 00385: val_loss improved from 2.36234 to 2.35402, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 386/500\n", + "\n", + "Epoch 00386: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0652 - val_loss: 2.3670\n", + "\n", + "Epoch 00386: val_loss did not improve from 2.35402\n", + "Epoch 387/500\n", + "\n", + "Epoch 00387: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1103 - val_loss: 2.3755\n", + "\n", + "Epoch 00387: val_loss did not improve from 2.35402\n", + "Epoch 388/500\n", + "\n", + "Epoch 00388: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0526 - val_loss: 2.3800\n", + "\n", + "Epoch 00388: val_loss did not improve from 2.35402\n", + "Epoch 389/500\n", + "\n", + "Epoch 00389: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0536 - val_loss: 2.3885\n", + "\n", + "Epoch 00389: val_loss did not improve from 2.35402\n", + "Epoch 390/500\n", + "\n", + "Epoch 00390: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1088 - val_loss: 2.3599\n", + "\n", + "Epoch 00390: val_loss did not improve from 2.35402\n", + "Epoch 391/500\n", + "\n", + "Epoch 00391: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0575 - val_loss: 2.3714\n", + "\n", + "Epoch 00391: val_loss did not improve from 2.35402\n", + "Epoch 392/500\n", + "\n", + "Epoch 00392: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0073 - val_loss: 2.4156\n", + "\n", + "Epoch 00392: val_loss did not improve from 2.35402\n", + "Epoch 393/500\n", + "\n", + "Epoch 00393: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1487 - val_loss: 2.3745\n", + "\n", + "Epoch 00393: val_loss did not improve from 2.35402\n", + "Epoch 394/500\n", + "\n", + "Epoch 00394: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0446 - val_loss: 2.3935\n", + "\n", + "Epoch 00394: val_loss did not improve from 2.35402\n", + "Epoch 395/500\n", + "\n", + "Epoch 00395: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1099 - val_loss: 2.4126\n", + "\n", + "Epoch 00395: val_loss did not improve from 2.35402\n", + "Epoch 396/500\n", + "\n", + "Epoch 00396: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0329 - val_loss: 2.4481\n", + "\n", + "Epoch 00396: val_loss did not improve from 2.35402\n", + "Epoch 397/500\n", + "\n", + "Epoch 00397: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0798 - val_loss: 2.3902\n", + "\n", + "Epoch 00397: val_loss did not improve from 2.35402\n", + "Epoch 398/500\n", + "\n", + "Epoch 00398: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.1269 - val_loss: 2.4099\n", + "\n", + "Epoch 00398: 
val_loss did not improve from 2.35402\n", + "Epoch 399/500\n", + "\n", + "Epoch 00399: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0647 - val_loss: 2.3668\n", + "\n", + "Epoch 00399: val_loss did not improve from 2.35402\n", + "Epoch 400/500\n", + "\n", + "Epoch 00400: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0623 - val_loss: 2.3641\n", + "\n", + "Epoch 00400: val_loss did not improve from 2.35402\n", + "Epoch 401/500\n", + "\n", + "Epoch 00401: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0806 - val_loss: 2.4077\n", + "\n", + "Epoch 00401: val_loss did not improve from 2.35402\n", + "Epoch 402/500\n", + "\n", + "Epoch 00402: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0625 - val_loss: 2.3878\n", + "\n", + "Epoch 00402: val_loss did not improve from 2.35402\n", + "Epoch 403/500\n", + "\n", + "Epoch 00403: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0204 - val_loss: 2.3818\n", + "\n", + "Epoch 00403: val_loss did not improve from 2.35402\n", + "Epoch 404/500\n", + "\n", + "Epoch 00404: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0725 - val_loss: 2.4053\n", + "\n", + "Epoch 00404: val_loss did not improve from 2.35402\n", + "Epoch 405/500\n", + "\n", + "Epoch 00405: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0589 - val_loss: 2.4102\n", + "\n", + "Epoch 00405: val_loss did not improve from 2.35402\n", + "Epoch 406/500\n", + "\n", + "Epoch 00406: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0186 - val_loss: 2.3415\n", + "\n", + "Epoch 00406: val_loss improved from 2.35402 to 2.34146, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 407/500\n", + "\n", + "Epoch 00407: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0659 - val_loss: 2.3922\n", + "\n", + "Epoch 00407: val_loss did not improve from 2.34146\n", + "Epoch 408/500\n", + "\n", + "Epoch 00408: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0959 - val_loss: 2.3671\n", + "\n", + "Epoch 00408: val_loss did not improve from 2.34146\n", + "Epoch 409/500\n", + "\n", + "Epoch 00409: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0579 - val_loss: 2.4155\n", + "\n", + "Epoch 00409: val_loss did not improve from 2.34146\n", + "Epoch 410/500\n", + "\n", + "Epoch 00410: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0830 - val_loss: 2.3769\n", + "\n", + "Epoch 00410: val_loss did not improve from 2.34146\n", + "Epoch 411/500\n", + "\n", + "Epoch 00411: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0422 - val_loss: 2.4380\n", + "\n", + "Epoch 00411: val_loss did not 
improve from 2.34146\n", + "Epoch 412/500\n", + "\n", + "Epoch 00412: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0299 - val_loss: 2.3743\n", + "\n", + "Epoch 00412: val_loss did not improve from 2.34146\n", + "Epoch 413/500\n", + "\n", + "Epoch 00413: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0745 - val_loss: 2.4274\n", + "\n", + "Epoch 00413: val_loss did not improve from 2.34146\n", + "Epoch 414/500\n", + "\n", + "Epoch 00414: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.1003 - val_loss: 2.3616\n", + "\n", + "Epoch 00414: val_loss did not improve from 2.34146\n", + "Epoch 415/500\n", + "\n", + "Epoch 00415: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0427 - val_loss: 2.3785\n", + "\n", + "Epoch 00415: val_loss did not improve from 2.34146\n", + "Epoch 416/500\n", + "\n", + "Epoch 00416: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0771 - val_loss: 2.3834\n", + "\n", + "Epoch 00416: val_loss did not improve from 2.34146\n", + "Epoch 417/500\n", + "\n", + "Epoch 00417: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 799ms/step - loss: 2.0350 - val_loss: 2.3746\n", + "\n", + "Epoch 00417: val_loss did not improve from 2.34146\n", + "Epoch 418/500\n", + "\n", + "Epoch 00418: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0556 - val_loss: 2.3677\n", + "\n", + "Epoch 00418: val_loss did not improve from 2.34146\n", + "Epoch 419/500\n", + "\n", + "Epoch 00419: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.1044 - val_loss: 2.3601\n", + "\n", + "Epoch 00419: val_loss did not improve from 2.34146\n", + "Epoch 420/500\n", + "\n", + "Epoch 00420: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0146 - val_loss: 2.3894\n", + "\n", + "Epoch 00420: val_loss did not improve from 2.34146\n", + "Epoch 421/500\n", + "\n", + "Epoch 00421: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0476 - val_loss: 2.3624\n", + "\n", + "Epoch 00421: val_loss did not improve from 2.34146\n", + "Epoch 422/500\n", + "\n", + "Epoch 00422: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0478 - val_loss: 2.3906\n", + "\n", + "Epoch 00422: val_loss did not improve from 2.34146\n", + "Epoch 423/500\n", + "\n", + "Epoch 00423: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0872 - val_loss: 2.3948\n", + "\n", + "Epoch 00423: val_loss did not improve from 2.34146\n", + "Epoch 424/500\n", + "\n", + "Epoch 00424: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0461 - val_loss: 2.3858\n", + "\n", + "Epoch 00424: val_loss did not 
improve from 2.34146\n", + "Epoch 425/500\n", + "\n", + "Epoch 00425: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0589 - val_loss: 2.4122\n", + "\n", + "Epoch 00425: val_loss did not improve from 2.34146\n", + "Epoch 426/500\n", + "\n", + "Epoch 00426: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0172 - val_loss: 2.3670\n", + "\n", + "Epoch 00426: val_loss did not improve from 2.34146\n", + "Epoch 427/500\n", + "\n", + "Epoch 00427: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0098 - val_loss: 2.3985\n", + "\n", + "Epoch 00427: val_loss did not improve from 2.34146\n", + "Epoch 428/500\n", + "\n", + "Epoch 00428: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0527 - val_loss: 2.3992\n", + "\n", + "Epoch 00428: val_loss did not improve from 2.34146\n", + "Epoch 429/500\n", + "\n", + "Epoch 00429: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0470 - val_loss: 2.4607\n", + "\n", + "Epoch 00429: val_loss did not improve from 2.34146\n", + "Epoch 430/500\n", + "\n", + "Epoch 00430: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0599 - val_loss: 2.4135\n", + "\n", + "Epoch 00430: val_loss did not improve from 2.34146\n", + "Epoch 431/500\n", + "\n", + "Epoch 00431: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0177 - val_loss: 2.3874\n", + "\n", + "Epoch 00431: val_loss did not improve from 2.34146\n", + "Epoch 432/500\n", + "\n", + "Epoch 00432: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9960 - val_loss: 2.4020\n", + "\n", + "Epoch 00432: val_loss did not improve from 2.34146\n", + "Epoch 433/500\n", + "\n", + "Epoch 00433: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0111 - val_loss: 2.3511\n", + "\n", + "Epoch 00433: val_loss did not improve from 2.34146\n", + "Epoch 434/500\n", + "\n", + "Epoch 00434: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0170 - val_loss: 2.3787\n", + "\n", + "Epoch 00434: val_loss did not improve from 2.34146\n", + "Epoch 435/500\n", + "\n", + "Epoch 00435: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9859 - val_loss: 2.3683\n", + "\n", + "Epoch 00435: val_loss did not improve from 2.34146\n", + "Epoch 436/500\n", + "\n", + "Epoch 00436: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0651 - val_loss: 2.3521\n", + "\n", + "Epoch 00436: val_loss did not improve from 2.34146\n", + "Epoch 437/500\n", + "\n", + "Epoch 00437: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0509 - val_loss: 2.3714\n", + "\n", + "Epoch 00437: val_loss did not improve from 2.34146\n", + "Epoch 438/500\n", + "\n", + "Epoch 00438: 
LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0418 - val_loss: 2.3868\n", + "\n", + "Epoch 00438: val_loss did not improve from 2.34146\n", + "Epoch 439/500\n", + "\n", + "Epoch 00439: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0409 - val_loss: 2.3806\n", + "\n", + "Epoch 00439: val_loss did not improve from 2.34146\n", + "Epoch 440/500\n", + "\n", + "Epoch 00440: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.1010 - val_loss: 2.3676\n", + "\n", + "Epoch 00440: val_loss did not improve from 2.34146\n", + "Epoch 441/500\n", + "\n", + "Epoch 00441: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0553 - val_loss: 2.4117\n", + "\n", + "Epoch 00441: val_loss did not improve from 2.34146\n", + "Epoch 442/500\n", + "\n", + "Epoch 00442: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 1.9936 - val_loss: 2.4315\n", + "\n", + "Epoch 00442: val_loss did not improve from 2.34146\n", + "Epoch 443/500\n", + "\n", + "Epoch 00443: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0162 - val_loss: 2.4066\n", + "\n", + "Epoch 00443: val_loss did not improve from 2.34146\n", + "Epoch 444/500\n", + "\n", + "Epoch 00444: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0278 - val_loss: 2.4046\n", + "\n", + "Epoch 00444: val_loss did not improve from 2.34146\n", + "Epoch 445/500\n", + "\n", + "Epoch 00445: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0602 - val_loss: 2.3750\n", + "\n", + "Epoch 00445: val_loss did not improve from 2.34146\n", + "Epoch 446/500\n", + "\n", + "Epoch 00446: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0345 - val_loss: 2.3787\n", + "\n", + "Epoch 00446: val_loss did not improve from 2.34146\n", + "Epoch 447/500\n", + "\n", + "Epoch 00447: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0533 - val_loss: 2.3742\n", + "\n", + "Epoch 00447: val_loss did not improve from 2.34146\n", + "Epoch 448/500\n", + "\n", + "Epoch 00448: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0464 - val_loss: 2.3607\n", + "\n", + "Epoch 00448: val_loss did not improve from 2.34146\n", + "Epoch 449/500\n", + "\n", + "Epoch 00449: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0469 - val_loss: 2.3640\n", + "\n", + "Epoch 00449: val_loss did not improve from 2.34146\n", + "Epoch 450/500\n", + "\n", + "Epoch 00450: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0163 - val_loss: 2.4275\n", + "\n", + "Epoch 00450: val_loss did not improve from 2.34146\n", + "Epoch 451/500\n", + "\n", + "Epoch 00451: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 80s 799ms/step - loss: 2.0045 - val_loss: 2.3684\n", + "\n", + "Epoch 00451: val_loss did not improve from 2.34146\n", + "Epoch 452/500\n", + "\n", + "Epoch 00452: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0544 - val_loss: 2.3581\n", + "\n", + "Epoch 00452: val_loss did not improve from 2.34146\n", + "Epoch 453/500\n", + "\n", + "Epoch 00453: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0316 - val_loss: 2.3709\n", + "\n", + "Epoch 00453: val_loss did not improve from 2.34146\n", + "Epoch 454/500\n", + "\n", + "Epoch 00454: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0651 - val_loss: 2.3356\n", + "\n", + "Epoch 00454: val_loss improved from 2.34146 to 2.33563, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 455/500\n", + "\n", + "Epoch 00455: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0227 - val_loss: 2.3851\n", + "\n", + "Epoch 00455: val_loss did not improve from 2.33563\n", + "Epoch 456/500\n", + "\n", + "Epoch 00456: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0243 - val_loss: 2.3436\n", + "\n", + "Epoch 00456: val_loss did not improve from 2.33563\n", + "Epoch 457/500\n", + "\n", + "Epoch 00457: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0296 - val_loss: 2.4156\n", + "\n", + "Epoch 00457: val_loss did not improve from 2.33563\n", + "Epoch 458/500\n", + "\n", + "Epoch 00458: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 1.9851 - val_loss: 2.3550\n", + "\n", + "Epoch 00458: val_loss did not improve from 2.33563\n", + "Epoch 459/500\n", + "\n", + "Epoch 00459: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0508 - val_loss: 2.3610\n", + "\n", + "Epoch 00459: val_loss did not improve from 2.33563\n", + "Epoch 460/500\n", + "\n", + "Epoch 00460: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0041 - val_loss: 2.4261\n", + "\n", + "Epoch 00460: val_loss did not improve from 2.33563\n", + "Epoch 461/500\n", + "\n", + "Epoch 00461: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0278 - val_loss: 2.3630\n", + "\n", + "Epoch 00461: val_loss did not improve from 2.33563\n", + "Epoch 462/500\n", + "\n", + "Epoch 00462: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9905 - val_loss: 2.3738\n", + "\n", + "Epoch 00462: val_loss did not improve from 2.33563\n", + "Epoch 463/500\n", + "\n", + "Epoch 00463: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0774 - val_loss: 2.3582\n", + "\n", + "Epoch 00463: val_loss did not improve from 2.33563\n", + "Epoch 464/500\n", + "\n", + "Epoch 00464: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 80s 798ms/step - loss: 1.9975 - val_loss: 2.3427\n", + "\n", + "Epoch 00464: val_loss did not improve from 2.33563\n", + "Epoch 465/500\n", + "\n", + "Epoch 00465: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0039 - val_loss: 2.4340\n", + "\n", + "Epoch 00465: val_loss did not improve from 2.33563\n", + "Epoch 466/500\n", + "\n", + "Epoch 00466: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0650 - val_loss: 2.3641\n", + "\n", + "Epoch 00466: val_loss did not improve from 2.33563\n", + "Epoch 467/500\n", + "\n", + "Epoch 00467: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 1.9773 - val_loss: 2.3502\n", + "\n", + "Epoch 00467: val_loss did not improve from 2.33563\n", + "Epoch 468/500\n", + "\n", + "Epoch 00468: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9900 - val_loss: 2.3671\n", + "\n", + "Epoch 00468: val_loss did not improve from 2.33563\n", + "Epoch 469/500\n", + "\n", + "Epoch 00469: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0433 - val_loss: 2.4299\n", + "\n", + "Epoch 00469: val_loss did not improve from 2.33563\n", + "Epoch 470/500\n", + "\n", + "Epoch 00470: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0096 - val_loss: 2.3356\n", + "\n", + "Epoch 00470: val_loss improved from 2.33563 to 2.33556, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 471/500\n", + "\n", + "Epoch 00471: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0814 - val_loss: 2.4134\n", + "\n", + "Epoch 00471: val_loss did not improve from 2.33556\n", + "Epoch 472/500\n", + "\n", + "Epoch 00472: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0282 - val_loss: 2.3833\n", + "\n", + "Epoch 00472: val_loss did not improve from 2.33556\n", + "Epoch 473/500\n", + "\n", + "Epoch 00473: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 1.9912 - val_loss: 2.3809\n", + "\n", + "Epoch 00473: val_loss did not improve from 2.33556\n", + "Epoch 474/500\n", + "\n", + "Epoch 00474: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0636 - val_loss: 2.3998\n", + "\n", + "Epoch 00474: val_loss did not improve from 2.33556\n", + "Epoch 475/500\n", + "\n", + "Epoch 00475: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9840 - val_loss: 2.3530\n", + "\n", + "Epoch 00475: val_loss did not improve from 2.33556\n", + "Epoch 476/500\n", + "\n", + "Epoch 00476: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0183 - val_loss: 2.4219\n", + "\n", + "Epoch 00476: val_loss did not improve from 2.33556\n", + "Epoch 477/500\n", + "\n", + "Epoch 00477: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 80s 799ms/step - loss: 1.9861 - val_loss: 2.3589\n", + "\n", + "Epoch 00477: val_loss did not improve from 2.33556\n", + "Epoch 478/500\n", + "\n", + "Epoch 00478: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0277 - val_loss: 2.3678\n", + "\n", + "Epoch 00478: val_loss did not improve from 2.33556\n", + "Epoch 479/500\n", + "\n", + "Epoch 00479: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 796ms/step - loss: 2.0421 - val_loss: 2.3922\n", + "\n", + "Epoch 00479: val_loss did not improve from 2.33556\n", + "Epoch 480/500\n", + "\n", + "Epoch 00480: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 1.9588 - val_loss: 2.3798\n", + "\n", + "Epoch 00480: val_loss did not improve from 2.33556\n", + "Epoch 481/500\n", + "\n", + "Epoch 00481: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0262 - val_loss: 2.3859\n", + "\n", + "Epoch 00481: val_loss did not improve from 2.33556\n", + "Epoch 482/500\n", + "\n", + "Epoch 00482: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 1.9900 - val_loss: 2.3502\n", + "\n", + "Epoch 00482: val_loss did not improve from 2.33556\n", + "Epoch 483/500\n", + "\n", + "Epoch 00483: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9932 - val_loss: 2.3712\n", + "\n", + "Epoch 00483: val_loss did not improve from 2.33556\n", + "Epoch 484/500\n", + "\n", + "Epoch 00484: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0029 - val_loss: 2.3610\n", + "\n", + "Epoch 00484: val_loss did not improve from 2.33556\n", + "Epoch 485/500\n", + "\n", + "Epoch 00485: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0292 - val_loss: 2.3485\n", + "\n", + "Epoch 00485: val_loss did not improve from 2.33556\n", + "Epoch 486/500\n", + "\n", + "Epoch 00486: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0036 - val_loss: 2.3521\n", + "\n", + "Epoch 00486: val_loss did not improve from 2.33556\n", + "Epoch 487/500\n", + "\n", + "Epoch 00487: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0304 - val_loss: 2.3897\n", + "\n", + "Epoch 00487: val_loss did not improve from 2.33556\n", + "Epoch 488/500\n", + "\n", + "Epoch 00488: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 2.0249 - val_loss: 2.3887\n", + "\n", + "Epoch 00488: val_loss did not improve from 2.33556\n", + "Epoch 489/500\n", + "\n", + "Epoch 00489: LearningRateScheduler setting learning rate to 1e-05.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 80s 799ms/step - loss: 2.0184 - val_loss: 2.4082\n", + "\n", + "Epoch 00489: val_loss did not improve from 2.33556\n", + "Epoch 490/500\n", + "\n", + "Epoch 00490: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 
[==============================] - 80s 800ms/step - loss: 2.0207 - val_loss: 2.4115\n", + "\n", + "Epoch 00490: val_loss did not improve from 2.33556\n", + "Epoch 491/500\n", + "\n", + "Epoch 00491: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0193 - val_loss: 2.3330\n", + "\n", + "Epoch 00491: val_loss improved from 2.33556 to 2.33299, saving model to experimento_ssd300_fault_1.h5\n", + "Epoch 492/500\n", + "\n", + "Epoch 00492: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0064 - val_loss: 2.3920\n", + "\n", + "Epoch 00492: val_loss did not improve from 2.33299\n", + "Epoch 493/500\n", + "\n", + "Epoch 00493: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 2.0322 - val_loss: 2.3671\n", + "\n", + "Epoch 00493: val_loss did not improve from 2.33299\n", + "Epoch 494/500\n", + "\n", + "Epoch 00494: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 1.9830 - val_loss: 2.3444\n", + "\n", + "Epoch 00494: val_loss did not improve from 2.33299\n", + "Epoch 495/500\n", + "\n", + "Epoch 00495: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 2.0090 - val_loss: 2.3845\n", + "\n", + "Epoch 00495: val_loss did not improve from 2.33299\n", + "Epoch 496/500\n", + "\n", + "Epoch 00496: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 800ms/step - loss: 1.9609 - val_loss: 2.3364\n", + "\n", + "Epoch 00496: val_loss did not improve from 2.33299\n", + "Epoch 497/500\n", + "\n", + "Epoch 00497: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 798ms/step - loss: 1.9617 - val_loss: 2.3641\n", + "\n", + "Epoch 00497: val_loss did not improve from 2.33299\n", + "Epoch 498/500\n", + "\n", + "Epoch 00498: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9880 - val_loss: 2.3624\n", + "\n", + "Epoch 00498: val_loss did not improve from 2.33299\n", + "Epoch 499/500\n", + "\n", + "Epoch 00499: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 799ms/step - loss: 1.9825 - val_loss: 2.3824\n", + "\n", + "Epoch 00499: val_loss did not improve from 2.33299\n", + "Epoch 500/500\n", + "\n", + "Epoch 00500: LearningRateScheduler setting learning rate to 1e-05.\n", + "100/100 [==============================] - 80s 797ms/step - loss: 2.0526 - val_loss: 2.3589\n", + "\n", + "Epoch 00500: val_loss did not improve from 2.33299\n" + ] + } + ], + "source": [ + "# MODEL TRAINING\n", + "#####################################################################\n", + "# Instantiate two `DataGenerator` objects: One for training, one for validation.\n", + "######################################################################\n", + "# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n", + "\n", + "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "\n",
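+ "# Note: load_images_into_memory=False re-reads every image from disk on each epoch;\n", + "# for a dataset this small, load_images_into_memory=True (or a prebuilt\n", + "# hdf5_dataset_path) should trade RAM for noticeably faster epochs.\n",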
+ "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n", + "\n", + "\n", + "\n", + "# The XML parser needs to know which object class names to look for and in which order to map them to integers.\n", + "classes = ['background'] + labels\n", + "\n", + "train_dataset.parse_xml(images_dirs= [config['train']['train_image_folder']],\n", + "                        image_set_filenames=[config['train']['train_image_set_filename']],\n", + "                        annotations_dirs=[config['train']['train_annot_folder']],\n", + "                        classes=classes,\n", + "                        include_classes='all',\n", + "                        #classes = classes, \n", + "                        #include_classes= [1],\n", + "                        exclude_truncated=False,\n", + "                        exclude_difficult=False,\n", + "                        ret=False)\n", + "\n", + "val_dataset.parse_xml(images_dirs= [config['test']['test_image_folder']],\n", + "                      image_set_filenames=[config['test']['test_image_set_filename']],\n", + "                      annotations_dirs=[config['test']['test_annot_folder']],\n", + "                      classes=classes,\n", + "                      include_classes='all',\n", + "                      #classes = classes, \n", + "                      #include_classes=[1],\n", + "                      exclude_truncated=False,\n", + "                      exclude_difficult=False,\n", + "                      ret=False)\n", + "\n", + "#########################\n", + "# 3: Set the batch size.\n", + "#########################\n", + "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n", + "\n", + "##########################\n", + "# 4: Set the image transformations for pre-processing and data augmentation options.\n", + "##########################\n", + "# For the training generator:\n", + "\n", + "\n", + "# For the validation generator:\n", + "convert_to_3_channels = ConvertTo3Channels()\n", + "resize = Resize(height=img_height, width=img_width)\n", + "\n", + "#########################################\n", + "# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n", + "#########################################\n", + "# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\n", + "if config['model']['backend'] == 'ssd300':\n", + "    predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n", + "                       model.get_layer('fc7_mbox_conf').output_shape[1:3],\n", + "                       model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n", + "                       model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n", + "                       model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n", + "                       model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n", + "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n", + "                                        img_width=img_width,\n", + "                                        n_classes=n_classes,\n", + "                                        predictor_sizes=predictor_sizes,\n", + "                                        scales=scales,\n", + "                                        aspect_ratios_per_layer=aspect_ratios,\n", + "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n", + "                                        steps=steps,\n", + "                                        offsets=offsets,\n", + "                                        clip_boxes=clip_boxes,\n", + "                                        variances=variances,\n", + "                                        matching_type='multi',\n", + "                                        pos_iou_threshold=0.5,\n", + "                                        neg_iou_limit=0.5,\n", + "                                        normalize_coords=normalize_coords)\n", + "\n", + "elif config['model']['backend'] == 'ssd7':\n", + "    predictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n", + "                       model.get_layer('classes5').output_shape[1:3],\n", + "                       model.get_layer('classes6').output_shape[1:3],\n", + "                       model.get_layer('classes7').output_shape[1:3]]\n", + "    ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n", + "                                        img_width=img_width,\n", + "                                        n_classes=n_classes,\n", + "                                        predictor_sizes=predictor_sizes,\n", + "                                        scales=scales,\n",
+ "                                        aspect_ratios_global=aspect_ratios,\n", + "                                        two_boxes_for_ar1=two_boxes_for_ar1,\n", + "                                        steps=steps,\n", + "                                        offsets=offsets,\n", + "                                        clip_boxes=clip_boxes,\n", + "                                        variances=variances,\n", + "                                        matching_type='multi',\n", + "                                        pos_iou_threshold=0.5,\n", + "                                        neg_iou_limit=0.3,\n", + "                                        normalize_coords=normalize_coords)\n", + "\n", + "\n", + "\n", + " \n", + "data_augmentation_chain = DataAugmentationVariableInputSize(resize_height = img_height,\n", + "                                                            resize_width = img_width,\n", + "                                                            random_brightness=(-48, 48, 0.5),\n", + "                                                            random_contrast=(0.5, 1.8, 0.5),\n", + "                                                            random_saturation=(0.5, 1.8, 0.5),\n", + "                                                            random_hue=(18, 0.5),\n", + "                                                            random_flip=0.5,\n", + "                                                            n_trials_max=3,\n", + "                                                            clip_boxes=True,\n", + "                                                            overlap_criterion='area',\n", + "                                                            bounds_box_filter=(0.3, 1.0),\n", + "                                                            bounds_validator=(0.5, 1.0),\n", + "                                                            n_boxes_min=1,\n", + "                                                            background=(0,0,0))\n", + "#######################\n", + "# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n", + "#######################\n", + "\n", + "train_generator = train_dataset.generate(batch_size=batch_size,\n", + "                                         shuffle=True,\n", + "                                         transformations= [data_augmentation_chain],\n", + "                                         label_encoder=ssd_input_encoder,\n", + "                                         returns={'processed_images',\n", + "                                                  'encoded_labels'},\n", + "                                         keep_images_without_gt=False)\n", + "\n", + "val_generator = val_dataset.generate(batch_size=batch_size,\n", + "                                     shuffle=False,\n", + "                                     transformations=[convert_to_3_channels,\n", + "                                                      resize],\n", + "                                     label_encoder=ssd_input_encoder,\n", + "                                     returns={'processed_images',\n", + "                                              'encoded_labels'},\n", + "                                     keep_images_without_gt=False)\n", + "\n", + "# Summary of training instances\n", + "category_train_list = []\n", + "for image_label in train_dataset.labels:\n", + "    category_train_list += [i[0] for i in image_label]\n", + "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n", + "for i in summary_category_training.keys():\n", + "    print(i, ': {:.0f}'.format(summary_category_training[i]))\n", + "\n", + "\n", + "\n", + "# Get the number of samples in the training and validation datasets.\n", + "train_dataset_size = train_dataset.get_dataset_size()\n", + "val_dataset_size = val_dataset.get_dataset_size()\n", + "\n", + "print(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\n", + "print(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n", + "\n", + "\n", + "\n", + "##########################\n", + "# Define model callbacks.\n", + "#########################\n", + "\n", + "# TODO: Set the filepath under which you want to save the model.\n", + "model_checkpoint = ModelCheckpoint(filepath= config['train']['saved_weights_name'],\n", + "                                   monitor='val_loss',\n", + "                                   verbose=1,\n", + "                                   save_best_only=True,\n", + "                                   save_weights_only=False,\n", + "                                   mode='auto',\n", + "                                   period=1)\n", + "#model_checkpoint.best =\n", + "\n", + "csv_logger = CSVLogger(filename='log.csv',\n", + "                       separator=',',\n", + "                       append=True)\n", + "\n", + "learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n", + "                                                verbose=1)\n", + "\n", + "terminate_on_nan = TerminateOnNaN()\n", + "\n", + "callbacks = [model_checkpoint,\n", + "             csv_logger,\n", + "             learning_rate_scheduler,\n", + "             terminate_on_nan]\n", + "\n", + "\n", + "\n", + "batch_images, batch_labels = next(train_generator)\n", + "\n", + "\n", + "initial_epoch = 0\n", + "final_epoch = 500 #config['train']['nb_epochs']\n", + "steps_per_epoch = 100\n",
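+ "# Note: in validation_steps=ceil(val_dataset_size/batch_size*10) below, Python's\n", + "# left-to-right evaluation gives ceil((val_dataset_size/batch_size)*10), i.e. roughly\n", + "# ten passes over the validation set per epoch rather than one.\n", +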
"\n", + "history = model.fit_generator(generator=train_generator,\n", + " steps_per_epoch=steps_per_epoch,\n", + " epochs=final_epoch,\n", + " callbacks=callbacks,\n", + " validation_data=val_generator,\n", + " validation_steps=ceil(val_dataset_size/batch_size*10),\n", + " initial_epoch=initial_epoch,\n", + " verbose = 1 if config['train']['debug'] else 2)\n", + "\n", + "history_path = config['train']['saved_weights_name'].split('.')[0] + '_history'\n", + "\n", + "np.save(history_path, history.history)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['background', '1']" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "classes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dict_keys(['val_loss', 'loss', 'lr'])\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xl8nFXd///XZyaTfV+bJmmb7jvdKK2UHVpaFFSQTVCRn8Vbb0VvQUFuQf2qcLsgoghWVgUR2axAgZalbIWWbkD3dG/SNkmzNHsyy/n9ca4sTZM0LZ1MMvN5Ph55zMx1XTPXOdPpvOecc13nEmMMSimlIpcr1AVQSikVWhoESikV4TQIlFIqwmkQKKVUhNMgUEqpCKdBoJRSEU6DQKkeiMijIvKLXm67W0TO/7Svo1Rf0yBQSqkIp0GglFIRToNADXhOl8zNIvKxiNSLyEMikiMiL4tIrYi8JiJpHba/WEQ2iki1iCwXkXEd1k0VkbXO854CYjvt67Mist557goRmXyCZf6GiGwXkUoR+Y+IDHaWi4j8XkTKROSwU6eJzroFIrLJKVuJiNx0Qm+YUp1oEKhwcSlwATAa+BzwMvBjIBP7Of8ugIiMBp4EvgdkAUuAF0QkWkSigX8DfwfSgaed18V57jTgYeAGIAP4C/AfEYk5noKKyLnAncDlQC6wB/ins3oucKZTj1TgCqDCWfcQcIMxJgmYCLxxPPtVqjsaBCpc/NEYU2qMKQHeAVYaY9YZY5qB54GpznZXAC8ZY5YZY7zAb4E44DPALMAD3GOM8RpjngE+7LCPbwB/McasNMb4jTGPAc3O847Hl4GHjTFrnfLdCswWkWGAF0gCxgJijNlsjDngPM8LjBeRZGNMlTFm7XHuV6kuaRCocFHa4X5jF48TnfuDsb/AATDGBIB9QJ6zrsQcORPjng73hwI/cLqFqkWkGihwnnc8OpehDvurP88Y8wbwJ+A+oFREFolIsrPppcACYI+IvCUis49zv0p1SYNARZr92C90wPbJY7/MS4ADQJ6zrNWQDvf3Ab80xqR2+Is3xjz5KcuQgO1qKgEwxtxrjJkOTMB2Ed3sLP/QGHMJkI3twvrXce5XqS5pEKhI8y/gIhE5T0Q8wA+w3TsrgPcBH/BdEYkSkS8CMzs896/AN0XkNGdQN0FELhKRpOMswz+A60RkijO+8CtsV9ZuETnVeX0PUA80AX5nDOPLIpLidGnVAP5P8T4o1UaDQEUUY8xW4Brgj8Ah7MDy54wxLcaYFuCLwNeAKux4wnMdnrsaO07wJ2f9dmfb4y3D68BPgGexrZARwJXO6mRs4FRhu48qsOMYANcCu0WkBvimUw+lPjXRC9MopVRk0xaBUkpFuKAFgYg87JwUs6HDsnQRWSYiRc5tWk+voZRSKviC2SJ4FLiw07JbgNeNMaOA153HSimlQiioYwTOCTIvGmNaT5HfCpxtjDkgIrnAcmPMmKAVQCml1DFF9fH+clrPknTCILu7DUVkIbAQICEhYfrYsWOPe2cNLT52lNdTmJlAYkxfV1UppUJrzZo1h4wxWcfart9+OxpjFgGLAGbMmGFWr1593K+xencllz3wPou+PpMzRx/zvVBKqbAiInuOvVXfHzVU6nQJ4dyWBXNnreeH6gGySinVvb4Ogv8AX3XufxVYHNzd2STQcyWUUqp7wTx89EnsKftjRKRYRK4H7gIuEJEi7JTBdwVr/7YM9lZjQCmluhe0MQJjzFXdrDrvZLy+1+uluLiYpqambrdx+wL89eJcUpvL2Ly5otvt+rPY2Fjy8/PxeDyhLopSKkz128HiYykuLiYpKYlhw4Zx5GSR7RpafEhZHcMyEkiOG3hfpMYYKioqKC4uprCwMNTFUUqFqQE7xURTUxMZGRndhkA4EBEyMjJ6bPUopdSnNWCDAAjrEGgVCXVUSoXWgA4CpZRSn15YB0Hrb+lgHDVUXV3Nn//85+N+3oIFC6iurg5CiZRS6sSEdRAEU3dB4Pf3fNGoJUuWkJqaGqxiKaXUcRuwRw2F2i233MKOHTuYMmUKHo+HxMREcnNzWb9+PZs2beLzn/88+/bto6mpiRtvvJGFCxcCMGzYMFavXk1dXR3z589nzpw5rFixgry8PBYvXkxcXFyIa6aUijRhEQQ/e2Ejm/bXHLU8YAyNLX5iPW7cruMbdB0/OJk7Pjeh2/V33XUXGzZsYP369SxfvpyLLrqIDRs2tB3m+fDDD5Oenk5jYyOnnnoql156KRkZGUe8RlFREU8++SR//etfufzyy3n22We55hq9+qBSqm+FRRD0BzNnzjziWP97772X559/HoB9+/ZRVFR0VBAUFhYyZcoUAKZPn87u3bv7rLxKKdUqLIKgu1/ujV4/RaW1DE2PJyU+OqhlS
EhIaLu/fPlyXnvtNd5//33i4+M5++yzuzwXICYmpu2+2+2msbExqGVUSqmuhPVgcTCPGkpKSqK2trbLdYcPHyYtLY34+Hi2bNnCBx98EIQSKKXUyREWLYJQyMjI4PTTT2fixInExcWRk5PTtu7CCy/kgQceYPLkyYwZM4ZZs2aFsKRKKdWzoF6q8mTp6sI0mzdvZty4cT0+r8nrZ1tpLUPS40kNctdQMPWmrkop1ZmIrDHGzDjWdmHdNaSUUurYNAiUUirCaRAopVSE0yBQSqkIF9ZBEMzDR5VSKlyEdRAopZQ6tsgIgiA0CU50GmqAe+65h4aGhpNcIqWUOjHhHQRBvLiXBoFSKlxExJnFwRgj6DgN9QUXXEB2djb/+te/aG5u5gtf+AI/+9nPqK+v5/LLL6e4uBi/389PfvITSktL2b9/P+eccw6ZmZm8+eabQSidUkr1XngEwcu3wMFPjlrsMYbhLX5iPC5wHWfjZ9AkmH9Xt6s7TkO9dOlSnnnmGVatWoUxhosvvpi3336b8vJyBg8ezEsvvQTYOYhSUlK4++67efPNN8nMzDy+MimlVBCEd9dQH1m6dClLly5l6tSpTJs2jS1btlBUVMSkSZN47bXX+NGPfsQ777xDSkpKqIuqlFJHCY8WQTe/3H0+PzsP1pKfFk96QvDmGjLGcOutt3LDDTcctW7NmjUsWbKEW2+9lblz53L77bcHrRxKKXUiwrxFELwzCTpOQz1v3jwefvhh6urqACgpKaGsrIz9+/cTHx/PNddcw0033cTatWuPeq5SSoVaeLQIQqDjNNTz58/n6quvZvbs2QAkJiby+OOPs337dm6++WZcLhcej4f7778fgIULFzJ//nxyc3N1sFgpFXJhPQ11iy/AloM15KXFkZEQ0+O2/ZlOQ62UOhE6DTVBPY1AKaXCRlgHQZv+3+hRSqmQGdBBcMxurTBoEgyErjul1MA2YIMgNjaWioqKHr8oB/rso8YYKioqiI2NDXVRlFJhbMAeNZSfn09xcTHl5eXdbhMwhtLqJprKPZTHDsyqxsbGkp+fH+piKKXC2MD8dgQ8Hg+FhYU9btPk9XPRT17hhxeO4VtTR/ZRyZRSamAZsF1DvRHlsp1DPv9A7RxSSqngC0kQiMj3RWSjiGwQkSdFJCid4O62IAgE4+WVUios9HkQiEge8F1ghjFmIuAGrgzSvvC4BW9AWwRKKdWdUHUNRQFxIhIFxAP7g7Yjl0tbBEop1YM+DwJjTAnwW2AvcAA4bIxZ2nk7EVkoIqtFZHVPRwYdS5Rb8OoYgVJKdSsUXUNpwCVAITAYSBCRazpvZ4xZZIyZYYyZkZWVdcL787hdeLVFoJRS3QpF19D5wC5jTLkxxgs8B3wmWDuLcokeNaSUUj0IRRDsBWaJSLyICHAesDlYO/O4XXgD2iJQSqnuhGKMYCXwDLAW+MQpw6Jg7c/j1haBUkr1JCRnFhtj7gDu6It9Rbld+LRFoJRS3QrrM4vBjhHoUUNKKdW9sA8Cj1vPI1BKqZ6EfRBEuQWfnlmslFLdCvsg8Lj0PAKllOpJ2AdBlB41pJRSPYqAIHDppHNKKdWD8A6CPe8zuXmdDhYrpVQPwjsI3vkdl1U/pF1DSinVg/AOAreHKHw6xYRSSvUgvIPAFYXb+LVFoJRSPQjvIHB7iMKrYwRKKdWDkMw11Gfc0UQZP16jLQKllOpOeLcIXFG48WmLQCmlehDeQeD24DY+HSNQSqkehHcQuDy4jJ8WbREopVS3wjsIWlsEemaxUkp1K7yDwBWFy/jwBwxGB4yVUqpL4R0ETosAoL7FH+LCKKVU/xTeQeDyIBhcBKiqbwl1aZRSql8K7yBw29MkPPio1CBQSqkuhXcQuDwAROGnskGDQCmluhLeQeC2QeDBR7UGgVJKdSm8g8DV2jXkp7LeG+LCKKVU/xTeQeCOBiBa/DpYrJRS3QjzILBdQxlxLqq0a0gppboU3kHgDBanx4kGgVJKdSO8g8A5fDQ9VvTwUaWU6kZ4B4HTIkiLFap0sFgppboU3kHg7hAE2jWklFJdCu8gcA4fTXWCQCeeU0qpo4V3EDgtgtRowes31DX7QlwgpZTqf8I7CJwxguRoe2EaHSdQSqmjhXcQuFuDwD7U+YaUUupo4R0EzhhBWqyt5sHDTaEsjVJK9UshCQIRSRWRZ0Rki4hsFpHZQdmRM8VERpwAUFLdGJTdKKXUQBYVov3+AXjFGHOZiEQD8UHZi9M1lBBliI92U1KlQaCUUp31eRCISDJwJvA1AGNMCxCcznuna0gCPganxlFS3RCU3Sil1EAWiq6h4UA58IiIrBORB0UkofNGIrJQRFaLyOry8vIT25PTIsDvJS81jmJtESil1FFCEQRRwDTgfmPMVKAeuKXzRsaYRcaYGcaYGVlZWSe2J4/T4+RtYMygJIrK6mj26UXslVKqo1AEQTFQbIxZ6Tx+BhsMJ19sCiDQUMnUglRafAE27a8Jyq6UUmqg6vMgMMYcBPaJyBhn0XnApqDszOWGuDRoqGBalj2p7LbnN+DzB4KyO6WUGohCdR7Bd4AnRORjYArwq6DtKT4d1j9BzgPjueOUGjYdqGHXofqg7U4ppQaakASBMWa90/8/2RjzeWNMVdB2FpcOPnsi2bysSgC2ltYGbXdKKTXQhPeZxQDxGW13s1KTcQmU79oAO94IYaGUUqr/iIAgSG+76/HWMCIrkevWfQn+/oUQFkoppfqP8A+C6MT2+41VfOucEaEri1JK9UPhHwTDz4bM0fZ+YxUXTRrcvk4vVKOUUhEQBGMXwH9/COkjoLGK6KgOVfY1h65cSinVT4R/ELSKS4OGyiMWFZdVhKgwSinVf0ROEMSnw843oWpP26IPi4pDWCCllOofIicIhs2xt+/e3bZoe0lpiAqjlFL9R+QEwek3QkIWlKxtW6RdQ0opFUlBAJBSAAc/bntYXlGFV+cdUkpFuF4FgYjcKCLJYj0kImtFZG6wC3fSpRYc8TA60Mjq3cGb3UIppQaC3rYIvm6MqQHmAlnAdcBdQStVsKQcGQTJbi9LPjkQosIopVT/0NsgEOd2AfCIMeajDssGjkGTjnh4So6Hd4pO8OpnSikVJnp7zeI1IrIUKARuFZEkYOB1rk++Amr223MKXvweY9Ld7P64gdKaJnKSY0NdOqWUConetgiux15O8lRjTAPgwXYPDSwicMb/wKTLABiRZqv/7NpiXt9citEpJ5RSEai3QTAb2GqMqRaRa4D/BQ4Hr1hB5lzLeFB0M2eMyuTXr2zl+sdWs7HzZSwPfAzVe0NQQKWU6ju9DYL7gQYROQX4IbAH+FvQShVsLjdkjkZKN/Cziye0Ld5RXnfkdn85A+6ZhFJKhbPeBoHP2H6TS4A/GGP+ACQFr1h9oGAm7FvJ8MwEXv3emQBsL6s7xpOUUir89DYIakXkVuBa4CURcWPHCQauvBnQWAXVexgzKInCzISjWwRKKRUBehsEVwDN2PMJDgJ5wG+CVqq+0HqNgsqdAIzOSWRT5zECpZSKAL0KAufL/wkgRUQ+
CzQZYwbuGAFA+nB7u28VFK9h6pA0dlc0UFGn1yhQSkWW3k4xcTmwCvgScDmwUkQuC2bBgi5pECCw/E548FymD7KnVLy5VU8wU0pFlt6eUHYb9hyCMgARyQJeA54JVsGCTsSeWNZoL1YztXk1EwZnc/viDVwwPoeUmMiaj08pFbl6+23nag0BR8VxPLf/+vIz8NUXITqRqOKV3PnFSTS0+PnPR/v1MpZKqYjR2xbBKyLyKvCk8/gKYElwitSH8qfb28QcqC9nUl4Ko3MSefGj/Vx7Skpoy6aUUn2kt4PFNwOLgMnAKcAiY8yPglmwPpWQBfXliAjzJgziw92VVNXqoaRKqcjQ6+4dY8yzxpj/McZ83xjzfDAL1ecSMqH+EABzxw8iYGDFNp2eWikVGXoMAhGpFZGaLv5qRSR8Drp3WgQAE/OSyU2J5b2t+0NcKKWU6hs9jhEYYwb2NBK9lZBpjx4K+BGXm9NHZrJj655Ql0oppfpEbweLw1tCFpgALLkJxMXI7G+xeW0jxDjrjbGHmyqlVBga+IeAngwZI+zt6ofhwwcZkxIgGm/7er+36+cppVQY0CAAGHEeTG+/zs7omEqi8bWv97eEoFBKKdU3NAjAdvuMuqDtYS7lDE3r0GumQaCUCmM6RtCq4LS2u671/+B7qV5ocBboWcZKqTCmLYJWCZlwRzVExcHWlxi8f2n7Om0RKKXCWMiCQETcIrJORF4MVRmOIgJZY45aXF1XH4LCKKVU3whli+BGYHMI99+11CFHLbpv2cYQFEQppfpGSIJARPKBi4AHQ7H/Hg2afNSitTtL8QfMib3eCzfCy7d8ykIppVTwhKpFcA/wQyDQ3QYislBEVovI6vLyPrxYzJzvwaUPHbnM30JxVUPX2x/Lmkdh5f2fulhKKRUsfR4EzqUuy4wxa3razhizyBgzwxgzIysrq49KB7g9MOkySB3atuhy91tkPjoHmp0ZScs22z+llAoDoWgRnA5cLCK7gX8C54rI4yEoR8++9hJc8P8wriiuiFpOQu1OOLDervvzLPvXXAfv/7n7M4+ba9vve5uCX2allDoBfR4ExphbjTH5xphhwJXAG8aYa/q6HMeUWgCnfxcpmEUAO89Q0751R26z4o/w6q2w/omuX6O2tMN9ndZaKdU/6XkExzL35zyY/WNKTSp1b9zNru0duoQ2PGtvd7/b/uu/rgzWPGbvd/zyr9FprZVS/VNIg8AYs9wY89lQluGY8qYTN+0K7vNdQqapZOWjHY4Aqiiyt588DXfmQ2M1/Osr8MJ34c074YkvtW97uLhvy62UUr2kLYJeuHrmEKZeejPlJoXL3G8D8Kp/xtEbbnsF9jvdR2/dBb7G9nXFq3ScQCnVL2kQ9ILbJXx+aj6BwdOJkgC1Jo5/DP1/XJH0GMz9RfuGz98Avi6+7BNz4MMH4emv9l2hlVKqlzQIeklEyDn/uwAETv0Gc0YPYmW5h/s2dJq3Lzb16Cc3Vtvbba8EuZRKKXX8NAiOx4hz4Ja9pCz4KQsm5wLw4i57xvHh1PHUfvsTzA93Qe4p7c9JGQJRMe2Pi1fD8v+DrS/bx8ZA0WsQ6PbcOqWUCiox5gSnTuhDM2bMMKtXrw51MY7y8Lu72LK/kskf/5JH/Beyw+RxSn4KD141jix3PexZYae3bqzkkzeeInv7U2S76hDjXPTmp4dh3ROw+FtwyX0wtf8dRauUGrhEZI0xposBzSPp9Qg+ha/PKaSqPo+pa69vW/ZR8WFO/c0HXHlqAV+aMY8JScnEphdyjz9AcUshr8Z0mndo/1p729p9pJRSfUy7hj6ltIRotv9yPlfNLODnl0zguW99hpQ4D//8cB+X3r+CX7y0CYAWf4CtptPMpg2V9rwDgOh4eO2nsOJPfVsBpVTE0xbBSRDldnHnF9tnLX3xO3NYtqmUn7+4icc/2IvXZ3in6BAAiwNzuMT1rt3w0DaodybUq9gB7zshMHQ2PPN1+MabEJ/el1VRSkUgbREEQUF6PF+fU8gbPziLgvQ4nlq9D4D8tDhualnI0+PutRse/ARKbYuBj/7Z/gJPXgVVu2Hv+31bcKVURNIWQRANz0rk7ZvPYcvBWraV1nLJlDwuf+B9frmukS/FAktuwrii7ExGDYfan1jnzFHkbeziVZVS6uTSFkGQiQjjcpO5ZEoeAN86ZwQFg/Pa1n+ceh41MYPsg4RO023XlPRVMZVSEUyDoI+dPSabF757RtvjPxycREmjB4CDhV/AzLuzfeNlt9tDUFtO8KI4SinVCxoEIZY44jMMyc0B4Nn9GZQMnnfkBo/Mh1/lto8lKKXUSaZBECr5pwJw7/XnkZBsjwx67kA6c+7v5spnS2/rq5IppSKMBkGofO0luNUZA4hNwXji+dw5Z+ASYVjTPxjW9I+2TZvy58CON+APU2D9k3aaCqWUOkk0CEIlKgZiEu39Wf+FXPxHvjd3HJt+fiH//vbpjMlJolqSAdgy6Sa7XdUu+Pc34W+XHDlusP7J9ovkKKXUcdK5hvqxsu1r+dMjjzLm4h/w5bFuu/CTZ+C1O+z9eXfa+YnuKrCPf3o4NAVVSvVLvZ1rSFsE/Vjm8Kk8JfMpKq2DlHz7d/qNMOd/oGCWvV5yawgAbFocusIqpQYsDYJ+zOUSzhiVxeL1JVQ3tNiFInD+HfDVF2D4ORCdBNkT7Lp/fQXe+rVOaa2UOi4aBP3cd84dSX2zn/96fO2RK6Ki4Zrn4Adb4PN/bl/+5i/huW9AXXnfFlQpNWBpEPRzpxSk8t3zRvL+zgque2QVPn+HX/sulx1wzj0FrvwH3FZqxww2PAOLzobF/21bCSsX2QvgtDLGthq8TUcuV0pFJB0sHgB2HarnnN8uB+C+q6dxkXN1tG7tWWG7iErWQHONXZY3A8QFTdX22gf1zvTXiTmQNswexRSdZEMlbShkjbW3UbH2iKTEHPt8XxMMPxuiE47cZyBgg0kp1W/0drBYg2CA2Hqwlq88vJLSmmbOGp3FH6+eSnKsp+cntY4VfHAffPwUxKWBJ8F+iUfFQlwq7F9np8J2RdmpsP3Nxy5Mcp69bOfeleCJheZae12FhCy7j2nX2pbGpMvsY7/PBk9Srh3jUEr1CQ2CMPTrV7bw5+U72h7f/tnxXDt7KB73SfolHvDDvpXga4bGSqjcab/gB0+zoRGTBM11sPIB2P0OpBTYZXFp9nbLi0e+Xkyy/fM12dlVR82FyVfA0M+AJx5iUzQYlAoiDYIwVFbTxB3/2Uisx83z6+xZyQsmDeKs0Vms2VPFd84dRUF6fN8Uxu8Dd6dZzH0t4Gu08yKZAKx73LY43FEwZDasfhgCviOfkzHS/tWUwClX2S6oQ9tsd9WhIti+zHZTGQNjL7LbphfaINnzHgyeCtGJNlAaKm3wdC7XoSJwuSEhG9we2w2mVATQIAhj/oBhX2UDz64t5o9vbG9bPjwrgceum9l3YXC8GiqhcpdtOdSV2S/kgx/b5d5GqN1/Yq87/Bzb3dWxRZI0GFILwBMHez+wrRJx2yBIzoO8aTDhC+COgWFzbBd
XR94mcEcfPe7ha4G9K2DYGTZclOrHNAgixP7qRs749Zv4A+3/jlfNLGBiXgqL1+/n15dOZlhmAqt3V+L1G2aPyACgtKaJ1HgPMVH95MuspQGKXrVfzNV7bYti3OfsL39fkx3D2POebS1sXQINVXB475GvERVnWyStknKh6bANgMIzID7DthwOl8D+tdBYZbdLHw7jLoaWOvj4X+0D7FnjYN4v7OvsXG7HWQ58ZNclZNnWTWMVZI6BjBEw4lwbSpkjbbA9da0dcE8aBPGZkDwYCs8Cb4P9a2mArNG2tdN6EaIlN9vXKtsE835l91O9F5693o7rnHYDFJ4Jax6DQZOg9iDkjIfM0bDjTbu/nAl2ubghsdM1LjqrK7Nh2VgNtQegYGb7utqD0FJvy+P32hA9GZrr2qdXORZvk73tHNQnqnyrHRMbNufkvF4/p0EQQQ43ejnvd8s5VNdy1Lr0hGiuOLWA+52xhb99fSaFmQmc8es3iXIJ8yflsmZ3JZ89ZTC3zh/LYyt2k5cWT3qCh1+/spW7r5hCXmocAD5/gEavn6RjDVL3Fb/PjmXsW2XDYtQFsPZv9qim9BH2ixNjj3bqPBZRe9B2XaUUwNu/gYrtdtuO0ofbcZJWiYNg5Pmw9SUbAIk59mpyyfn2S7Jql/3ynXSZ7Y7a3+ncj65M+KLtPqvaffT+wQZIx6vXicuGZE9mfQvWP2FbL3O+D4e22ueNmgepQ2D327a1YwLw2k+PfO7s/4aiZTB6Lnz8NNQdtCG5+T92TGf0fNuNd/Bj+x4XnGavsV2+DfKn2zB84UbY/R4s+A0MPd0Ga8V2+x5tf90e0Xb9Utsq64qvxe6vYCY8eL7d76UPQWK2DaSAz77eyPPt5Vw/fNAeKTfyPLjo7qO7/gIBO2lj3jT4daFddsPbNjw9ce3beRvh1dvA3wJn/I/99wd45277vk26zD6u3GX3X1cGxg/TvgLv3mM/U5/5Drz8Q5j4RSg82wbYmsfsD5FBk7qu754V9vM74tye/11PgAZBhCkqrWXd3mqio1zcvngDNU0+bpk/lrte3gLYbqNDtc3UNPm6fY1rZg3h8Q/sr+zMxBgO1TUzPCuB/zprBG9sKWPt3iqavAFumT+WoRnxTC1I458f7iUxJor7l+/gD1dOZVtpLfMmDqKkqpFR2Yl4AwHue2M7bpeLuRNyGJdrJ9JrbPHT0OIjKdZDdFQ/OOzU12K/AKIT7H/omhI7pceL34fcKfZLZtCk9i+Zih2QOtT+B45JtL/q962C1Q/ZLztPHJz/Uzue0eAMvCflwiu3QP4M2xrYvsx+QedOsaHlrbdjIxO+AG/8EiqKYMo1sN4JrOuXwnMLYfe7MPly2zIo3WhbSgWzbCtj3d9t+dIKbcug8wD+8UjIgoxRtiust2JTbCus42s0Vh09NpQyxLaGStbYFtWp/5/9si7dYA9xru7U2jseo+dDwal2MsaKop63nXkDJOfC1pftgRJtxB7ckDYUVi2yi87/mW2Rrn/iyNc482b7YwJsOFbuoEvJ+RCfZluaoy6AuHR46ftH1nX6dfb9G3sRbH/NtlymXnNc1e9UTySTAAAS70lEQVRIgyCCef0BqhpayE6KZcknB7j39SL+cu10Fr29kydW2g/diKwEbjhrBMMyEvhwdyV7Kxp4avW+Xu8j2u3ikimDeXpNcdsyEft9mJcaR0l1I9lJMRRmJrByV2XbNueMySIvLa4tcADe+eE5JMVGsWZPFWePycbtEopKa9lT0cBf3t7BhMEp/OSz43G72n/VV9a38M2/r+Hnn5/A2EE2XDaUHCYzMYak2Cg2lBzGAO9tP8T1cwpJjY8+0bfz5DKmvXXibbR/8elHb9d02HbLJA92uo4a2s/daKi0R2l11VXTVGPHRPKm29dd86jtEqvZb4/0ikuFiZfaEGussl1uW160LYHYFBtsUbH21258ul2/6d92QD5pECC29eJttEHUUgeH99kB/Q8fghV/tL+8z/8ZvHePXZdSYPcZ8Nuuu9Shtgvs8D5br1axqc7BAlvt4yGfgXm/tPt/7w92WdY4GDwFPnqy/XlphfYQ5oZDtuzJg9tbcu5oexBC0TJbt3NvswcY/P2LNnBaJefZ927atTDly3Zix7ItUNP++T5umaNtcLTyxNt/y47dlzHJMP1rsOLe7l/n+tdssJ0ADQJ1lD0V9fz1nZ18//zRZCQe2XxuaPFx09MfMSIrkWtnD+XJlfs4f3w2/15XwqrdVWQlxjB3fA7eQIDdh+r56zu7jnr9IenxXD4jn98u3XbE8mi3i9R4D2W1xz5HYUxOEmNzk1i8/siB45HZieSmxJKVFENji5+XNxwEYFxuMjOHpZGZGMPvlm0jLd7D1CFpvLGljOykGMpqm7loUi6/u/wUDtU1MzglDpcTKP6AwRcIEBPlJhAwbDlYy4jshF6Nm3y4u5LROUmkxPWTbrL+oqeQ6kogYLv3SjfCkFm2hVS5CzJHHdmd522yXUyJ2fZx0TLbLTf5yvajxFrHg2IS4XCx/ZIN+NqDtuORbgE/FC21rabybTD8LKcLsVM3YsUOG1D+Ftj1ln1OSoENweTBzhjOHti21P6Kr95jQ7JyJ5x9C9QcsK1LdzRkj7dhtPVlSMi0dR5/se12Wna77XoEG4RJg21AlKy2r3uCNAhUUC16ewfLt5azYkcFACt/fB7JsR7iot0s+eQAz60tobK+mQeunU56fDRul7CjvJ6lmw4ya3gGPr/hmodWMjwzgaykGCbmpdDsDbBuXxWb9tfQ7LP94DnJMdzxuQnc/PRH1Lf4gfaWRyuPW/D6u/4ct27rdknbgPolUwYzf+IgfvPqVg43+hiSHsfavfbX4XfOHckP5o4B4FBdM795ZSu1zV6SYjz4jeHSafks31bGX97ayeCUWJ5cOIs/vbGdK2cOYV9lA7sO1fP9C0ZjjMEfMEQ553h4/QEO1TUzKDkWcb5oXt14kMn5KeSmxHVRcqU+PQ0C1Sfe236IA4ebuGx6/nE/t/WzJ50GcuuafZTXNnPOb5dz5xcncdXMIRw83MTqPZUMTU+gID2Ow41efrJ4I29vK2f5TWdz4z/X8VGx7ZvuGAxLvnsGl96/gkavDZGZw9JZt6+q2+DITophSkEqSzeVHlddhqTHs7fSdnP8+9uns3h9CY+8t5uLJucSE+Xi9c1lHG70ApCZGE1ynIed5fVEu11MHZLK1acN4aJJduqQ8rrmtnD4uLiaVzcepLrBy+2fG49bhOpGLz6/4ZEVu/jaZ4aRFOuh2etva+Ut31qGP2A4d2x223t7uMFLSry2XiKNBoEa8Lz+QI9nTTe2+KmobyY/LZ6GFh+7DtWTn2rPoXi7qJy0+GjmjMqkqLQWERiZnQRASXUjRaW1nDosnbe2lZOfFseqXZXkJMdy63OfUNdsBzYnDE7m/y6dzPefWs+onESWfHKwbd+PXncqf31nJ+9tr+hVXZJio6jtNFA/NCOe4qrGtpbK6JxE0hOi+WBnJTOHpTMuN4nH3t/Ttv25Y7MJGMPyreVtLZzpQ9M4UN
1Iiz/AyzeeSWlNE5/947sATkhEtZ1rcv64HFbtquCUglR+9YVJNHn9lNc2c8/rRXz7nJHsKKujtslHZX0zcycMYvrQNH7z6lYumTKYiYNTEIFDdS1kJbV3K7668SAf7Kzg3LHZ+AOGs8dkt60rKq3lqr9+wKPXzWRiXkqv3qdWK7YfYvPBWq6fU4jXH6C+2dd/xnkGkH4bBCJSAPwNGAQEgEXGmD/09BwNAtVXjDH4AoaSqkYGpcQS63HjDxjcLqHFF2DzgRoyk2LIS40jEDAcqm9m2aZSbnt+AzeeN4qR2Yls3F9Ds8/PI+/tBmDXnQswBvZWNnDB79/C6zc8tXAWpw3PoMnrxxcwvPjRfh5dsZstB2vJT4ujuMoOKM6bkMOPF4zjrW3l3L54Y1s5O3Z1tRqaEU+T18/hRi9njsrqtlWTGBPVFnY9mTE0jdV77LkWYwclERPl4qPiw1w0KZcd5XWcPSabB9468giZqUNSuXrmEP61eh8f7q5qW/7980cDsHxbGWeOyiIxJoomr5+vzynkKw+vYvOBGgozE7j6tCG89PGBti7HOSMziY5y8W7RIV7/wVnERbvJSLCBsL2sjlc2HGTGsHROK7QtvfQEe4DCKxsO4nYJ54zJYvOBWibmJR/V8uzs8Q/2cEp+KpPyjy+0ulJR10xafHTbeFSo9OcgyAVyjTFrRSQJWAN83hizqbvnaBCo/iwQMBSV1TFmUNIRy3+1ZDNTClJZMKl9ttj91Y08v66E/zprRJdfEs0+PzFRbt7bfogtB2u5ZtYQYqLcGGP48fMbEIFb5o+lyetnZ3k9/1y1l59dMpHXN5fyi5c2MzwzgdsuGsfUIWlsOVhDeW0zs4dn4DeGe14rYvbwDNwu4csPrjxq39fMGsIHOyuJ9bjYUGJPqhuSHk92UkxbIPRWT+M2HXUMm96I9bho8gYoSI9jX2X70TdZSTGUd3EwwuzhGby/s4Lk2CjuuXIK722vYEPJYRaeOZzzxuWw61A9f3y9iISYKP7+gW19fXTHXH7+wiaqG1qYmJfCkPR4BqXE8suXNhPlFv501TRS4jy8+Ml+rphRwHWPfsjMYems3FXJd84dSVltM995ch0XnzIYXyDAjxeMY1ByLKt225Ze67iRMQYRwRhDiz9AtNuFL2DaWsHby2r5w+vb+eG8MSc8W0C/DYKjCiCyGPiTMWZZd9toECh18hhjuOD3b3NaYToet4v3d1Rw1pgsfrxgXNs2gYChqqGF9IRoRIRdh+pJT4imscWPxy2U1jRTXNVAVlIML3x0gNE5iUwbmkZslJtn1uzj2tnD2FtZT3ZSLMmxHt7feYjZIzKJ9bh4e9shkmOj+L9XtrB2bzXZSTGcPSaLd4sOEeNxU9PopaK+/eTIwSmxXDgxl/K6Zl746MijyYZlxLO7wo7NJMXaVkZvAsjjFmYMTef9nb3r2juZLj5lME1eP0s3lZIYE4XHLdQ2+YiLdlPb5OPrpxdy07zR3PT0RyzdWMoHPz6PzMQTmx9rQASBiAwD3gYmGmNqOq1bCCwEGDJkyPQ9e/Yc9Xyl1Ilp/TUaSg++s5NfvLSZ2xaM4xtnDm9bXlzVwC3PfsJdl9pxDLfLRWGmPYfik+LDvF1UzvjcZMprm7n81AIaW/ws+eQAZ4zKJDs5lrKaJuJjonj43V1cNDmXwowE9lU1cPeybZw+IpOZhelcuegDGlp81DT5+Nwpg/nhvDGkJ0TzzcfXsGl/Df/72XFkJcZyamEa975exCPv7ebmeWOYmJfC+zsquHvZtu6qBdjxmefXlbQdINBZdJSLzIRoRg9KIjclFo/bxTNrimlwjoxrNX/iIO6/ZvoJv8f9PghEJBF4C/ilMea5nrbVFoFS4afFF+DtbeWcOza7z/vSG1p8uETYXlbHiKxE4qLtuSNNXj8iHHUuSefg3FFeR05yLFEuobK+pe3s+GWbSpk7PoeMxBiKSmvxBQwNLT72VDQQH+1m6cZSvnXOCFLioo8YdG/1TlE5i9fvZ0PJYS4Yn8OXTxvKoJQTn2epXweBiHiAF4FXjTF3H2t7DQKllDp+vQ2CPp/kRWysPgRs7k0IKKWUCq5QzPZ1OnAtcK6IrHf+FoSgHEoppYCoY29ychlj3gX0+oRKKdVP9IP5f5VSSoWSBoFSSkU4DQKllIpwGgRKKRXhNAiUUirCaRAopVSE0yBQSqkIp0GglFIRToNAKaUinAaBUkpFOA0CpZSKcBoESikV4TQIlFIqwmkQKKVUhNMgUEqpCKdBoJRSEU6DQCmlIpwGgVJKRTgNAqWUinAaBEopFeE0CJRSKsJpECilVITTIFBKqQinQaCUUhFOg0AppSKcBoFSSkU4DQKllIpwGgRKKRXhNAiUUirCaRAopVSE0yBQSqkIp0GglFIRToNAKaUinAaBUkpFOA0CpZSKcCEJAhG5UES2ish2EbklFGVQSill9XkQiIgbuA+YD4wHrhKR8X1dDqWUUlYoWgQzge3GmJ3GmBbgn8AlISiHUkopICoE+8wD9nV4XAyc1nkjEVkILHQe1onI1hPcXyZw6ASfO1BpnSOD1jkyfJo6D+3NRqEIAulimTlqgTGLgEWfemciq40xMz7t6wwkWufIoHWODH1R51B0DRUDBR0e5wP7Q1AOpZRShCYIPgRGiUihiEQDVwL/CUE5lFJKEYKuIWOMT0T+G3gVcAMPG2M2BnGXn7p7aQDSOkcGrXNkCHqdxZijuueVUkpFED2zWCmlIpwGgVJKRbiwDoJwncpCRB4WkTIR2dBhWbqILBORIuc2zVkuInKv8x58LCLTQlfyEyMiBSLypohsFpGNInKjszyc6xwrIqtE5COnzj9zlheKyEqnzk85B1wgIjHO4+3O+mGhLP+nISJuEVknIi86j8O6ziKyW0Q+EZH1IrLaWdann+2wDYIwn8riUeDCTstuAV43xowCXnceg63/KOdvIXB/H5XxZPIBPzDGjANmAd92/i3Duc7NwLnGmFOAKcCFIjIL+D/g906dq4Drne2vB6qMMSOB3zvbDVQ3Aps7PI6EOp9jjJnS4XyBvv1sG2PC8g+YDbza4fGtwK2hLtdJrN8wYEOHx1uBXOd+LrDVuf8X4Kquthuof8Bi4IJIqTMQD6zFnoF/CIhylrd9xrFH4c127kc520moy34Cdc3HfvGdC7yIPQE13Ou8G8jstKxPP9th2yKg66ks8kJUlr6QY4w5AODcZjvLw+p9cJr/U4GVhHmdnS6S9UAZsAzYAVQbY3zOJh3r1VZnZ/1hIKNvS3xS3AP8EAg4jzMI/zobYKmIrHGm1oE+/myHYoqJvtKrqSwiQNi8DyKSCDwLfM8YUyPSVdXspl0sG3B1Nsb4gSkikgo8D4zrajPndsDXWUQ+C5QZY9aIyNmti7vYNGzq7DjdGLNfRLKBZSKypYdtg1LncG4RRNpUFqUikgvg3JY5y8PifRARDzYEnjDGPOcsDus6tzLGVAPLseMjqSLS+gOuY73a6uysTwEq+7akn9rpwMUishs7K/G52BZCONcZY8x+57YMG/gz6
ePPdjgHQaRNZfEf4KvO/a9i+9Fbl3/FOdpgFnC4tck5UIj96f8QsNkYc3eHVeFc5yynJYCIxAHnYwdQ3wQuczbrXOfW9+Iy4A3jdCIPFMaYW40x+caYYdj/r28YY75MGNdZRBJEJKn1PjAX2EBff7ZDPVAS5EGYBcA2bN/qbaEuz0ms15PAAcCL/YVwPbZv9HWgyLlNd7YV7NFTO4BPgBmhLv8J1HcOtvn7MbDe+VsQ5nWeDKxz6rwBuN1ZPhxYBWwHngZinOWxzuPtzvrhoa7Dp6z/2cCL4V5np24fOX8bW7+n+vqzrVNMKKVUhAvnriGllFK9oEGglFIRToNAKaUinAaBUkpFOA0CpZSKcBoESgWZiJzdOpOmUv2RBoFSSkU4DQKlHCJyjXMNgPUi8hdn0rc6EfmdiKwVkddFJMvZdoqIfODMCf98h/niR4rIa851BNaKyAjn5RNF5BkR2SIiT0gPEyUp1dc0CJQCRGQccAV2ArApgB/4MpAArDXGTAPeAu5wnvI34EfGmMnYMzxblz8B3GfsdQQ+gz0DHOyMqd/DXhtjOHZeHaX6hXCefVSp43EeMB340PmxHoed6CsAPOVs8zjwnIikAKnGmLec5Y8BTztzxuQZY54HMMY0ATivt8oYU+w8Xo+9nsS7wa+WUsemQaCUJcBjxphbj1go8pNO2/U0J0tP3T3NHe770f97qh/RriGlrNeBy5w54VuvGTsU+3+kdebLq4F3jTGHgSoROcNZfi3wljGmBigWkc87rxEjIvF9WgulToD+KlEKMMZsEpH/xV4pyoWd2fXbQD0wQUTWYK+AdYXzlK8CDzhf9DuB65zl1wJ/EZGfO6/xpT6shlInRGcfVaoHIlJnjEkMdTmUCibtGlJKqQinLQKllIpw2iJQSqkIp0GglFIRToNAKaUinAaBUkpFOA0CpZSKcP8/sRt5z2OQtygAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "experimento_ssd300_fault_1.h5\n" + ] + } + ], + "source": [ + "#Graficar aprendizaje\n", + "\n", + "history_path =config['train']['saved_weights_name'].split('.')[0] + '_history'\n", + "\n", + "hist_load = np.load(history_path + '.npy',allow_pickle=True).item()\n", + "\n", + "print(hist_load.keys())\n", + "\n", + "# summarize history for loss\n", + "plt.plot(hist_load['loss'])\n", + "plt.plot(hist_load['val_loss'])\n", + "plt.title('model loss')\n", + "plt.ylabel('loss')\n", + "plt.xlabel('epoch')\n", + "plt.legend(['train', 'test'], loc='upper left')\n", + "plt.ylim((0, 10)) \n", + "plt.show()\n", + "\n", + "print(config['train']['saved_weights_name'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Evaluación del Modelo" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing image set 'train.txt': 100%|██████████| 33/33 [00:00<00:00, 112.45it/s]\n", + "Processing image set 'test.txt': 100%|██████████| 2/2 [00:00<00:00, 57.78it/s]\n", + "Number of images in the evaluation dataset: 2\n", + "\n", + "Producing predictions batch-wise: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s]\n", + "Matching predictions to ground truth, class 1/1.: 100%|██████████| 400/400 [00:00<00:00, 9288.89it/s]\n", + "Computing precisions and recalls, class 1/1\n", + "Computing average precision, class 1/1\n", + "400 instances of class 1 with average precision: 0.7948\n", + "mAP using the weighted average of precisions among classes: 0.7948\n", + "mAP: 0.7948\n", + "1 AP 0.795\n", + "\n", + " mAP 0.795\n" + ] + } + ], + "source": [ + "\n", + "config_path = 'config_300_fault_1.json'\n", + "\n", + "with open(config_path) as config_buffer:\n", + " config = json.loads(config_buffer.read())\n", + "\n", + " \n", + "model_mode = 'training'\n", + "# TODO: Set the path to the `.h5` file of the model to be loaded.\n", + "model_path = config['train']['saved_weights_name']\n", + "\n", + "# We need to create an SSDLoss object in order to pass that to the model loader.\n", + "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + "K.clear_session() # Clear previous models from memory.\n", + "\n", + "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + " 'L2Normalization': L2Normalization,\n", + " 'DecodeDetections': DecodeDetections,\n", + " 'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + " \n", + "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "\n", + "# 2: Parse the image and label lists for the training and validation datasets. 
+ "\n", + "\n", + " \n", + "train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n", + "\n", + "# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n", + "\n", + "\n", + "\n", + "# The XML parser needs to know which object class names to look for and in which order to map them to integers.\n", + "classes = ['background'] + labels\n", + "\n", + "train_dataset.parse_xml(images_dirs= [config['train']['train_image_folder']],\n", + "                        image_set_filenames=[config['train']['train_image_set_filename']],\n", + "                        annotations_dirs=[config['train']['train_annot_folder']],\n", + "                        classes=classes,\n", + "                        include_classes='all',\n", + "                        #classes = ['background', 'panel', 'cell'], \n", + "                        #include_classes=classes,\n", + "                        exclude_truncated=False,\n", + "                        exclude_difficult=False,\n", + "                        ret=False)\n", + "\n", + "val_dataset.parse_xml(images_dirs= [config['test']['test_image_folder']],\n", + "                      image_set_filenames=[config['test']['test_image_set_filename']],\n", + "                      annotations_dirs=[config['test']['test_annot_folder']],\n", + "                      classes=classes,\n", + "                      include_classes='all',\n", + "                      #classes = ['background', 'panel', 'cell'], \n", + "                      #include_classes=classes,\n", + "                      exclude_truncated=False,\n", + "                      exclude_difficult=False,\n", + "                      ret=False)\n", + "\n", + "#########################\n", + "# 3: Set the batch size.\n", + "#########################\n", + "batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "evaluator = Evaluator(model=model,\n", + "                      n_classes=n_classes,\n", + "                      data_generator=val_dataset,\n", + "                      model_mode='training')\n", + "\n", + "results = evaluator(img_height=img_height,\n", + "                    img_width=img_width,\n", + "                    batch_size=4,\n", + "                    data_generator_mode='resize',\n", + "                    round_confidences=False,\n", + "                    matching_iou_threshold=0.5,\n", + "                    border_pixels='include',\n", + "                    sorting_algorithm='quicksort',\n", + "                    average_precision_mode='sample',\n", + "                    num_recall_points=11,\n", + "                    ignore_neutral_boxes=True,\n", + "                    return_precisions=True,\n", + "                    return_recalls=True,\n", + "                    return_average_precisions=True,\n", + "                    verbose=True)\n", + "\n", + "mean_average_precision, average_precisions, precisions, recalls = results\n", + "total_instances = []\n", + "precisions = []\n", + "\n", + "for i in range(1, len(average_precisions)):\n", + "    \n", + "    print('{:.0f} instances of class'.format(len(recalls[i])),\n", + "          classes[i], 'with average precision: {:.4f}'.format(average_precisions[i]))\n", + "    total_instances.append(len(recalls[i]))\n", + "    precisions.append(average_precisions[i])\n", + "\n", + "if sum(total_instances) == 0:\n", + "    \n", + "    print('No test instances found.')\n", + "\n", + "else:\n", + "\n", + "    print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))\n", + "    print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))\n", + "\n", + "    for i in range(1, len(average_precisions)):\n", + "        print(\"{:<14}{:<6}{}\".format(classes[i], 'AP', round(average_precisions[i], 3)))\n", + "    print()\n", + "    print(\"{:<14}{:<6}{}\".format('','mAP', round(mean_average_precision, 3)))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ceil(val_dataset_size/batch_size)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Reload the model from the
weights.\n", + "Prediction" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Training on: \t{'1': 1}\n", + "\n" + ] + } + ], + "source": [ + "from imageio import imread\n", + "from keras.preprocessing import image\n", + "import time\n", + "\n", + "config_path = 'config_300_fault_1.json'\n", + "input_path = ['fault_jpg_1/']\n", + "output_path = 'result_ssd300_fault_1/'\n", + "\n", + "with open(config_path) as config_buffer:\n", + "    config = json.loads(config_buffer.read())\n", + "\n", + "makedirs(output_path)\n", + "###############################\n", + "# Parse the annotations\n", + "###############################\n", + "score_threshold = 0.25\n", + "score_threshold_iou = 0.5\n", + "labels = config['model']['labels']\n", + "categories = {}\n", + "#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} # category 0 is the background\n", + "for i in range(len(labels)): categories[labels[i]] = i+1\n", + "print('\\nTraining on: \\t' + str(categories) + '\\n')\n", + "\n", + "img_height = config['model']['input'] # Height of the model input images\n", + "img_width = config['model']['input'] # Width of the model input images\n", + "img_channels = 3 # Number of color channels of the model input images\n", + "n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n", + "classes = ['background'] + labels\n", + "\n", + "model_mode = 'training'\n", + "# TODO: Set the path to the `.h5` file of the model to be loaded.\n", + "model_path = config['train']['saved_weights_name']\n", + "\n", + "# We need to create an SSDLoss object in order to pass that to the model loader.\n", + "ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n", + "\n", + "K.clear_session() # Clear previous models from memory.\n", + "\n", + "model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n", + "                                               'L2Normalization': L2Normalization,\n", + "                                               'DecodeDetections': DecodeDetections,\n", + "                                               'compute_loss': ssd_loss.compute_loss})\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tiempo Total: 1.982\n", + "Tiempo promedio por imagen: 0.079\n", + "OK\n" + ] + } + ], + "source": [ + "image_paths = []\n", + "for inp in input_path:\n", + "    if os.path.isdir(inp):\n", + "        for inp_file in os.listdir(inp):\n", + "            image_paths += [inp + inp_file]\n", + "    else:\n", + "        image_paths += [inp]\n", + "\n", + "image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]\n", + "times = []\n", + "\n", + "\n", + "for img_path in image_paths:\n", + "    orig_images = [] # Store the images here.\n", + "    input_images = [] # Store resized versions of the images here.\n", + "    #print(img_path)\n", + "\n", + "    # preprocess image for network\n", + "    orig_images.append(imread(img_path))\n", + "    img = image.load_img(img_path, target_size=(img_height, img_width))\n", + "    img = image.img_to_array(img)\n", + "    input_images.append(img)\n", + "    input_images = np.array(input_images)\n", + "    # process image\n", + "    start = time.time()\n", + "    y_pred = model.predict(input_images)\n",
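+ "    # Note: decode_detections is assumed to filter by confidence_thresh, run per-class\n", + "    # non-maximum suppression at iou_threshold, and keep at most top_k boxes, with\n", + "    # coordinates relative to the model input size.\n",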
+ "    y_pred_decoded = decode_detections(y_pred,\n", + "                                       confidence_thresh=score_threshold,\n", + "                                       iou_threshold=score_threshold_iou,\n", + "                                       top_k=200,\n", + "                                       normalize_coords=True,\n", + "                                       img_height=img_height,\n", + "                                       img_width=img_width)\n", + "\n", + "\n", + "    #print(\"processing time: \", time.time() - start)\n", + "    times.append(time.time() - start)\n", + "    # correct for image scale\n", + "\n", + "    # visualize detections\n", + "    # Set the colors for the bounding boxes\n", + "    colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()\n", + "\n", + "    plt.figure(figsize=(20,12))\n", + "    plt.imshow(orig_images[0],cmap = 'gray')\n", + "\n", + "    current_axis = plt.gca()\n", + "    #print(y_pred)\n", + "    for box in y_pred_decoded[0]:\n", + "        # Transform the predicted bounding boxes from the model input size back to the original image dimensions.\n", + "\n", + "        xmin = box[2] * orig_images[0].shape[1] / img_width\n", + "        ymin = box[3] * orig_images[0].shape[0] / img_height\n", + "        xmax = box[4] * orig_images[0].shape[1] / img_width\n", + "        ymax = box[5] * orig_images[0].shape[0] / img_height\n", + "\n", + "        color = colors[int(box[0])]\n", + "        label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n", + "        current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n", + "        current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n", + "\n", + "    #plt.figure(figsize=(15, 15))\n", + "    #plt.axis('off')\n", + "    save_path = output_path + img_path.split('/')[-1]\n", + "    plt.savefig(save_path)\n", + "    plt.close()\n", + "    \n", + "file = open(output_path + 'time.txt','w')\n", + "\n", + "file.write('Tiempo promedio:' + str(np.mean(times)))\n", + "\n", + "file.close()\n", + "print('Tiempo Total: {:.3f}'.format(np.sum(times)))\n", + "print('Tiempo promedio por imagen: {:.3f}'.format(np.mean(times)))\n", + "print('OK')" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1 : 99\n" + ] + } + ], + "source": [ + "\n", + "# Summary of training instances\n", + "category_train_list = []\n", + "for image_label in train_dataset.labels:\n", + "    category_train_list += [i[0] for i in image_label]\n", + "summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n", + "for i in summary_category_training.keys():\n", + "    print(i, ': {:.0f}'.format(summary_category_training[i]))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1 : 99\n" + ] + } + ], + "source": [ + "for i in summary_category_training.keys():\n", + "    print(i, ': {:.0f}'.format(summary_category_training[i]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", +
"pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Primer_resultado_fault_1/config_300_fault_1.json b/Primer_resultado_fault_1/config_300_fault_1.json new file mode 100644 index 0000000..5a785b5 --- /dev/null +++ b/Primer_resultado_fault_1/config_300_fault_1.json @@ -0,0 +1,28 @@ +{ + "model" : { + "backend": "ssd300", + "input": 400, + "labels": ["1"] + }, + + "train": { + "train_image_folder": "Train&Test_S/images", + "train_annot_folder": "Train&Test_S/anns", + "train_image_set_filename": "Train&Test_S/train.txt", + + "train_times": 1, + "batch_size": 12, + "learning_rate": 1e-4, + "nb_epochs": 10, + "warmup_epochs": 3, + "saved_weights_name": "experimento_ssd300_fault_1.h5", + "debug": true + }, + + +"test": { + "test_image_folder": "Train&Test_S/images", + "test_annot_folder": "Train&Test_S/anns", + "test_image_set_filename": "Train&Test_S/test.txt" + } +} diff --git a/Primer_resultado_fault_1/experimento_ssd300_fault_1_history.npy b/Primer_resultado_fault_1/experimento_ssd300_fault_1_history.npy new file mode 100644 index 0000000..a8f115f Binary files /dev/null and b/Primer_resultado_fault_1/experimento_ssd300_fault_1_history.npy differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0011.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0011.jpg new file mode 100644 index 0000000..a23ddea Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0011.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0012.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0012.jpg new file mode 100644 index 0000000..e146689 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0012.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0094.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0094.jpg new file mode 100644 index 0000000..8237f9b Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0094.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0095.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0095.jpg new file mode 100644 index 0000000..eb6a1b0 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 11_DJI_0095.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 12_DJI_0003.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 12_DJI_0003.jpg new file mode 100644 index 0000000..52500ae Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 12_DJI_0003.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0007.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0007.jpg new file mode 100644 index 0000000..8d04227 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0007.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0008.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0008.jpg new file mode 100644 index 0000000..645e854 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0008.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0009.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0009.jpg new file mode 100644 index 
0000000..847ee85 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 14_DJI_0009.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0007.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0007.jpg new file mode 100644 index 0000000..10b1538 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0007.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0008.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0008.jpg new file mode 100644 index 0000000..77b9ea6 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0008.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0009.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0009.jpg new file mode 100644 index 0000000..792c3f4 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 17_DJI_0009.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0076.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0076.jpg new file mode 100644 index 0000000..a5afee0 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0076.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0080.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0080.jpg new file mode 100644 index 0000000..e5fd195 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0080.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0082.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0082.jpg new file mode 100644 index 0000000..7a6d44b Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 20_DJI_0082.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0001.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0001.jpg new file mode 100644 index 0000000..bb128ef Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0001.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0003.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0003.jpg new file mode 100644 index 0000000..3a30588 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0003.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0009.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0009.jpg new file mode 100644 index 0000000..5662b6d Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 28_DJI_0009.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 46_DJI_0054.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 46_DJI_0054.jpg new file mode 100644 index 0000000..5dca50d Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 46_DJI_0054.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 46_DJI_0055.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 46_DJI_0055.jpg new file mode 100644 index 0000000..ea66f0f Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 46_DJI_0055.jpg differ diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 
diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0006.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0006.jpg new file mode 100644 index 0000000..a887dfd Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0006.jpg differ
diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0015.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0015.jpg new file mode 100644 index 0000000..fc47c68 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0015.jpg differ
diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0016.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0016.jpg new file mode 100644 index 0000000..b6a87bf Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 50_DJI_0016.jpg differ
diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 9_DJI_0077.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 9_DJI_0077.jpg new file mode 100644 index 0000000..0aaccfb Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 9_DJI_0077.jpg differ
diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 9_DJI_0080.jpg b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 9_DJI_0080.jpg new file mode 100644 index 0000000..d312b02 Binary files /dev/null and b/Primer_resultado_fault_1/result_ssd300_fault_1/Mision 9_DJI_0080.jpg differ
diff --git a/Primer_resultado_fault_1/result_ssd300_fault_1/time.txt b/Primer_resultado_fault_1/result_ssd300_fault_1/time.txt
new file mode 100644
index 0000000..744d39e
--- /dev/null
+++ b/Primer_resultado_fault_1/result_ssd300_fault_1/time.txt
@@ -0,0 +1 @@
+Tiempo promedio:0.07926161766052246
\ No newline at end of file
diff --git a/config_300_fault_1.json b/config_300_fault_1.json
index cafcca2..5a785b5 100644
--- a/config_300_fault_1.json
+++ b/config_300_fault_1.json
@@ -6,9 +6,9 @@
 },
 
 "train": {
- "train_image_folder": "Train&Test_1/images",
- "train_annot_folder": "Train&Test_1/anns",
- "train_image_set_filename": "Train&Test_1/train.txt",
+ "train_image_folder": "Train&Test_S/images",
+ "train_annot_folder": "Train&Test_S/anns",
+ "train_image_set_filename": "Train&Test_S/train.txt",
 
 "train_times": 1,
 "batch_size": 12,
@@ -21,8 +21,8 @@
 
 
 "test": {
- "test_image_folder": "Train&Test_1/images",
- "test_annot_folder": "Train&Test_1/anns",
- "test_image_set_filename": "Train&Test_1/test.txt"
+ "test_image_folder": "Train&Test_S/images",
+ "test_annot_folder": "Train&Test_S/anns",
+ "test_image_set_filename": "Train&Test_S/test.txt"
 }
 }
diff --git a/config_7_fault_1.json b/config_7_fault_1.json
index c692688..4f7524d 100644
--- a/config_7_fault_1.json
+++ b/config_7_fault_1.json
@@ -6,9 +6,9 @@
 },
 
 "train": {
- "train_image_folder": "Train&Test_S/images",
- "train_annot_folder": "Train&Test_S/anns",
- "train_image_set_filename": "Train&Test_S/train.txt",
+ "train_image_folder": "Train&Test_S/Train/images",
+ "train_annot_folder": "Train&Test_S/Train/anns",
+ "train_image_set_filename": "Train&Test_S/Train/train.txt",
 
 "train_times": 1,
 "batch_size": 8,
@@ -21,8 +21,8 @@
 
 
 "test": {
- "test_image_folder": "Train&Test_S/images",
- "test_annot_folder": "Train&Test_S/anns",
- "test_image_set_filename": "Train&Test_S/test.txt"
+ "test_image_folder": "Train&Test_S/Test/images",
+ "test_annot_folder": "Train&Test_S/Test/anns",
+ "test_image_set_filename": "Train&Test_S/Test/test.txt"
 }
 }
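The config_7_fault_1.json change above repoints training at Train&Test_S/Train/ and testing at Train&Test_S/Test/ instead of one shared folder. A small sanity check along these lines can confirm the split exists on disk before training; it assumes train.txt lists one image filename stem per line and that annotations are per-image .xml files, neither of which is confirmed by the diff itself.

    import os

    # Verify the Train/ split that config_7_fault_1.json now points at.
    # One stem per line in train.txt and .jpg/.xml pairs are assumptions
    # about this dataset, not facts taken from the diff.
    base = 'Train&Test_S/Train'

    with open(os.path.join(base, 'train.txt')) as f:
        stems = [line.strip() for line in f if line.strip()]

    missing = [s for s in stems
               if not os.path.isfile(os.path.join(base, 'images', s + '.jpg'))
               or not os.path.isfile(os.path.join(base, 'anns', s + '.xml'))]

    print(len(stems), 'listed;', len(missing), 'missing an image or annotation')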
"Train&Test_S/anns", - "test_image_set_filename": "Train&Test_S/test.txt" + "test_image_folder": "Train&Test_S/Test/images", + "test_annot_folder": "Train&Test_S/Test/anns", + "test_image_set_filename": "Train&Test_S/Test/test.txt" } } diff --git a/experimento_ssd7_fault_1.h5 b/experimento_ssd7_fault_1.h5 new file mode 100644 index 0000000..e926478 Binary files /dev/null and b/experimento_ssd7_fault_1.h5 differ diff --git a/experimento_ssd7_fault_1_history.npy b/experimento_ssd7_fault_1_history.npy new file mode 100644 index 0000000..4b094dc Binary files /dev/null and b/experimento_ssd7_fault_1_history.npy differ diff --git a/log.csv b/log.csv index 22e8d99..8e60b57 100644 --- a/log.csv +++ b/log.csv @@ -558,3 +558,1264 @@ epoch,loss,val_loss 10,5.376049788070447,4.478155612945557 11,5.385305519176252,7.463174819946289 12,5.432557795986985,4.685098171234131 +0,15.881673319138363,7.375675678253174 +1,8.356029669443766,5.9648237228393555 +2,6.9354832737582255,5.591615676879883 +3,6.576785355237914,5.613137245178223 +4,6.206373137854487,5.805556297302246 +5,5.9745859580078955,5.410709381103516 +6,5.846244939341532,5.404609203338623 +7,5.833713872836587,5.171739101409912 +8,5.6691007133400735,5.144720554351807 +9,5.625074021497929,5.335570812225342 +10,5.514733474762713,5.682697772979736 +11,5.41908200625178,5.01742696762085 +12,5.269865041207877,4.892789840698242 +13,5.1854388362071555,4.693246841430664 +14,5.070313432885776,4.715047836303711 +15,5.0184673756279805,4.756409168243408 +16,4.977027483976604,4.668238162994385 +17,5.03410340784681,4.338890075683594 +18,4.85641484429466,4.351319789886475 +19,4.729835234053148,4.209253311157227 +20,4.702004563905888,4.361381530761719 +21,4.6291504153113925,4.131518363952637 +22,4.540262085492493,4.005181789398193 +23,4.449438978605764,4.169116020202637 +24,4.271978601123072,4.139697551727295 +25,4.221815269501483,3.9072606563568115 +26,4.138884769148658,4.988559722900391 +27,4.074649817287435,3.9170775413513184 +28,4.039631126356906,3.816544532775879 +29,3.869656247728852,3.8606245517730713 +30,3.740425213805986,3.910066843032837 +31,3.783481373812983,3.765537738800049 +32,3.688262802378683,3.7563486099243164 +33,3.742541152709836,3.627863883972168 +34,3.635184340138253,3.604227304458618 +35,3.692311818333348,3.6819028854370117 +36,3.7258855939236257,3.885371446609497 +37,3.7273904234985187,3.726266860961914 +38,3.6165273332465895,3.738358497619629 +39,3.5355864162341124,3.6938042640686035 +40,3.5645025023997157,3.8448326587677 +41,3.575968391563977,3.7542030811309814 +42,3.470710121643316,3.532132148742676 +43,3.5186657977234472,3.734591245651245 +44,3.434543408879792,3.6501681804656982 +45,3.5027135984124538,3.603182077407837 +46,3.4974465122639806,3.6228671073913574 +47,3.4627913902500995,3.6162970066070557 +48,3.4552725420335983,3.7527081966400146 +49,3.4142114777382604,3.5594005584716797 +50,3.4301328944900056,3.5592427253723145 +51,3.3953572599374633,3.7217860221862793 +52,3.4193801964567005,3.540640115737915 +53,3.350956703726537,3.723790407180786 +54,3.332990003216819,3.999763250350952 +55,3.341965870127652,3.7415072917938232 +56,3.3121060902806003,3.5359549522399902 +57,3.3493398931435734,3.6459293365478516 +58,3.5077789500762857,3.5454273223876953 +59,3.3389676799566286,3.502479076385498 +60,3.3368763598824067,3.492331027984619 +61,3.2513954847888216,3.372159004211426 +62,3.270837154310471,3.532351016998291 +63,3.2585178571435995,3.374575138092041 +64,3.1948175169731097,3.0984811782836914 +65,3.2312478830768887,3.2195627689361572 
+66,3.160445174339357,3.283327341079712 +67,3.1842058649480016,3.326854944229126 +68,3.199797490637049,3.5159189701080322 +69,3.201327982000824,3.0328762531280518 +70,3.158118564574445,3.1784555912017822 +71,3.0952306761728647,3.324601173400879 +72,3.075337693217015,3.3989617824554443 +73,3.0578822607551115,3.2314391136169434 +74,3.0573137436640683,3.282822370529175 +75,3.167580363536076,3.103592872619629 +76,3.019150211511414,3.3556346893310547 +77,3.063656429503854,3.1094558238983154 +78,3.033766615942973,3.169346570968628 +79,3.0936825425247028,2.9857537746429443 +80,2.8942711060963147,3.137526035308838 +81,2.8340868742004726,3.1282784938812256 +82,2.8098318254361385,3.081157922744751 +83,2.7242096509855513,3.0028302669525146 +84,2.750763719023411,3.0320188999176025 +85,2.688757450854192,3.003370523452759 +86,2.6111973046606827,3.004093885421753 +87,2.6666578301292025,3.010838031768799 +88,2.637845463765775,2.9381844997406006 +89,2.5894111784667344,2.9442050457000732 +90,2.6072447364921465,2.9340615272521973 +91,2.5937541833992213,2.901705741882324 +92,2.5580534402291195,2.869645357131958 +93,2.5461848366812725,2.9039292335510254 +94,2.5516191515766207,2.8995492458343506 +95,2.498734673622194,2.9772801399230957 +96,2.565249661983521,2.8111507892608643 +97,2.4362051444626895,2.876401662826538 +98,2.5065703288086105,2.8407721519470215 +99,2.463761842218342,2.8256804943084717 +100,2.4318306299506642,2.7866525650024414 +101,2.4730073950920834,2.783376693725586 +102,2.4074946801084263,2.754483222961426 +103,2.4527692853427325,2.7466492652893066 +104,2.47984890067285,2.738999128341675 +105,2.430154104323738,2.7358579635620117 +106,2.428105773495846,2.751729965209961 +107,2.3722474903111888,2.735196113586426 +108,2.404930322956324,2.709954023361206 +109,2.449514294582638,2.699392080307007 +110,2.4592020108849217,2.704482316970825 +111,2.4285112759073035,2.7048871517181396 +112,2.3377642214623955,2.7098512649536133 +113,2.3647702747209847,2.703526496887207 +114,2.4030131166572466,2.70902943611145 +115,2.3702982524053646,2.718344211578369 +116,2.450216845855401,2.7063214778900146 +117,2.394759929797305,2.7185616493225098 +118,2.416050981628439,2.7158002853393555 +119,2.4263992910489076,2.699984550476074 +120,2.3861866185710605,2.7090277671813965 +121,2.437077779913209,2.6965367794036865 +122,2.40972381326743,2.700421094894409 +123,2.3781525013557245,2.7079601287841797 +124,2.3352096689854815,2.689880132675171 +125,2.3411187881994637,2.6951041221618652 +126,2.425331566574138,2.674070119857788 +127,2.3668846738794462,2.6877613067626953 +128,2.3641136978887407,2.6803808212280273 +129,2.341672804115254,2.676715135574341 +130,2.435125226531524,2.6694090366363525 +131,2.401895518198975,2.675246238708496 +132,2.3418212398196436,2.663054943084717 +133,2.3654867094722603,2.672536849975586 +134,2.333191567285834,2.692758560180664 +135,2.381863135407991,2.677367687225342 +136,2.3376338680585227,2.6772494316101074 +137,2.384818537033871,2.681792974472046 +138,2.4008382074839414,2.686776876449585 +139,2.342548298054054,2.6794042587280273 +140,2.359852362718504,2.660256862640381 +141,2.371143901380596,2.6548993587493896 +142,2.3068764903506294,2.643223524093628 +143,2.2849913796547,2.6531982421875 +144,2.310250307623632,2.6704165935516357 +145,2.37241519963155,2.6563966274261475 +146,2.2492310796186774,2.652865171432495 +147,2.3437071687843885,2.665412187576294 +148,2.3927333260494503,2.672112464904785 +149,2.314203890208003,2.6558427810668945 +150,2.3442902590988117,2.6429052352905273 
+151,2.368868349354124,2.6681411266326904 +152,2.3621096776681636,2.6447041034698486 +153,2.3327624047484643,2.631779432296753 +154,2.2965333676729047,2.644305944442749 +155,2.3604985660688103,2.644063949584961 +156,2.2805610703512498,2.6224019527435303 +157,2.332057881550711,2.614557981491089 +158,2.3205046358160493,2.620645046234131 +159,2.321278472687308,2.6014182567596436 +160,2.378242120391033,2.618088722229004 +161,2.3390923598806603,2.631119966506958 +162,2.3737238246023815,2.6258726119995117 +163,2.3116003265146348,2.5996532440185547 +164,2.347205031470317,2.592092752456665 +165,2.2968043868483248,2.594717025756836 +166,2.3139396606247282,2.5825107097625732 +167,2.336645218916745,2.5698466300964355 +168,2.3103573867995344,2.585277557373047 +169,2.3577535035180266,2.570096015930176 +170,2.2756100178414536,2.579211950302124 +171,2.351941598208789,2.5718772411346436 +172,2.294573994607873,2.5573418140411377 +173,2.2888349106266324,2.564650535583496 +174,2.303667943549091,2.568376302719116 +175,2.3010128199077045,2.5709242820739746 +176,2.319495083200834,2.5369129180908203 +177,2.298347094403301,2.5417861938476562 +178,2.2868030595649134,2.55072021484375 +179,2.289347357581032,2.5314292907714844 +180,2.272905391308527,2.559802293777466 +181,2.2744739179402753,2.530998468399048 +182,2.321645053595873,2.547484874725342 +183,2.228498441971615,2.559534788131714 +184,2.3228542234728247,2.5471041202545166 +185,2.345108349901454,2.572962522506714 +186,2.3126381203654027,2.5463387966156006 +187,2.301704987150724,2.5498929023742676 +188,2.28135520268526,2.566157341003418 +189,2.257599696151567,2.5368130207061768 +190,2.2937986059267015,2.538170337677002 +191,2.2647079846514666,2.543246030807495 +192,2.2723923924833294,2.5171337127685547 +193,2.29328614864193,2.5373051166534424 +194,2.2842376920767635,2.519249200820923 +195,2.2629963032880984,2.5084049701690674 +196,2.3420118715593725,2.5499958992004395 +197,2.2209068046278784,2.4976320266723633 +198,2.2666890591951416,2.5370054244995117 +199,2.282095333266128,2.511399030685425 +200,2.2849978916651548,2.480984687805176 +201,2.2708060121016542,2.5116794109344482 +202,2.2177025431492288,2.534011125564575 +203,2.3199355793908767,2.537384271621704 +204,2.23185184411197,2.4815635681152344 +205,2.224377914530332,2.5245752334594727 +206,2.2382957029082795,2.496988296508789 +207,2.3195596280474753,2.5283453464508057 +208,2.2056773314710525,2.5045931339263916 +209,2.241046477728384,2.518791675567627 +210,2.3311104992105136,2.523860454559326 +211,2.319528997595844,2.5051522254943848 +212,2.266416108575764,2.512946128845215 +213,2.2148420950372474,2.5479562282562256 +214,2.2348017604624637,2.527203321456909 +215,2.2678729760224554,2.5218818187713623 +216,2.2378323438706773,2.5374791622161865 +217,2.259658019073674,2.5319032669067383 +218,2.2484022560821244,2.5194995403289795 +219,2.2290094792030812,2.52644681930542 +220,2.216770108931703,2.519836664199829 +221,2.2232757134398584,2.5085196495056152 +222,2.2392864824965475,2.4828426837921143 +223,2.261641844699943,2.5009422302246094 +224,2.2119862633764904,2.4980099201202393 +225,2.225164644399846,2.478156805038452 +226,2.25463686842736,2.4626948833465576 +227,2.2056115011428292,2.4645938873291016 +228,2.2580598112023167,2.4577479362487793 +229,2.2683002541625434,2.463399887084961 +230,2.1898214066710717,2.48294734954834 +231,2.186906837962304,2.4623465538024902 +232,2.1840648924718136,2.484057903289795 +233,2.2075639472670385,2.4435806274414062 +234,2.173723088298247,2.4906272888183594 
+235,2.206345216498349,2.4778432846069336 +236,2.21625816822052,2.4629242420196533 +237,2.238000662515209,2.4257633686065674 +238,2.247759646730996,2.4472618103027344 +239,2.1834110583531436,2.44305419921875 +240,2.226325114351527,2.4601502418518066 +241,2.250926129804934,2.456170082092285 +242,2.1825862064023758,2.478611469268799 +243,2.2289474913469776,2.484898805618286 +244,2.168356116026477,2.4765846729278564 +245,2.2312184936019315,2.5027430057525635 +246,2.2083806241240747,2.4649839401245117 +247,2.2744261032896613,2.45207142829895 +248,2.241567925796197,2.48469614982605 +249,2.1866571848009198,2.4815423488616943 +250,2.179433058845541,2.45687198638916 +251,2.2273726570508785,2.4826548099517822 +252,2.244199167480261,2.4716646671295166 +253,2.12715442161091,2.473435163497925 +254,2.1504420029045126,2.4661874771118164 +255,2.220137847866609,2.4508512020111084 +256,2.243177991095788,2.4528560638427734 +257,2.1630029749805337,2.4285261631011963 +258,2.1692954727349556,2.424243450164795 +259,2.156670632909556,2.4161109924316406 +260,2.1694128562059323,2.4259090423583984 +261,2.2132842505010664,2.445253849029541 +262,2.1043895505816557,2.481628894805908 +263,2.1972251714737605,2.4537289142608643 +264,2.184383065564106,2.4478919506073 +265,2.193652620081042,2.465968608856201 +266,2.1848305752881543,2.4388301372528076 +267,2.212471119714368,2.460390090942383 +268,2.2176272527116243,2.448953151702881 +269,2.1645759503263218,2.4426918029785156 +270,2.2112150718470684,2.415754795074463 +271,2.1126985790951003,2.439558506011963 +272,2.202685675763954,2.448338031768799 +273,2.198804367140788,2.430708408355713 +274,2.200688388829674,2.4546713829040527 +275,2.2317753140218253,2.412391424179077 +276,2.1846055692158215,2.41095232963562 +277,2.184584537164761,2.4286105632781982 +278,2.202254692605146,2.426581382751465 +279,2.1737632475367032,2.4448604583740234 +280,2.1700449315576607,2.4267237186431885 +281,2.150391628371922,2.4362635612487793 +282,2.1463815629969503,2.4204530715942383 +283,2.13742690366474,2.4749083518981934 +284,2.173257542241172,2.447052478790283 +285,2.1585336913854616,2.4463326930999756 +286,2.1532440009664318,2.4754011631011963 +287,2.113059232605251,2.4033255577087402 +288,2.1299038381602524,2.434706926345825 +289,2.1646835790957257,2.4333736896514893 +290,2.1802718584154217,2.4344568252563477 +291,2.1792663855513696,2.448399305343628 +292,2.1992101672568607,2.4092636108398438 +293,2.172026732961878,2.4833426475524902 +294,2.1431665446517902,2.4724440574645996 +295,2.098902832289211,2.4684951305389404 +296,2.1313877063486166,2.426060438156128 +297,2.174288471648089,2.4211394786834717 +298,2.134814486803253,2.4089856147766113 +299,2.132476067997779,2.4622888565063477 +300,2.174833444548563,2.459872007369995 +301,2.1933644690800236,2.4427649974823 +302,2.1038804327109855,2.440248966217041 +303,2.1334403860471554,2.4646315574645996 +304,2.147203309939859,2.4213626384735107 +305,2.1749110611647935,2.444471836090088 +306,2.139680826696453,2.438563585281372 +307,2.097625986148751,2.4429707527160645 +308,2.0973880443650956,2.4618008136749268 +309,2.1080817754002292,2.412468910217285 +310,2.1265082678508236,2.4152474403381348 +311,2.182172478707025,2.3989200592041016 +312,2.181772105375493,2.4245193004608154 +313,2.149027172659264,2.4514312744140625 +314,2.134376917288154,2.4353504180908203 +315,2.1214422033008504,2.420356273651123 +316,2.1375286419534945,2.3919239044189453 +317,2.1627947150524047,2.401273488998413 +318,2.136007459676883,2.438209295272827 
+319,2.171851704355146,2.397061347961426 +320,2.1420612689259917,2.3843002319335938 +321,2.1074114687111463,2.4187633991241455 +322,2.163180526814174,2.3858251571655273 +323,2.1588323496343005,2.3975257873535156 +324,2.142024565457648,2.420259475708008 +325,2.1163445476625786,2.4071192741394043 +326,2.0462252054292436,2.4162936210632324 +327,2.1302104012517904,2.404348373413086 +328,2.1124283234929777,2.410168409347534 +329,2.0618336938057675,2.4502975940704346 +330,2.1591057442839205,2.387420654296875 +331,2.050837726215196,2.4484050273895264 +332,2.1123903778658253,2.4090919494628906 +333,2.146318983966713,2.459496259689331 +334,2.1166323859834932,2.416254997253418 +335,2.100630650078568,2.4175143241882324 +336,2.1549097039719043,2.4146695137023926 +337,2.102922279978059,2.4363322257995605 +338,2.1383343119712226,2.4137954711914062 +339,2.095107249732888,2.4105234146118164 +340,2.089595561796199,2.422175407409668 +341,2.0939173464554206,2.453106641769409 +342,2.1504876603872316,2.44063138961792 +343,2.1138383817803015,2.418400287628174 +344,2.1212830618226883,2.3929786682128906 +345,2.085264908520337,2.4446115493774414 +346,2.093044743186138,2.3986618518829346 +347,2.0902120946863367,2.4301929473876953 +348,2.1279743084790597,2.414482593536377 +349,2.082658919480329,2.4016337394714355 +350,2.0964599058478663,2.429827928543091 +351,2.1117636358380643,2.39005446434021 +352,2.0805975723136316,2.4364752769470215 +353,2.096475504399645,2.4296374320983887 +354,2.0738446257744565,2.4054207801818848 +355,2.0971355500117026,2.42106556892395 +356,2.1534976549954115,2.4047751426696777 +357,2.0589033150867806,2.404226303100586 +358,2.0825968466169846,2.4166862964630127 +359,2.143816816059705,2.4166104793548584 +360,2.080436834847245,2.3937325477600098 +361,2.1109501712960625,2.381377935409546 +362,2.0937988261111102,2.362335205078125 +363,2.0670566594568194,2.394319534301758 +364,2.0890236630465817,2.4385342597961426 +365,2.112520248428677,2.4278557300567627 +366,2.1035674212738993,2.385653018951416 +367,2.0741854065754373,2.400710105895996 +368,2.1640698061976837,2.379450798034668 +369,2.0957691013325785,2.377753496170044 +370,2.1035011755964144,2.4017953872680664 +371,2.0762595828287607,2.3996572494506836 +372,2.09006932808203,2.409036159515381 +373,2.0946829514425307,2.4017202854156494 +374,2.0903123938744983,2.393583059310913 +375,2.1129291356421946,2.405625820159912 +376,2.0860091079779663,2.4471607208251953 +377,2.0724047441898317,2.398470163345337 +378,2.0926668656944254,2.3763267993927 +379,2.1011560559272766,2.3724403381347656 +380,2.065736650770951,2.3865044116973877 +381,1.984386580191776,2.3686935901641846 +382,2.027756544735914,2.3918991088867188 +383,2.1103926172698224,2.3729119300842285 +384,2.114878270216794,2.354022264480591 +385,2.0559931452808486,2.3669590950012207 +386,2.1117109651461607,2.375464677810669 +387,2.053718146251398,2.379971742630005 +388,2.0553464273937414,2.3885421752929688 +389,2.1115769232326373,2.3598856925964355 +390,2.0650736262428016,2.3714187145233154 +391,2.010359362500613,2.4156394004821777 +392,2.1493085564319703,2.3745031356811523 +393,2.0472259865794586,2.393484115600586 +394,2.108329564495816,2.4125680923461914 +395,2.031235399947829,2.4481089115142822 +396,2.0780509476440803,2.390174388885498 +397,2.1258152589771915,2.409898519515991 +398,2.062787445754381,2.3668434619903564 +399,2.0658394254195915,2.364100694656372 +400,2.082701088626528,2.4076836109161377 +401,2.0622011406869913,2.387784957885742 +402,2.0179976114460167,2.381817579269409 
+403,2.0744506042511737,2.4052798748016357 +404,2.0625546998483935,2.410189628601074 +405,2.0189623001160997,2.341459274291992 +406,2.0615276941836207,2.392237424850464 +407,2.100911670549689,2.3671202659606934 +408,2.0555681805519708,2.4154958724975586 +409,2.088665930951228,2.3768904209136963 +410,2.0403145697850946,2.4380381107330322 +411,2.0274787020618326,2.3742964267730713 +412,2.0766367140363475,2.427424192428589 +413,2.089307167224728,2.361572027206421 +414,2.049872740412928,2.3785154819488525 +415,2.071720056520785,2.38339900970459 +416,2.0389311157715095,2.3745739459991455 +417,2.056290562536151,2.36765193939209 +418,2.0997031565572395,2.3601438999176025 +419,2.0195827308727545,2.3894307613372803 +420,2.04706708157095,2.3623979091644287 +421,2.0556617903579126,2.390589475631714 +422,2.0864955446700635,2.394789457321167 +423,2.0458761960349228,2.385775566101074 +424,2.060648931505902,2.4122469425201416 +425,2.0250966457969812,2.366981029510498 +426,2.0134673264760736,2.39845609664917 +427,2.053281574301381,2.399228572845459 +428,2.0455518723833466,2.4607012271881104 +429,2.0645496699075934,2.413451910018921 +430,2.022797135381751,2.3874261379241943 +431,1.9962005153988622,2.401998519897461 +432,2.0094156008650237,2.3511033058166504 +433,2.0203185185708636,2.378697395324707 +434,1.9862256634787578,2.3683369159698486 +435,2.058259401724189,2.3521006107330322 +436,2.048370451875072,2.371403932571411 +437,2.0412823260642528,2.3868401050567627 +438,2.0428282698103777,2.3805932998657227 +439,2.100702221927747,2.3675994873046875 +440,2.0556316028174004,2.4117469787597656 +441,1.995883980304084,2.431476593017578 +442,2.0165477238717626,2.4066131114959717 +443,2.029742553058697,2.4046456813812256 +444,2.0639684180797606,2.3749711513519287 +445,2.0325737882181594,2.378682851791382 +446,2.0494681642230916,2.3742425441741943 +447,2.0508506782048403,2.3607211112976074 +448,2.047786170016221,2.363994836807251 +449,2.0173565867161556,2.427478075027466 +450,2.0003817178897703,2.368389129638672 +451,2.0559441404915897,2.35809326171875 +452,2.0343062146807886,2.3709330558776855 +453,2.0677210751931088,2.3356316089630127 +454,2.0293530922769847,2.385078191757202 +455,2.025521018849407,2.343632936477661 +456,2.026231319118261,2.415574789047241 +457,1.9881544090359589,2.354979991912842 +458,2.0491763632693796,2.3609724044799805 +459,1.998161525752304,2.4260621070861816 +460,2.020299034691899,2.36303448677063 +461,1.990207225815152,2.3737518787384033 +462,2.0696349751397114,2.358226776123047 +463,1.9956797439543927,2.3426735401153564 +464,2.0031369869325726,2.4339914321899414 +465,2.065352822844274,2.364129066467285 +466,1.9823655652218177,2.3501689434051514 +467,1.9868480155513462,2.3670759201049805 +468,2.0430417158298337,2.4298572540283203 +469,2.0137414893165966,2.3355565071105957 +470,2.0798689367335887,2.4133965969085693 +471,2.031592505829211,2.383260488510132 +472,1.9859629380898398,2.380925178527832 +473,2.0613049320693886,2.3997981548309326 +474,1.9877038537968732,2.3530142307281494 +475,2.0156059978438203,2.4219415187835693 +476,1.9796217975564483,2.358930826187134 +477,2.0257577356915384,2.3678383827209473 +478,2.042026695006532,2.39223051071167 +479,1.965091101804936,2.379812479019165 +480,2.0264260518778245,2.385875701904297 +481,1.9954212575010914,2.3501858711242676 +482,1.9920498402307079,2.3712189197540283 +483,2.0072856491852846,2.361006498336792 +484,2.0266711444802623,2.348459005355835 +485,2.0073979719458226,2.3520631790161133 +486,2.024714571578626,2.3896994590759277 
+487,2.020155272848619,2.388653516769409 +488,2.02148582792412,2.4082274436950684 +489,2.029456819435556,2.411524772644043 +490,2.0169413369861457,2.332989454269409 +491,2.009509516022186,2.3919973373413086 +492,2.0300684468947576,2.3671255111694336 +493,1.9827609993720967,2.3443689346313477 +494,2.0101036853296557,2.3845434188842773 +495,1.9646477215296565,2.3363873958587646 +496,1.9630032334822776,2.364137649536133 +497,1.9886632391152654,2.362448215484619 +498,1.983261665791192,2.3823893070220947 +499,2.054877500390746,2.358912467956543 +0,98.59305665018773,102.67535400390625 +1,59.74460257337393,69.42134094238281 +2,44.86596101766062,63.32090759277344 +3,35.872177591765606,55.52248001098633 +0,18.309019000393818,48.556888580322266 +1,18.44688238081385,46.15229034423828 +2,18.6424168358057,44.146522521972656 +3,17.775085828609622,42.220306396484375 +4,17.390023000904772,39.91164016723633 +5,16.00797714914223,37.843387603759766 +6,16.834486633945225,36.26311111450195 +7,16.254171908227473,34.567630767822266 +8,17.244038839106338,32.61912536621094 +9,15.418105769872016,30.702686309814453 +10,15.283213965879764,29.637784957885742 +11,15.543550244469085,28.732946395874023 +12,15.461333039670938,27.561737060546875 +13,14.470104773839315,25.995304107666016 +14,14.397848586620363,25.20619010925293 +15,14.351592158751526,24.72223663330078 +16,14.711095720040994,23.90822982788086 +17,15.49972624739769,25.061656951904297 +18,14.707511304184916,23.161535263061523 +19,14.117039993161061,23.228193283081055 +20,14.614822104451441,23.57022476196289 +21,14.431401229359473,23.380224227905273 +22,14.971145454000254,23.094696044921875 +23,14.064097729300933,23.52645492553711 +24,15.161434326899474,23.961524963378906 +25,14.55630746304663,23.57516860961914 +26,13.792699972355399,23.760412216186523 +27,14.849537603861629,23.813405990600586 +28,13.70066730441943,25.10789680480957 +29,14.333727941850876,23.716938018798828 +30,14.440530849737433,23.53561019897461 +31,14.28217526342048,23.507152557373047 +32,12.647278059405917,23.768409729003906 +33,13.099475806025783,24.208541870117188 +34,15.241013675439554,24.331371307373047 +35,14.262424488483397,25.370960235595703 +36,13.18557945064368,24.5015869140625 +37,13.275160759524569,21.921960830688477 +38,14.385775838950675,21.712862014770508 +39,14.244441561218178,22.67209243774414 +40,14.121157838998597,22.206228256225586 +41,13.650420477344815,22.11162757873535 +42,14.500104839210614,22.99717140197754 +43,14.2165450984663,24.298847198486328 +44,13.318035685723745,23.260610580444336 +45,13.276212863766204,22.914810180664062 +46,11.975583071265715,21.605653762817383 +47,12.860350167069189,22.10746955871582 +48,13.742263346991681,23.20090675354004 +49,13.067719727917448,22.66254997253418 +50,13.162811701564113,24.672523498535156 +51,12.291338499625308,24.674636840820312 +52,13.50450067963105,23.008262634277344 +53,13.685628709091478,24.737361907958984 +54,13.604871452341937,23.815664291381836 +55,13.475675605033917,24.060890197753906 +56,12.136713381359298,21.99675941467285 +57,13.538001663353528,25.221912384033203 +58,13.394955258551843,23.42654800415039 +59,13.088826469244685,25.3453311920166 +60,13.17103674106442,24.126598358154297 +61,13.67201107186698,23.89624786376953 +62,13.409336201826298,22.88789939880371 +63,14.935741019183999,21.297508239746094 +64,12.881717027862216,27.551223754882812 +65,13.010794503162602,24.175630569458008 +66,13.606789347261435,21.52002716064453 +67,13.322744207955449,21.314823150634766 +68,12.988979112224943,20.956754684448242 
+69,12.861498780731285,22.018224716186523 +70,13.467781065591698,21.03614044189453 +71,13.793220890315418,21.718149185180664 +72,14.535490579111375,25.06163215637207 +73,12.491569804363563,25.4558162689209 +74,13.098640551034372,28.079750061035156 +75,13.051791143027573,35.31846237182617 +76,13.608522934991806,27.242931365966797 +77,12.552488796067822,26.436803817749023 +78,12.741128630469216,26.469478607177734 +79,13.435135390589146,26.389610290527344 +80,12.502585743688433,25.295583724975586 +81,13.460709710861746,22.354663848876953 +82,13.473579742869394,28.75642967224121 +83,13.62366780533128,28.55095672607422 +84,12.72436131573503,25.338420867919922 +85,12.602442957664447,22.18197250366211 +86,13.038902096917258,20.929941177368164 +87,12.360940948818945,22.274267196655273 +88,12.187521964474454,20.815202713012695 +89,12.765757199529082,26.31867027282715 +90,13.314240360779724,26.03536605834961 +91,13.787995429638306,25.777345657348633 +92,12.435528290044385,23.9021053314209 +93,13.351185734980113,21.037948608398438 +94,12.573928969805358,20.930835723876953 +95,12.660676067466632,21.299793243408203 +96,13.380145145696906,21.899520874023438 +97,12.96967354758841,23.779373168945312 +98,12.562040409535088,20.954404830932617 +99,12.894259374862795,22.497034072875977 +100,12.28375488421956,22.768707275390625 +101,13.19599585702049,22.168865203857422 +102,12.83613539651564,21.8887882232666 +103,12.392621870249346,21.657922744750977 +104,12.007316490609899,21.15534210205078 +105,11.439029240153467,21.094539642333984 +106,12.4888472908833,21.006845474243164 +107,12.541115881636618,21.025697708129883 +108,11.75819817031112,21.007125854492188 +109,13.488317664203748,20.8796443939209 +110,13.162211787148458,21.3548641204834 +111,13.50965756486482,23.215757369995117 +112,12.104070092810959,23.874469757080078 +113,11.557300607904752,21.922216415405273 +114,13.346384371983582,21.47045135498047 +115,12.122838088072063,21.169769287109375 +116,12.80381063934243,21.738086700439453 +117,12.218986551508268,21.439422607421875 +118,13.80027027859714,21.974058151245117 +119,11.905491190970107,23.47684097290039 +120,11.998342666054292,22.2279052734375 +121,11.598589907578432,21.575653076171875 +122,12.589943219270628,22.153242111206055 +123,12.423618881838848,24.137680053710938 +124,12.184386738011094,25.509050369262695 +125,12.137922134971099,25.158498764038086 +126,12.383320503078949,23.07334327697754 +127,11.444705577495972,23.18815040588379 +128,12.812143839022768,24.671533584594727 +129,11.824597782270136,26.324817657470703 +130,12.543570866350267,25.01198387145996 +131,12.095618637770983,26.358322143554688 +132,12.749210738356172,27.999706268310547 +133,11.570505466617522,24.733169555664062 +134,12.119708726451572,24.135828018188477 +135,11.885237228643017,24.678308486938477 +136,12.225352173945943,28.241289138793945 +137,11.285812212920643,24.589387893676758 +138,12.662256227851888,24.041120529174805 +139,12.057718864555568,23.269636154174805 +140,12.150592685720252,26.281225204467773 +141,12.743933328165996,26.088075637817383 +142,11.818692466600345,25.507911682128906 +143,12.664263073040289,25.876575469970703 +144,11.374854905079106,28.0947265625 +145,12.045808944545808,24.39802360534668 +146,12.350641342859502,24.20514488220215 +147,12.133706365684073,28.945968627929688 +148,12.086499341850072,28.684898376464844 +149,12.242362804568756,28.639358520507812 +150,13.080301504369002,27.837602615356445 +151,11.619713161812454,27.825654983520508 +152,12.120855842039436,27.68004608154297 
+153,11.476226785852084,28.017160415649414 +154,12.752698602572165,28.17348861694336 +155,10.913811762911097,28.300107955932617 +156,11.850735255093277,28.392166137695312 +0,18.259778742569345,9.929030418395996 +1,9.519550451164037,7.887963771820068 +2,8.593435066597339,7.922017574310303 +3,8.22241302666937,7.275017738342285 +4,7.837418607023896,7.262777805328369 +5,7.6982389392904755,7.102909564971924 +6,7.904128151303741,6.649874210357666 +7,7.445181189990435,7.934605598449707 +8,7.281032003563821,6.322686195373535 +9,7.4331981687519795,6.303368091583252 +10,7.236770662453656,6.831284999847412 +11,7.031833540841085,6.271876811981201 +12,6.835178650692308,6.176564693450928 +13,6.969886094494596,6.355589866638184 +14,7.108205158639019,7.318484783172607 +15,6.855620966295455,6.014745235443115 +16,6.8220756666256435,6.05953311920166 +17,6.743036478027011,6.857476234436035 +18,6.866922932034942,5.772882461547852 +19,6.677296779194816,6.178677558898926 +20,6.617641378813284,5.825047492980957 +21,6.405501651504059,6.377925395965576 +22,6.432463073991036,5.74095344543457 +23,6.700751799653596,5.983301162719727 +24,6.317921877556991,5.8078203201293945 +25,6.355279603291079,5.78232479095459 +26,6.248643867326368,6.040361404418945 +27,6.163193216765609,5.554721832275391 +28,6.376748040725625,5.6859540939331055 +29,6.427805097616336,5.576755523681641 +30,6.186310397831556,5.3413262367248535 +31,6.1785479295449175,5.752665042877197 +32,6.08522281204972,5.741584300994873 +33,6.080919902396137,5.46055793762207 +34,6.107214878165657,5.826045513153076 +35,6.040591512778799,5.337527751922607 +36,5.90760734230686,5.828795433044434 +37,5.9857503012881255,5.479652404785156 +38,5.990311868184269,5.382695198059082 +39,5.982991996837897,5.549236297607422 +40,5.888478049815027,5.2697434425354 +41,5.730013313345428,5.4604620933532715 +42,5.922563552856445,5.373895645141602 +43,5.794673295620361,5.50067663192749 +44,5.791320525333083,5.349364757537842 +45,5.8605682089803,5.236660480499268 +46,5.803195549490673,5.260746002197266 +47,5.91418048796277,5.1766767501831055 +48,5.791901231136894,5.193906307220459 +49,5.747705918192212,5.250124931335449 +50,5.716505645731164,5.349728107452393 +51,5.686270097945626,5.304551124572754 +52,5.71594696357602,5.4308061599731445 +53,5.776020726975693,5.183679103851318 +54,5.630546247601834,5.128717422485352 +55,5.765814787703134,5.550536632537842 +56,5.870920959545416,5.3058905601501465 +57,5.631493537237599,5.3277506828308105 +58,5.619446886041777,5.398120403289795 +59,5.479598475412062,5.164195537567139 +60,5.615070184504954,5.259687423706055 +61,5.675869690264509,5.356075763702393 +62,5.586595380663547,5.428970813751221 +63,5.535050954740768,5.260526180267334 +64,5.588672056875594,5.744375705718994 +65,5.9720169231092575,5.38411283493042 +66,6.34545110291941,5.5751729011535645 +67,5.851850350697835,5.561216354370117 +68,5.925328687361215,5.307160377502441 +69,5.636172194571846,5.352664470672607 +70,5.559000596322648,5.175745010375977 +71,5.737387068589961,5.450139045715332 +72,5.793440182137554,5.211631774902344 +73,5.851182565011613,5.410022258758545 +74,5.551975069643691,5.467459678649902 +75,5.725226681628734,5.425684452056885 +76,5.8584900775242374,5.036645889282227 +77,5.717840255768487,5.346608638763428 +78,5.612494660982969,5.4594550132751465 +79,5.561546131561363,5.379159450531006 +80,5.596457880264407,5.110055923461914 +81,5.701549783389639,5.218481063842773 +82,5.427473534651793,5.031703948974609 +83,5.689141464493256,5.228032112121582 
+84,5.483385646050892,5.066976070404053 +85,5.440537730201346,5.101001262664795 +86,5.5524904656475185,5.0327534675598145 +87,5.721621188545747,5.101315021514893 +88,5.82499321953195,5.101744651794434 +89,5.410742948100742,4.974808692932129 +90,5.60546575904867,5.0701141357421875 +91,5.522617342693558,5.143996238708496 +92,5.509673934541541,5.101085186004639 +93,5.394866431441554,5.175982475280762 +94,5.436023928428608,5.055948734283447 +95,5.3682472348538015,5.142648220062256 +96,5.443277496732873,5.088404655456543 +97,5.312855829958056,5.071862697601318 +98,5.414841588251597,5.1160688400268555 +99,5.469365494127819,5.21682071685791 +100,5.231376104667539,4.966191291809082 +101,5.149707334242985,4.91472053527832 +102,5.100584798028099,4.948541641235352 +103,5.10691579964643,4.941493988037109 +104,5.0996057759838465,4.95589017868042 +105,5.235353313934576,4.951779365539551 +106,5.066499119899312,4.908158302307129 +107,5.032381486502915,4.903717517852783 +108,5.057555149296649,4.898251533508301 +109,5.232942401385698,4.930909156799316 +110,5.04716633321154,4.918262481689453 +111,5.0727280336114955,4.921173572540283 +112,5.111494499477533,4.959497451782227 +113,5.0436797206993,4.951436996459961 +114,5.109944956828853,4.926906585693359 +115,5.108078298672952,4.895373821258545 +116,5.147764729543993,4.883907794952393 +117,5.027682178351795,4.88873815536499 +118,5.232755098186556,4.939098358154297 +119,5.113609593310863,4.858057975769043 +120,5.2598363606091745,4.893451690673828 +121,5.115374277198249,4.885416030883789 +122,5.0912700504957815,4.886414527893066 +123,5.261337017818108,4.886699676513672 +124,4.986728421977309,4.8646955490112305 +125,5.01597070434113,4.883338451385498 +126,5.018451273603725,4.859218597412109 +127,5.24298388059022,4.853057384490967 +128,5.153382320819824,4.8970537185668945 +129,5.01966350020115,4.92483377456665 +130,5.03767493904614,4.909350395202637 +131,4.998312768234544,4.877500057220459 +132,4.921524446731692,4.8684773445129395 +133,5.087724189289281,4.847572326660156 +134,5.002303599011995,4.859246253967285 +135,4.997283406738364,4.858850955963135 +136,4.949215302701856,4.8523783683776855 +137,4.985402325518449,4.878350734710693 +138,4.977534065454467,4.852563858032227 +139,4.946527163187663,4.871365547180176 +140,4.990887565248993,4.865853309631348 +141,4.970032203424854,4.865931034088135 +142,5.154248112537822,4.882564067840576 +143,4.93070945427918,4.844860076904297 +144,4.945318183067384,4.883275032043457 +145,5.090561025129642,4.858691215515137 +146,4.939322018168602,4.798882484436035 +147,4.980155556338359,4.882704734802246 +148,5.0358219850258745,4.8594794273376465 +149,4.944445402160977,4.808207988739014 +150,4.968069588455907,4.82637882232666 +151,5.0274332403485245,4.8173112869262695 +152,4.869860725766632,4.83699893951416 +153,4.9285883708610845,4.844727993011475 +154,4.9582050341726,4.835329532623291 +155,4.9290011689188695,4.855822563171387 +156,4.930994071492707,4.842042922973633 +157,4.890503271029947,4.827426910400391 +158,4.924265080641661,4.821841716766357 +159,4.991001563111183,4.841434001922607 +160,4.9120256718390625,4.8304853439331055 +161,4.990477554154981,4.832113265991211 +162,4.991890601955903,4.838512420654297 +163,5.111289652318902,4.8492751121521 +164,5.215101544798557,4.850467205047607 +165,4.93200325511132,4.8572845458984375 +166,4.841474762379797,4.8404221534729 +167,5.068453673121065,4.8330793380737305 +168,4.88805607034335,4.849000453948975 +169,4.899694105315078,4.836804389953613 +170,4.881635430723185,4.850560665130615 
+171,4.8781559967539945,4.851832866668701 +172,4.915134587574526,4.838128089904785 +173,5.015505143667112,4.844712257385254 +174,4.900085629819199,4.830416202545166 +175,4.914414856603237,4.831276893615723 +176,4.948342922273059,4.814130783081055 +177,4.96644226276907,4.836696624755859 +178,4.94022220731433,4.851717472076416 +179,4.917827520448441,4.852512359619141 +180,4.909715827869134,4.8446044921875 +181,4.88787701351395,4.8482770919799805 +182,4.920659625237904,4.842310428619385 +183,4.869795354900308,4.83669376373291 +184,4.872525086168383,4.853926181793213 +185,5.051385467643634,4.849937915802002 +186,4.936169325493337,4.848665714263916 +187,5.034477050187157,4.849297523498535 +188,4.913518890048243,4.8421220779418945 +189,4.892289945150266,4.851822376251221 +190,4.8864359282405,4.859631061553955 +191,4.895977294412556,4.848063945770264 +192,4.906855263567101,4.850001811981201 +193,4.896345393905222,4.860968589782715 +194,4.918606493063779,4.8393731117248535 +195,4.9483566946814435,4.858851432800293 +196,5.088649546513792,4.856363773345947 +197,4.973392535945051,4.850390434265137 +198,4.882633488574535,4.858846664428711 +199,4.911709769827421,4.848233699798584 +200,4.859976209801614,4.852334976196289 +201,5.075364809270126,4.870742321014404 +202,4.855852528347995,4.848693370819092 +203,5.076912625284221,4.844854831695557 +204,4.911325110401704,4.843681335449219 +205,4.85277503435729,4.852741718292236 +206,4.890992233473859,4.850731372833252 +207,5.0608500057085335,4.860933303833008 +208,4.884364619281123,4.81980562210083 +209,4.900987949943023,4.839620113372803 +210,4.888756459675303,4.8500189781188965 +211,4.857678873291433,4.83669900894165 +212,5.007931692398861,4.824719429016113 +213,4.930270869335621,4.842092514038086 +214,4.868548131379925,4.840893268585205 +215,4.993460724074445,4.823753356933594 +216,4.912491189036772,4.851287364959717 +217,4.93938004253992,4.8285651206970215 +218,4.901247680674457,4.8402838706970215 +219,4.969941864221557,4.843581199645996 +220,4.903693309898585,4.8558855056762695 +221,4.9177893955636085,4.8537373542785645 +222,4.879750789673516,4.84989595413208 +223,4.992566624625784,4.830425262451172 +224,4.965146363593577,4.854353904724121 +225,5.0914665359892055,4.83690071105957 +226,4.877138202959071,4.845240592956543 +227,4.93432723598844,4.840631484985352 +228,4.950834861568274,4.841284275054932 +229,4.879612242589231,4.830111980438232 +230,5.0186355432307685,4.83534049987793 +231,4.980635527369112,4.845816612243652 +232,4.994773894711271,4.851962089538574 +233,4.881918875982716,4.843675136566162 +234,4.9577154307664255,4.853140354156494 +235,4.867363338262005,4.844918727874756 +236,4.887648978740058,4.855035781860352 +237,4.900452846402368,4.84181547164917 +238,4.899871840503047,4.832566738128662 +239,4.877236967840377,4.8391804695129395 +240,4.9297842810524255,4.835505962371826 +241,4.821750216145333,4.8583550453186035 +242,4.8990566178303645,4.8382182121276855 +243,4.854262766461281,4.839243412017822 +244,4.872041409132911,4.845248699188232 +245,4.8647496070134215,4.855406761169434 +246,4.820814056032685,4.8309550285339355 +247,4.86916908931211,4.843050003051758 +248,4.880884844860524,4.846562385559082 +249,4.971935702279738,4.84993839263916 +250,4.875431402133462,4.835239887237549 +251,4.88864963256046,4.839564323425293 +252,4.874714837087272,4.836103916168213 +253,4.966849255431545,4.8505988121032715 +254,4.821245162298634,4.8421854972839355 +255,4.870108482298474,4.836266040802002 +256,4.8368028513069365,4.848062038421631 
+257,4.907972465743811,4.826174736022949 +258,4.88184212728807,4.852370738983154 +259,4.860553479585492,4.824774742126465 +260,5.08448203448054,4.825503349304199 +261,5.117114801172989,4.832688808441162 +262,4.919915911929855,4.820897102355957 +263,4.869587234320368,4.844525337219238 +264,4.852232298993934,4.828740119934082 +265,4.857707299821364,4.840968608856201 +266,4.85592795457762,4.8268280029296875 +267,4.929969645975721,4.813381671905518 +268,4.853955477313265,4.833334922790527 +269,4.942545142420631,4.825384616851807 +270,4.9631899708947955,4.8280229568481445 +271,4.870972501775606,4.827029705047607 +272,5.055289737535108,4.8091912269592285 +273,4.956857171955161,4.836847305297852 +274,4.838273826192637,4.819296360015869 +275,4.872252709859073,4.831906318664551 +276,4.885246938190928,4.8259663581848145 +277,4.866082237066467,4.826266288757324 +278,4.941213596094531,4.807991027832031 +279,4.884289272474658,4.827019691467285 +280,4.86485021101321,4.82530403137207 +281,4.855879843397426,4.818278789520264 +282,4.964471398647214,4.825504302978516 +283,4.934766606554959,4.838627815246582 +284,4.85977101390953,4.8292365074157715 +285,4.828259992989272,4.836241722106934 +286,4.939565824029224,4.826103687286377 +287,4.838781385395767,4.828159809112549 +288,4.965819847356396,4.845649719238281 +289,4.837829737063966,4.830915451049805 +290,4.84477157228974,4.82660436630249 +291,4.911343718744429,4.82383394241333 +292,4.873568091887594,4.822254180908203 +293,4.913670222830707,4.846660614013672 +294,4.948298560825941,4.82721471786499 +295,4.981606816984916,4.817222595214844 +296,4.842818887746951,4.842416763305664 +297,4.876676054026841,4.862038612365723 +298,4.87388271191081,4.831111907958984 +299,4.967491825529925,4.8437676429748535 +0,10.535446964610706,7.138007640838623 +1,7.8241460431705825,8.138928413391113 +2,7.037873786868471,6.542815685272217 +3,6.811079288973953,5.20956563949585 +4,6.54739814490983,5.755399227142334 +5,6.231513845559323,7.499943256378174 +6,6.042671915256616,8.112894058227539 +7,5.847052983081702,5.025928974151611 +8,5.760145931171649,4.750400543212891 +9,5.659365419908004,5.241806507110596 +10,5.639966595172882,5.037885665893555 +11,5.73006853985064,6.9272308349609375 +12,5.723964644923354,6.6573591232299805 +13,5.797361269141689,4.718046188354492 +14,5.439443990678498,4.7904052734375 +15,5.635361391486543,6.718345642089844 +16,5.653092409263958,5.234623432159424 +17,5.234378751841459,4.628739833831787 +18,5.441285646323002,7.224741458892822 +19,5.142640886162266,4.9277801513671875 +20,5.388914961525888,4.881191730499268 +21,5.167937509218851,5.874646186828613 +22,5.187350001840881,6.2521796226501465 +23,5.224235027486627,7.166512489318848 +24,5.164473139517235,6.275496006011963 +25,5.170422951470722,6.8056182861328125 +26,5.098721721316829,5.16926383972168 +27,5.119321246941884,6.240243434906006 +28,5.012495778907429,5.059202671051025 +29,4.96033417672822,4.0382490158081055 +30,4.864972035812609,5.043783664703369 +31,5.496353719812451,4.348814487457275 +32,5.090254177469196,4.102095603942871 +33,5.189136644204457,6.531438827514648 +34,4.9992458332668654,5.448512077331543 +35,4.888303357543367,4.152667999267578 +36,4.967370222915303,4.280396938323975 +37,4.897762766751376,4.640329360961914 +38,4.844392508448976,5.0896992683410645 +39,4.92388643134724,6.573436260223389 +40,4.766854523889946,4.830630779266357 +41,4.844383910569277,5.52494478225708 +42,4.878748532858762,4.5594305992126465 +43,4.851197158206593,5.413809776306152 +44,4.796016617616018,5.237204551696777 
+45,4.880061917232744,4.286767482757568 +46,4.7454505010084675,3.9727718830108643 +47,4.730251236034162,4.514941215515137 +48,4.8392193053707935,4.389209270477295 +49,4.589753867143934,4.6481828689575195 +50,4.4799076036973435,4.488437175750732 +51,4.730030272946213,5.017831802368164 +52,4.6228651660861395,4.124366283416748 +53,4.90867225076213,5.007582187652588 +54,4.67436535394553,4.093002796173096 +55,4.575891185168064,5.06628942489624 +56,4.754421760457935,4.477593421936035 +57,4.630469324372031,3.9222466945648193 +58,4.64677338961399,4.579679489135742 +59,4.6159121159351235,3.878170967102051 +60,4.538089289087238,4.406075477600098 +61,4.455654626181631,3.9146296977996826 +62,4.598985946539677,4.573358058929443 +63,4.571617266264829,5.985673427581787 +64,4.54163094217127,6.103704452514648 +65,4.692487739432941,4.692255973815918 +66,4.572965932253635,3.7858526706695557 +67,4.636289463621197,3.9670493602752686 +68,4.713560311722033,5.384866237640381 +69,4.572802028150269,4.479700565338135 +70,4.48527215683099,4.6507391929626465 +71,4.390281683025938,4.158255577087402 +72,4.4361830299550835,3.9447548389434814 +73,4.509955534068021,4.910351753234863 +74,4.4000974360289,5.104384899139404 +75,4.45315440676429,3.950993776321411 +76,4.6317100333445,3.775142192840576 +77,4.355406855091904,5.060515880584717 +78,4.40980063568462,4.540878772735596 +79,4.421327938455524,4.082302093505859 +80,4.331938456766533,4.46631383895874 +81,4.411942592172911,3.892807960510254 +82,4.429773382345835,3.7041680812835693 +83,4.508404327161385,6.292335510253906 +84,4.351348651177956,4.242518424987793 +85,4.4803279175902855,4.043257236480713 +86,4.2969105511000665,4.009950637817383 +87,4.361302759069385,3.889901876449585 +88,4.2551303495060315,4.278628349304199 +89,4.376428228797335,3.879997968673706 +90,4.38901546904535,3.9082541465759277 +91,4.186863660812378,3.726301431655884 +92,4.124111926194393,3.77917218208313 +93,4.392694892305316,5.240423202514648 +94,4.279921836925276,3.70542573928833 +95,4.3344357223221746,5.423924922943115 +96,4.352085221536232,3.6470515727996826 +97,4.1990686532222865,3.660454273223877 +98,4.222043641769525,3.6666927337646484 +99,4.194973945617676,3.8389904499053955 +100,4.092292759635232,3.5655860900878906 +101,3.989663469791412,3.5725150108337402 +102,3.981248530113336,3.570087432861328 +103,4.016868723161293,3.576676368713379 +104,4.027392782586993,3.579324245452881 +105,3.9767748449787947,3.562131643295288 +106,4.024276663317825,3.607285737991333 +107,4.000350344542301,3.4951846599578857 +108,4.005244428822489,3.631859302520752 +109,3.9664393428600198,3.541245222091675 +110,4.000907515034531,3.597203254699707 +111,3.907105119661851,3.4729678630828857 +112,3.954275351220911,3.4766249656677246 +113,3.9599741065140925,3.521806240081787 +114,3.948770172668226,4.467918872833252 +115,3.9212709629174434,3.4957480430603027 +116,3.8708123752565093,3.6989617347717285 +117,3.973125000072248,3.5175647735595703 +118,3.9312785014961706,3.5038444995880127 +119,3.8963442271406,3.5073935985565186 +120,3.8899089191899154,3.5109925270080566 +121,3.9966415784575724,3.572051763534546 +122,3.886194211063963,3.5390114784240723 +123,4.006036867878654,3.5060036182403564 +124,3.925734584981745,3.4958598613739014 +125,3.8175584518548216,3.4939565658569336 +126,3.9767107981624026,3.6423051357269287 +127,3.9112727909377125,3.559387683868408 +128,3.9210808750354884,3.5783653259277344 +129,3.8978167566386137,3.4558184146881104 +130,3.9146922183759285,3.442147731781006 +131,3.8998415372588418,3.485807180404663 
+132,3.8524805220464864,3.4839394092559814 +133,3.864646847681566,3.4289379119873047 +134,3.896560330101938,3.5704212188720703 +135,3.9212636012019533,3.6962549686431885 +136,3.911486264069875,3.4379525184631348 +137,3.902305760528102,3.4614615440368652 +138,3.8855316671458158,3.4831371307373047 +139,3.870126810940829,3.4520771503448486 +140,3.9085745464671744,3.4297633171081543 +141,3.9253488692370326,3.5832183361053467 +142,3.8564954674605167,3.4396049976348877 +143,3.8318241982749015,3.5898733139038086 +144,3.8363115231196088,3.635559320449829 +145,3.8665000940814163,3.514362335205078 +146,3.9079818360733265,3.5363481044769287 +147,3.8632410154198156,3.523726224899292 +148,3.8685725255446,3.3906733989715576 +149,3.8380854845047,3.5136773586273193 +150,3.822555325970505,3.4482574462890625 +151,3.81273100195509,3.409930944442749 +152,3.8252228411761195,3.3968701362609863 +153,3.8074971466353444,3.414401054382324 +154,3.792241237741528,3.434910297393799 +155,3.829114956566782,3.452721357345581 +156,3.893543620904287,3.4151875972747803 +157,3.861967405044671,3.4273664951324463 +158,3.793957085681684,3.4121944904327393 +159,3.9103168101021737,3.404581069946289 +160,3.8070671554767723,3.4108850955963135 +161,3.793223588394396,3.4250144958496094 +162,3.809103540579478,3.4398889541625977 +163,3.861557374578534,3.4331581592559814 +164,3.7472348726156985,3.4132120609283447 +165,3.7782242222265765,3.3862571716308594 +166,3.837867415312565,3.4021520614624023 +167,3.8502973448146474,3.4277844429016113 +168,3.8656850782307712,3.415170669555664 +169,3.783724697069688,3.397962808609009 +170,3.8140269976673706,3.4005913734436035 +171,3.863913030696638,3.424940347671509 +172,3.808407175540924,3.4052114486694336 +173,3.824768977454214,3.4211342334747314 +174,3.889205584381566,3.444988965988159 +175,3.86010818084081,3.3880269527435303 +176,3.7785442955566175,3.3919363021850586 +177,3.7765940366369306,3.40488862991333 +178,3.8455430175318863,3.4235596656799316 +179,3.8654890031525584,3.4057908058166504 +180,3.7924586176872253,3.426251173019409 +181,3.784715480154211,3.400472402572632 +182,3.8302748625928706,3.3924312591552734 +183,3.8151419043540953,3.4177021980285645 +184,3.781953849214496,3.3949778079986572 +185,3.8058335213950185,3.4330036640167236 +186,3.784428761944626,3.413548469543457 +187,3.83813762737043,3.4035556316375732 +188,3.81805451855515,3.3998470306396484 +189,3.807869372584603,3.3909356594085693 +190,3.79270888819839,3.411669969558716 +191,3.812076607617465,3.4259047508239746 +192,3.796394194256176,3.40803599357605 +193,3.824968786311872,3.4039738178253174 +194,3.8355708266749526,3.424544095993042 +195,3.9191272294882573,3.44091796875 +196,3.812352052601901,3.3932266235351562 +197,3.816050056255225,3.390587568283081 +198,3.7940091201753328,3.41398549079895 +199,3.7971794417410187,3.419776439666748 +200,3.819029266906507,3.404385566711426 +201,3.7826591314239937,3.398138999938965 +202,3.8210758646329244,3.403352737426758 +203,3.8420705058357933,3.4483673572540283 +204,3.834588299736832,3.4032278060913086 +205,3.837385216626254,3.4191997051239014 +206,3.8896077036857606,3.3905584812164307 +207,3.8320565563259703,3.4297590255737305 +208,3.839206792007793,3.379161834716797 +209,3.8610341350237527,3.3871748447418213 +210,3.772339236013817,3.3873775005340576 +211,3.783070614121177,3.3957266807556152 +212,3.82103871829582,3.3919363021850586 +213,3.8371659514578904,3.4060192108154297 +214,3.7542113278851366,3.4153759479522705 +215,3.7170616016243443,3.44187068939209 
+216,3.8395546624154755,3.4097321033477783 +217,3.8662303454948193,3.416490316390991 +218,3.845604047269532,3.4306788444519043 +219,3.824705605434649,3.4285528659820557 +220,3.830407956965042,3.3914730548858643 +221,3.8231031385335057,3.383490800857544 +222,3.833514393820907,3.3960111141204834 +223,3.801216755852555,3.4285266399383545 +224,3.858941161993778,3.389674663543701 +225,3.7505493785395765,3.395493268966675 +226,3.823566889401638,3.4127182960510254 +227,3.820666165424116,3.400625228881836 +228,3.823223647926793,3.38191556930542 +229,3.7554176207744714,3.3922643661499023 +230,3.7746106866634253,3.3796651363372803 +231,3.7969261169433595,3.4117064476013184 +232,3.867424920472232,3.3949637413024902 +233,3.770203200976054,3.3882691860198975 +234,3.7431430119456666,3.3910398483276367 +235,3.7668205752517236,3.3840842247009277 +236,3.8286517002365805,3.3761231899261475 +237,3.771576622399417,3.3885087966918945 +238,3.777764148423166,3.403149366378784 +239,3.7905584682117808,3.3763933181762695 +240,3.8210763176282247,3.414173126220703 +241,3.865154363531055,3.393035888671875 +242,3.800120635466142,3.398986339569092 +243,3.8022580645301125,3.396702289581299 +244,3.7276438171213324,3.3882017135620117 +245,3.815004042784373,3.3917412757873535 +246,3.8575457323681226,3.3967437744140625 +247,3.7772725036649994,3.3954968452453613 +248,3.7712400523099032,3.382535696029663 +249,3.7938311800812228,3.3680741786956787 +250,3.8112679398421085,3.4131574630737305 +251,3.851072518753283,3.4096884727478027 +252,3.8343051928462404,3.4363956451416016 +253,3.764303384405194,3.3730990886688232 +254,3.7501230239868164,3.394145965576172 +255,3.8075529828216093,3.3988044261932373 +256,3.835201842134649,3.371772050857544 +257,3.7671612905733514,3.371422052383423 +258,3.8664221745548826,3.415010929107666 +259,3.8068302790323894,3.3795337677001953 +260,3.746603085416736,3.3995862007141113 +261,3.844028600056966,3.4078457355499268 +262,3.864883915222052,3.382657051086426 +263,3.820603057832429,3.3754115104675293 +264,3.846506002816287,3.3887317180633545 +265,3.806149897069642,3.3693315982818604 +266,3.8139860608360983,3.378915309906006 +267,3.838236346389308,3.36610746383667 +268,3.746777840455373,3.3673408031463623 +269,3.8196406386115336,3.3848512172698975 +270,3.791604555375648,3.376155376434326 +271,3.782218140183073,3.4068636894226074 +272,3.7335610830422605,3.3726370334625244 +273,3.838183360750025,3.385442018508911 +274,3.7939599969170312,3.3936171531677246 +275,3.8370031555493673,3.404611825942993 +276,3.810185116710085,3.3647208213806152 +277,3.842852070114829,3.386965274810791 +278,3.8682658285805673,3.363536834716797 +279,3.7965586998245935,3.36864972114563 +280,3.761886895425392,3.3644275665283203 +281,3.7922668839945937,3.3626370429992676 +282,3.6954838662436513,3.391234874725342 +283,3.7597093000556483,3.356743335723877 +284,3.818933317155549,3.363304376602173 +285,3.8085382475997465,3.3700997829437256 +286,3.7776332927472662,3.387850761413574 +287,3.852750436103705,3.3747494220733643 +288,3.869700754411293,3.3716113567352295 +289,3.819369491483226,3.367892265319824 +290,3.7625609213655644,3.392800807952881 +291,3.8113484064737957,3.372192859649658 +292,3.765781452439048,3.362692356109619 +293,3.731386435031891,3.372163772583008 +294,3.8062914302854827,3.367699384689331 +295,3.796064802733335,3.3909687995910645 +296,3.8264753128543045,3.3955934047698975 +297,3.817375989393754,3.41853928565979 +298,3.791223588856784,3.3914475440979004 +299,3.744249291130991,3.3684945106506348 diff --git 
diff --git a/result_ssd300_fault_1/Mision 11_DJI_0011.jpg b/result_ssd300_fault_1/Mision 11_DJI_0011.jpg
new file mode 100644
index 0000000..4312bef
Binary files /dev/null and b/result_ssd300_fault_1/Mision 11_DJI_0011.jpg differ
diff --git a/result_ssd300_fault_1/Mision 11_DJI_0012.jpg b/result_ssd300_fault_1/Mision 11_DJI_0012.jpg
new file mode 100644
index 0000000..1796ebc
Binary files /dev/null and b/result_ssd300_fault_1/Mision 11_DJI_0012.jpg differ
diff --git a/result_ssd300_fault_1/Mision 11_DJI_0094.jpg b/result_ssd300_fault_1/Mision 11_DJI_0094.jpg
new file mode 100644
index 0000000..597554e
Binary files /dev/null and b/result_ssd300_fault_1/Mision 11_DJI_0094.jpg differ
diff --git a/result_ssd300_fault_1/Mision 11_DJI_0095.jpg b/result_ssd300_fault_1/Mision 11_DJI_0095.jpg
new file mode 100644
index 0000000..9b772d6
Binary files /dev/null and b/result_ssd300_fault_1/Mision 11_DJI_0095.jpg differ
diff --git a/result_ssd300_fault_1/Mision 12_DJI_0003.jpg b/result_ssd300_fault_1/Mision 12_DJI_0003.jpg
new file mode 100644
index 0000000..23018da
Binary files /dev/null and b/result_ssd300_fault_1/Mision 12_DJI_0003.jpg differ
diff --git a/result_ssd300_fault_1/Mision 14_DJI_0007.jpg b/result_ssd300_fault_1/Mision 14_DJI_0007.jpg
new file mode 100644
index 0000000..b9e5fe9
Binary files /dev/null and b/result_ssd300_fault_1/Mision 14_DJI_0007.jpg differ
diff --git a/result_ssd300_fault_1/Mision 14_DJI_0008.jpg b/result_ssd300_fault_1/Mision 14_DJI_0008.jpg
new file mode 100644
index 0000000..8ae9766
Binary files /dev/null and b/result_ssd300_fault_1/Mision 14_DJI_0008.jpg differ
diff --git a/result_ssd300_fault_1/Mision 14_DJI_0009.jpg b/result_ssd300_fault_1/Mision 14_DJI_0009.jpg
new file mode 100644
index 0000000..e431f3f
Binary files /dev/null and b/result_ssd300_fault_1/Mision 14_DJI_0009.jpg differ
diff --git a/result_ssd300_fault_1/Mision 17_DJI_0007.jpg b/result_ssd300_fault_1/Mision 17_DJI_0007.jpg
new file mode 100644
index 0000000..c48d117
Binary files /dev/null and b/result_ssd300_fault_1/Mision 17_DJI_0007.jpg differ
diff --git a/result_ssd300_fault_1/Mision 17_DJI_0008.jpg b/result_ssd300_fault_1/Mision 17_DJI_0008.jpg
new file mode 100644
index 0000000..ea11fb8
Binary files /dev/null and b/result_ssd300_fault_1/Mision 17_DJI_0008.jpg differ
diff --git a/result_ssd300_fault_1/Mision 17_DJI_0009.jpg b/result_ssd300_fault_1/Mision 17_DJI_0009.jpg
new file mode 100644
index 0000000..7a34e7b
Binary files /dev/null and b/result_ssd300_fault_1/Mision 17_DJI_0009.jpg differ
diff --git a/result_ssd300_fault_1/Mision 20_DJI_0076.jpg b/result_ssd300_fault_1/Mision 20_DJI_0076.jpg
new file mode 100644
index 0000000..e2a5283
Binary files /dev/null and b/result_ssd300_fault_1/Mision 20_DJI_0076.jpg differ
diff --git a/result_ssd300_fault_1/Mision 20_DJI_0080.jpg b/result_ssd300_fault_1/Mision 20_DJI_0080.jpg
new file mode 100644
index 0000000..613298e
Binary files /dev/null and b/result_ssd300_fault_1/Mision 20_DJI_0080.jpg differ
diff --git a/result_ssd300_fault_1/Mision 20_DJI_0082.jpg b/result_ssd300_fault_1/Mision 20_DJI_0082.jpg
new file mode 100644
index 0000000..356997c
Binary files /dev/null and b/result_ssd300_fault_1/Mision 20_DJI_0082.jpg differ
diff --git a/result_ssd300_fault_1/Mision 28_DJI_0001.jpg b/result_ssd300_fault_1/Mision 28_DJI_0001.jpg
new file mode 100644
index 0000000..8cd3626
Binary files /dev/null and b/result_ssd300_fault_1/Mision 28_DJI_0001.jpg differ
diff --git a/result_ssd300_fault_1/Mision 28_DJI_0003.jpg b/result_ssd300_fault_1/Mision 28_DJI_0003.jpg
new file mode 100644
index 0000000..43063b0
Binary files /dev/null and b/result_ssd300_fault_1/Mision 28_DJI_0003.jpg differ
diff --git a/result_ssd300_fault_1/Mision 28_DJI_0009.jpg b/result_ssd300_fault_1/Mision 28_DJI_0009.jpg
new file mode 100644
index 0000000..db08cff
Binary files /dev/null and b/result_ssd300_fault_1/Mision 28_DJI_0009.jpg differ
diff --git a/result_ssd300_fault_1/Mision 46_DJI_0054.jpg b/result_ssd300_fault_1/Mision 46_DJI_0054.jpg
new file mode 100644
index 0000000..4b78954
Binary files /dev/null and b/result_ssd300_fault_1/Mision 46_DJI_0054.jpg differ
diff --git a/result_ssd300_fault_1/Mision 46_DJI_0055.jpg b/result_ssd300_fault_1/Mision 46_DJI_0055.jpg
new file mode 100644
index 0000000..ce65a9f
Binary files /dev/null and b/result_ssd300_fault_1/Mision 46_DJI_0055.jpg differ
diff --git a/result_ssd300_fault_1/Mision 46_DJI_0056.jpg b/result_ssd300_fault_1/Mision 46_DJI_0056.jpg
new file mode 100644
index 0000000..6cb93b1
Binary files /dev/null and b/result_ssd300_fault_1/Mision 46_DJI_0056.jpg differ
diff --git a/result_ssd300_fault_1/Mision 50_DJI_0006.jpg b/result_ssd300_fault_1/Mision 50_DJI_0006.jpg
new file mode 100644
index 0000000..815d79f
Binary files /dev/null and b/result_ssd300_fault_1/Mision 50_DJI_0006.jpg differ
diff --git a/result_ssd300_fault_1/Mision 50_DJI_0015.jpg b/result_ssd300_fault_1/Mision 50_DJI_0015.jpg
new file mode 100644
index 0000000..edd99a1
Binary files /dev/null and b/result_ssd300_fault_1/Mision 50_DJI_0015.jpg differ
diff --git a/result_ssd300_fault_1/Mision 50_DJI_0016.jpg b/result_ssd300_fault_1/Mision 50_DJI_0016.jpg
new file mode 100644
index 0000000..f5b788a
Binary files /dev/null and b/result_ssd300_fault_1/Mision 50_DJI_0016.jpg differ
diff --git a/result_ssd300_fault_1/Mision 9_DJI_0077.jpg b/result_ssd300_fault_1/Mision 9_DJI_0077.jpg
new file mode 100644
index 0000000..8bfa830
Binary files /dev/null and b/result_ssd300_fault_1/Mision 9_DJI_0077.jpg differ
diff --git a/result_ssd300_fault_1/Mision 9_DJI_0080.jpg b/result_ssd300_fault_1/Mision 9_DJI_0080.jpg
new file mode 100644
index 0000000..e407be1
Binary files /dev/null and b/result_ssd300_fault_1/Mision 9_DJI_0080.jpg differ
diff --git a/result_ssd300_fault_1/time.txt b/result_ssd300_fault_1/time.txt
new file mode 100644
index 0000000..f1a3f11
--- /dev/null
+++ b/result_ssd300_fault_1/time.txt
@@ -0,0 +1 @@
+Tiempo promedio:0.07781978607177735
\ No newline at end of file
diff --git a/result_ssd7_fault_1/Mision 11_DJI_0011.jpg b/result_ssd7_fault_1/Mision 11_DJI_0011.jpg
new file mode 100644
index 0000000..345ba9e
Binary files /dev/null and b/result_ssd7_fault_1/Mision 11_DJI_0011.jpg differ
diff --git a/result_ssd7_fault_1/Mision 11_DJI_0012.jpg b/result_ssd7_fault_1/Mision 11_DJI_0012.jpg
new file mode 100644
index 0000000..f63b9f1
Binary files /dev/null and b/result_ssd7_fault_1/Mision 11_DJI_0012.jpg differ
diff --git a/result_ssd7_fault_1/Mision 11_DJI_0094.jpg b/result_ssd7_fault_1/Mision 11_DJI_0094.jpg
new file mode 100644
index 0000000..2bb9e1b
Binary files /dev/null and b/result_ssd7_fault_1/Mision 11_DJI_0094.jpg differ
diff --git a/result_ssd7_fault_1/Mision 11_DJI_0095.jpg b/result_ssd7_fault_1/Mision 11_DJI_0095.jpg
new file mode 100644
index 0000000..406047f
Binary files /dev/null and b/result_ssd7_fault_1/Mision 11_DJI_0095.jpg differ
diff --git a/result_ssd7_fault_1/Mision 12_DJI_0003.jpg b/result_ssd7_fault_1/Mision 12_DJI_0003.jpg
new file mode 100644
index 0000000..875cb70
Binary files /dev/null and b/result_ssd7_fault_1/Mision 12_DJI_0003.jpg differ
diff --git a/result_ssd7_fault_1/Mision 14_DJI_0007.jpg b/result_ssd7_fault_1/Mision 14_DJI_0007.jpg
new file mode 100644
index 0000000..c9b706a
Binary files /dev/null and b/result_ssd7_fault_1/Mision 14_DJI_0007.jpg differ
diff --git a/result_ssd7_fault_1/Mision 14_DJI_0008.jpg b/result_ssd7_fault_1/Mision 14_DJI_0008.jpg
new file mode 100644
index 0000000..ad3440e
Binary files /dev/null and b/result_ssd7_fault_1/Mision 14_DJI_0008.jpg differ
diff --git a/result_ssd7_fault_1/Mision 14_DJI_0009.jpg b/result_ssd7_fault_1/Mision 14_DJI_0009.jpg
new file mode 100644
index 0000000..2fe48f4
Binary files /dev/null and b/result_ssd7_fault_1/Mision 14_DJI_0009.jpg differ
diff --git a/result_ssd7_fault_1/Mision 17_DJI_0007.jpg b/result_ssd7_fault_1/Mision 17_DJI_0007.jpg
new file mode 100644
index 0000000..31bf69c
Binary files /dev/null and b/result_ssd7_fault_1/Mision 17_DJI_0007.jpg differ
diff --git a/result_ssd7_fault_1/Mision 17_DJI_0008.jpg b/result_ssd7_fault_1/Mision 17_DJI_0008.jpg
new file mode 100644
index 0000000..d5eab62
Binary files /dev/null and b/result_ssd7_fault_1/Mision 17_DJI_0008.jpg differ
diff --git a/result_ssd7_fault_1/Mision 17_DJI_0009.jpg b/result_ssd7_fault_1/Mision 17_DJI_0009.jpg
new file mode 100644
index 0000000..60fbbcb
Binary files /dev/null and b/result_ssd7_fault_1/Mision 17_DJI_0009.jpg differ
diff --git a/result_ssd7_fault_1/Mision 20_DJI_0076.jpg b/result_ssd7_fault_1/Mision 20_DJI_0076.jpg
new file mode 100644
index 0000000..acd7315
Binary files /dev/null and b/result_ssd7_fault_1/Mision 20_DJI_0076.jpg differ
diff --git a/result_ssd7_fault_1/Mision 20_DJI_0080.jpg b/result_ssd7_fault_1/Mision 20_DJI_0080.jpg
new file mode 100644
index 0000000..11c5691
Binary files /dev/null and b/result_ssd7_fault_1/Mision 20_DJI_0080.jpg differ
diff --git a/result_ssd7_fault_1/Mision 20_DJI_0082.jpg b/result_ssd7_fault_1/Mision 20_DJI_0082.jpg
new file mode 100644
index 0000000..7a571eb
Binary files /dev/null and b/result_ssd7_fault_1/Mision 20_DJI_0082.jpg differ
diff --git a/result_ssd7_fault_1/Mision 28_DJI_0001.jpg b/result_ssd7_fault_1/Mision 28_DJI_0001.jpg
new file mode 100644
index 0000000..555c991
Binary files /dev/null and b/result_ssd7_fault_1/Mision 28_DJI_0001.jpg differ
diff --git a/result_ssd7_fault_1/Mision 28_DJI_0003.jpg b/result_ssd7_fault_1/Mision 28_DJI_0003.jpg
new file mode 100644
index 0000000..10f38e6
Binary files /dev/null and b/result_ssd7_fault_1/Mision 28_DJI_0003.jpg differ
diff --git a/result_ssd7_fault_1/Mision 28_DJI_0009.jpg b/result_ssd7_fault_1/Mision 28_DJI_0009.jpg
new file mode 100644
index 0000000..2aeae0b
Binary files /dev/null and b/result_ssd7_fault_1/Mision 28_DJI_0009.jpg differ
diff --git a/result_ssd7_fault_1/Mision 46_DJI_0054.jpg b/result_ssd7_fault_1/Mision 46_DJI_0054.jpg
new file mode 100644
index 0000000..70bc6bd
Binary files /dev/null and b/result_ssd7_fault_1/Mision 46_DJI_0054.jpg differ
diff --git a/result_ssd7_fault_1/Mision 46_DJI_0055.jpg b/result_ssd7_fault_1/Mision 46_DJI_0055.jpg
new file mode 100644
index 0000000..837845a
Binary files /dev/null and b/result_ssd7_fault_1/Mision 46_DJI_0055.jpg differ
diff --git a/result_ssd7_fault_1/Mision 46_DJI_0056.jpg b/result_ssd7_fault_1/Mision 46_DJI_0056.jpg
new file mode 100644
index 0000000..0d1e132
Binary files /dev/null and b/result_ssd7_fault_1/Mision 46_DJI_0056.jpg differ
diff --git a/result_ssd7_fault_1/Mision 50_DJI_0006.jpg b/result_ssd7_fault_1/Mision 50_DJI_0006.jpg
new file mode 100644
index 0000000..400019e
Binary files /dev/null and b/result_ssd7_fault_1/Mision 50_DJI_0006.jpg differ
diff --git a/result_ssd7_fault_1/Mision 50_DJI_0015.jpg b/result_ssd7_fault_1/Mision 50_DJI_0015.jpg
new file mode 100644
index 0000000..4934b33
Binary files /dev/null and b/result_ssd7_fault_1/Mision 50_DJI_0015.jpg differ
diff --git a/result_ssd7_fault_1/Mision 50_DJI_0016.jpg b/result_ssd7_fault_1/Mision 50_DJI_0016.jpg
new file mode 100644
index 0000000..0d7b20d
Binary files /dev/null and b/result_ssd7_fault_1/Mision 50_DJI_0016.jpg differ
diff --git a/result_ssd7_fault_1/Mision 9_DJI_0077.jpg b/result_ssd7_fault_1/Mision 9_DJI_0077.jpg
new file mode 100644
index 0000000..98d8cc1
Binary files /dev/null and b/result_ssd7_fault_1/Mision 9_DJI_0077.jpg differ
diff --git a/result_ssd7_fault_1/Mision 9_DJI_0080.jpg b/result_ssd7_fault_1/Mision 9_DJI_0080.jpg
new file mode 100644
index 0000000..2820077
Binary files /dev/null and b/result_ssd7_fault_1/Mision 9_DJI_0080.jpg differ
diff --git a/result_ssd7_fault_1/time.txt b/result_ssd7_fault_1/time.txt
new file mode 100644
index 0000000..9f2c0e6
--- /dev/null
+++ b/result_ssd7_fault_1/time.txt
@@ -0,0 +1 @@
+Tiempo promedio:0.018647890090942382
\ No newline at end of file
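Each result folder also adds a one-line time.txt recording "Tiempo promedio" (Spanish for "average time") per image: roughly 0.078 s for SSD300 (about 13 images/s) versus roughly 0.019 s for SSD7 (about 54 images/s), so the lightweight SSD7 detector runs about 4x faster on the same hardware. A minimal sketch of how such a figure could be produced, assuming a loaded Keras detection model and a list of preprocessed input images (both names are hypothetical, not taken from the notebook):

import time
import numpy as np

durations = []
for img in images:  # hypothetical list of preprocessed input images
    start = time.time()
    model.predict(np.expand_dims(img, axis=0))  # batch of one image
    durations.append(time.time() - start)

avg = sum(durations) / len(durations)

# Same single-line format (and no trailing newline) as the files added above.
with open('result_ssd7_fault_1/time.txt', 'w') as f:
    f.write('Tiempo promedio:{}'.format(avg))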