config Diode Fault

This commit is contained in:
Daniel Saavedra
2020-02-19 14:26:55 -03:00
parent 0a2c92816c
commit e91f29cd2b
5 changed files with 281 additions and 617 deletions

2
.gitignore vendored
View File

@@ -4,6 +4,8 @@ panel_jpg/
result_ssd7_panel_1/
result_ssd7_panel_2/
Train&Test_1/
Train&Test_4/
Train&Test_D/
Train&Test_C/
Train&Test_A/
Train&Test_S/

View File

@@ -76,7 +76,7 @@ def _main_(args):
cod_falla = int(Excel.loc[index_path]['Cód. Falla'])
sev = Excel.loc[index_path]['Severidad']
#if cod_falla != 1:
#if cod_falla != 4:
# continue
## Merges the same photos that have different labels, e.g.:
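
For readers of this hunk: a minimal sketch of what the fault-code filter would look like once the commented guard is re-enabled. The Excel DataFrame, the index_path index and the column names come from the lines above; the pandas import, the spreadsheet path and the surrounding loop are assumptions added for illustration.

import pandas as pd

# Hypothetical loading of the annotation spreadsheet (path is an assumption).
Excel = pd.read_excel('annotations.xlsx', index_col=0)

for index_path in Excel.index:
    # 'Cód. Falla' (fault code) and 'Severidad' (severity) are the columns used above.
    cod_falla = int(Excel.loc[index_path]['Cód. Falla'])
    sev = Excel.loc[index_path]['Severidad']
    # Re-enabling the commented filter keeps only fault code 4 (diode fault)
    # and skips every other annotation.
    if cod_falla != 4:
        continue
    # ... build the label / annotation entry for this image here ...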

View File

@@ -50,7 +50,7 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 7,
"metadata": {},
"outputs": [
{
@@ -58,11 +58,188 @@
"output_type": "stream",
"text": [
"\n",
"Training on: \t{'panel': 1, 'cell': 2}\n",
"Training on: \t{'1': 1}\n",
"\n",
"OK create model\n",
"\n",
"Loading pretrained weights.\n",
"\n"
"Loading pretrained weights VGG.\n",
"\n",
"__________________________________________________________________________________________________\n",
"Layer (type) Output Shape Param # Connected to \n",
"==================================================================================================\n",
"input_1 (InputLayer) (None, 400, 400, 3) 0 \n",
"__________________________________________________________________________________________________\n",
"identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"input_mean_normalization (Lambd (None, 400, 400, 3) 0 identity_layer[0][0] \n",
"__________________________________________________________________________________________________\n",
"input_channel_swap (Lambda) (None, 400, 400, 3) 0 input_mean_normalization[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1_1 (Conv2D) (None, 400, 400, 64) 1792 input_channel_swap[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1_2 (Conv2D) (None, 400, 400, 64) 36928 conv1_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"pool1 (MaxPooling2D) (None, 200, 200, 64) 0 conv1_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv2_1 (Conv2D) (None, 200, 200, 128 73856 pool1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv2_2 (Conv2D) (None, 200, 200, 128 147584 conv2_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"pool2 (MaxPooling2D) (None, 100, 100, 128 0 conv2_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv3_1 (Conv2D) (None, 100, 100, 256 295168 pool2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv3_2 (Conv2D) (None, 100, 100, 256 590080 conv3_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv3_3 (Conv2D) (None, 100, 100, 256 590080 conv3_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"pool3 (MaxPooling2D) (None, 50, 50, 256) 0 conv3_3[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_1 (Conv2D) (None, 50, 50, 512) 1180160 pool3[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_2 (Conv2D) (None, 50, 50, 512) 2359808 conv4_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3 (Conv2D) (None, 50, 50, 512) 2359808 conv4_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"pool4 (MaxPooling2D) (None, 25, 25, 512) 0 conv4_3[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv5_1 (Conv2D) (None, 25, 25, 512) 2359808 pool4[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv5_2 (Conv2D) (None, 25, 25, 512) 2359808 conv5_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv5_3 (Conv2D) (None, 25, 25, 512) 2359808 conv5_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"pool5 (MaxPooling2D) (None, 25, 25, 512) 0 conv5_3[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc6 (Conv2D) (None, 25, 25, 1024) 4719616 pool5[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc7 (Conv2D) (None, 25, 25, 1024) 1049600 fc6[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_1 (Conv2D) (None, 25, 25, 256) 262400 fc7[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_padding (ZeroPadding2D) (None, 27, 27, 256) 0 conv6_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_2 (Conv2D) (None, 13, 13, 512) 1180160 conv6_padding[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_1 (Conv2D) (None, 13, 13, 128) 65664 conv6_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_padding (ZeroPadding2D) (None, 15, 15, 128) 0 conv7_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_2 (Conv2D) (None, 7, 7, 256) 295168 conv7_padding[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_1 (Conv2D) (None, 7, 7, 128) 32896 conv7_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_2 (Conv2D) (None, 5, 5, 256) 295168 conv8_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_1 (Conv2D) (None, 5, 5, 128) 32896 conv8_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3_norm (L2Normalization) (None, 50, 50, 512) 512 conv4_3[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_2 (Conv2D) (None, 3, 3, 256) 295168 conv9_1[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3_norm_mbox_conf (Conv2D) (None, 50, 50, 8) 36872 conv4_3_norm[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc7_mbox_conf (Conv2D) (None, 25, 25, 12) 110604 fc7[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_2_mbox_conf (Conv2D) (None, 13, 13, 12) 55308 conv6_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_2_mbox_conf (Conv2D) (None, 7, 7, 12) 27660 conv7_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_2_mbox_conf (Conv2D) (None, 5, 5, 8) 18440 conv8_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_2_mbox_conf (Conv2D) (None, 3, 3, 8) 18440 conv9_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3_norm_mbox_loc (Conv2D) (None, 50, 50, 16) 73744 conv4_3_norm[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc7_mbox_loc (Conv2D) (None, 25, 25, 24) 221208 fc7[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_2_mbox_loc (Conv2D) (None, 13, 13, 24) 110616 conv6_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_2_mbox_loc (Conv2D) (None, 7, 7, 24) 55320 conv7_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_2_mbox_loc (Conv2D) (None, 5, 5, 16) 36880 conv8_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_2_mbox_loc (Conv2D) (None, 3, 3, 16) 36880 conv9_2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3_norm_mbox_conf_reshape (None, 10000, 2) 0 conv4_3_norm_mbox_conf[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc7_mbox_conf_reshape (Reshape) (None, 3750, 2) 0 fc7_mbox_conf[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_2_mbox_conf_reshape (Resh (None, 1014, 2) 0 conv6_2_mbox_conf[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_2_mbox_conf_reshape (Resh (None, 294, 2) 0 conv7_2_mbox_conf[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_2_mbox_conf_reshape (Resh (None, 100, 2) 0 conv8_2_mbox_conf[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_2_mbox_conf_reshape (Resh (None, 36, 2) 0 conv9_2_mbox_conf[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3_norm_mbox_priorbox (Anc (None, 50, 50, 4, 8) 0 conv4_3_norm_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc7_mbox_priorbox (AnchorBoxes) (None, 25, 25, 6, 8) 0 fc7_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_2_mbox_priorbox (AnchorBo (None, 13, 13, 6, 8) 0 conv6_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_2_mbox_priorbox (AnchorBo (None, 7, 7, 6, 8) 0 conv7_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_2_mbox_priorbox (AnchorBo (None, 5, 5, 4, 8) 0 conv8_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_2_mbox_priorbox (AnchorBo (None, 3, 3, 4, 8) 0 conv9_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"mbox_conf (Concatenate) (None, 15194, 2) 0 conv4_3_norm_mbox_conf_reshape[0]\n",
" fc7_mbox_conf_reshape[0][0] \n",
" conv6_2_mbox_conf_reshape[0][0] \n",
" conv7_2_mbox_conf_reshape[0][0] \n",
" conv8_2_mbox_conf_reshape[0][0] \n",
" conv9_2_mbox_conf_reshape[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3_norm_mbox_loc_reshape ( (None, 10000, 4) 0 conv4_3_norm_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc7_mbox_loc_reshape (Reshape) (None, 3750, 4) 0 fc7_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_2_mbox_loc_reshape (Resha (None, 1014, 4) 0 conv6_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_2_mbox_loc_reshape (Resha (None, 294, 4) 0 conv7_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_2_mbox_loc_reshape (Resha (None, 100, 4) 0 conv8_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_2_mbox_loc_reshape (Resha (None, 36, 4) 0 conv9_2_mbox_loc[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv4_3_norm_mbox_priorbox_resh (None, 10000, 8) 0 conv4_3_norm_mbox_priorbox[0][0] \n",
"__________________________________________________________________________________________________\n",
"fc7_mbox_priorbox_reshape (Resh (None, 3750, 8) 0 fc7_mbox_priorbox[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv6_2_mbox_priorbox_reshape ( (None, 1014, 8) 0 conv6_2_mbox_priorbox[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv7_2_mbox_priorbox_reshape ( (None, 294, 8) 0 conv7_2_mbox_priorbox[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv8_2_mbox_priorbox_reshape ( (None, 100, 8) 0 conv8_2_mbox_priorbox[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv9_2_mbox_priorbox_reshape ( (None, 36, 8) 0 conv9_2_mbox_priorbox[0][0] \n",
"__________________________________________________________________________________________________\n",
"mbox_conf_softmax (Activation) (None, 15194, 2) 0 mbox_conf[0][0] \n",
"__________________________________________________________________________________________________\n",
"mbox_loc (Concatenate) (None, 15194, 4) 0 conv4_3_norm_mbox_loc_reshape[0][\n",
" fc7_mbox_loc_reshape[0][0] \n",
" conv6_2_mbox_loc_reshape[0][0] \n",
" conv7_2_mbox_loc_reshape[0][0] \n",
" conv8_2_mbox_loc_reshape[0][0] \n",
" conv9_2_mbox_loc_reshape[0][0] \n",
"__________________________________________________________________________________________________\n",
"mbox_priorbox (Concatenate) (None, 15194, 8) 0 conv4_3_norm_mbox_priorbox_reshap\n",
" fc7_mbox_priorbox_reshape[0][0] \n",
" conv6_2_mbox_priorbox_reshape[0][\n",
" conv7_2_mbox_priorbox_reshape[0][\n",
" conv8_2_mbox_priorbox_reshape[0][\n",
" conv9_2_mbox_priorbox_reshape[0][\n",
"__________________________________________________________________________________________________\n",
"predictions (Concatenate) (None, 15194, 14) 0 mbox_conf_softmax[0][0] \n",
" mbox_loc[0][0] \n",
" mbox_priorbox[0][0] \n",
"==================================================================================================\n",
"Total params: 23,745,908\n",
"Trainable params: 23,745,908\n",
"Non-trainable params: 0\n",
"__________________________________________________________________________________________________\n"
]
}
],
@@ -120,7 +297,7 @@
" else:\n",
" return 0.00001\n",
"\n",
"config_path = 'config_7_panel_cell.json'\n",
"config_path = 'config_300_fault_1.json'\n",
"\n",
"\n",
"with open(config_path) as config_buffer:\n",
@@ -200,7 +377,7 @@
"\n",
" if config['model']['backend'] == 'ssd300':\n",
" #weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'\n",
" from models.keras_ssd300 import ssd_300 as ssd\n",
" from models.keras_ssd300 import ssd_300\n",
"\n",
" model = ssd_300(image_size=(img_height, img_width, img_channels),\n",
" n_classes=n_classes,\n",
@@ -281,631 +458,40 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processing image set 'train.txt': 100%|██████████| 1/1 [00:00<00:00, 3.02it/s]\n",
"Processing image set 'test.txt': 100%|██████████| 1/1 [00:00<00:00, 2.48it/s]\n",
"Processing image set 'train.txt': 100%|██████████| 1/1 [00:00<00:00, 18.73it/s]\n",
"Processing image set 'test.txt': 100%|██████████| 1/1 [00:00<00:00, 20.23it/s]\n",
"panel : 69\n",
"cell : 423\n",
"Number of images in the training dataset:\t 1\n",
"Number of images in the validation dataset:\t 1\n",
"Epoch 1/100\n",
"\n",
"Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 200s 4s/step - loss: 13.2409 - val_loss: 9.9807\n",
"\n",
"Epoch 00001: val_loss improved from inf to 9.98075, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 2/100\n",
"\n",
"Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 238s 5s/step - loss: 9.8864 - val_loss: 11.1452\n",
"\n",
"Epoch 00002: val_loss did not improve from 9.98075\n",
"Epoch 3/100\n",
"\n",
"Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 226s 5s/step - loss: 8.8060 - val_loss: 8.3006\n",
"\n",
"Epoch 00003: val_loss improved from 9.98075 to 8.30060, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 4/100\n",
"\n",
"Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 199s 4s/step - loss: 7.4999 - val_loss: 8.9384\n",
"\n",
"Epoch 00004: val_loss did not improve from 8.30060\n",
"Epoch 5/100\n",
"\n",
"Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 187s 4s/step - loss: 7.4727 - val_loss: 7.9512\n",
"\n",
"Epoch 00005: val_loss improved from 8.30060 to 7.95121, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 6/100\n",
"\n",
"Epoch 00006: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 213s 4s/step - loss: 6.8813 - val_loss: 11.2544\n",
"\n",
"Epoch 00006: val_loss did not improve from 7.95121\n",
"Epoch 7/100\n",
"\n",
"Epoch 00007: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 195s 4s/step - loss: 6.4775 - val_loss: 6.9093\n",
"\n",
"Epoch 00007: val_loss improved from 7.95121 to 6.90929, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 8/100\n",
"\n",
"Epoch 00008: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 212s 4s/step - loss: 6.9758 - val_loss: 8.6997\n",
"\n",
"Epoch 00008: val_loss did not improve from 6.90929\n",
"Epoch 9/100\n",
"\n",
"Epoch 00009: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 199s 4s/step - loss: 6.1539 - val_loss: 10.9586\n",
"\n",
"Epoch 00009: val_loss did not improve from 6.90929\n",
"Epoch 10/100\n",
"\n",
"Epoch 00010: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 206s 4s/step - loss: 5.9307 - val_loss: 8.4361\n",
"\n",
"Epoch 00010: val_loss did not improve from 6.90929\n",
"Epoch 11/100\n",
"\n",
"Epoch 00011: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 197s 4s/step - loss: 5.3895 - val_loss: 5.9796\n",
"\n",
"Epoch 00011: val_loss improved from 6.90929 to 5.97960, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 12/100\n",
"\n",
"Epoch 00012: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 184s 4s/step - loss: 5.0889 - val_loss: 5.9283\n",
"\n",
"Epoch 00012: val_loss improved from 5.97960 to 5.92832, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 13/100\n",
"\n",
"Epoch 00013: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 193s 4s/step - loss: 5.7916 - val_loss: 6.7706\n",
"\n",
"Epoch 00013: val_loss did not improve from 5.92832\n",
"Epoch 14/100\n",
"\n",
"Epoch 00014: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 222s 4s/step - loss: 5.3010 - val_loss: 7.8910\n",
"\n",
"Epoch 00014: val_loss did not improve from 5.92832\n",
"Epoch 15/100\n",
"\n",
"Epoch 00015: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 179s 4s/step - loss: 4.9873 - val_loss: 6.0389\n",
"\n",
"Epoch 00015: val_loss did not improve from 5.92832\n",
"Epoch 16/100\n",
"\n",
"Epoch 00016: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 182s 4s/step - loss: 5.4664 - val_loss: 6.4125\n",
"\n",
"Epoch 00016: val_loss did not improve from 5.92832\n",
"Epoch 17/100\n",
"\n",
"Epoch 00017: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 166s 3s/step - loss: 6.0094 - val_loss: 9.2918\n",
"\n",
"Epoch 00017: val_loss did not improve from 5.92832\n",
"Epoch 18/100\n",
"\n",
"Epoch 00018: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 181s 4s/step - loss: 5.1737 - val_loss: 7.6806\n",
"\n",
"Epoch 00018: val_loss did not improve from 5.92832\n",
"Epoch 19/100\n",
"\n",
"Epoch 00019: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 159s 3s/step - loss: 5.2708 - val_loss: 7.1096\n",
"\n",
"Epoch 00019: val_loss did not improve from 5.92832\n",
"Epoch 20/100\n",
"\n",
"Epoch 00020: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 173s 3s/step - loss: 5.4765 - val_loss: 5.4921\n",
"\n",
"Epoch 00020: val_loss improved from 5.92832 to 5.49211, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 21/100\n",
"\n",
"Epoch 00021: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 170s 3s/step - loss: 4.6517 - val_loss: 6.6033\n",
"\n",
"Epoch 00021: val_loss did not improve from 5.49211\n",
"Epoch 22/100\n",
"\n",
"Epoch 00022: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 191s 4s/step - loss: 5.1432 - val_loss: 5.6549\n",
"\n",
"Epoch 00022: val_loss did not improve from 5.49211\n",
"Epoch 23/100\n",
"\n",
"Epoch 00023: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 159s 3s/step - loss: 5.4830 - val_loss: 5.8758\n",
"\n",
"Epoch 00023: val_loss did not improve from 5.49211\n",
"Epoch 24/100\n",
"\n",
"Epoch 00024: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 150s 3s/step - loss: 5.3366 - val_loss: 5.3871\n",
"\n",
"Epoch 00024: val_loss improved from 5.49211 to 5.38706, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 25/100\n",
"\n",
"Epoch 00025: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 138s 3s/step - loss: 5.7189 - val_loss: 8.0760\n",
"\n",
"Epoch 00025: val_loss did not improve from 5.38706\n",
"Epoch 26/100\n",
"\n",
"Epoch 00026: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 144s 3s/step - loss: 6.0929 - val_loss: 12.6163\n",
"\n",
"Epoch 00026: val_loss did not improve from 5.38706\n",
"Epoch 27/100\n",
"\n",
"Epoch 00027: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 147s 3s/step - loss: 5.2239 - val_loss: 9.8536\n",
"\n",
"Epoch 00027: val_loss did not improve from 5.38706\n",
"Epoch 28/100\n",
"\n",
"Epoch 00028: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 158s 3s/step - loss: 5.4414 - val_loss: 6.4950\n",
"\n",
"Epoch 00028: val_loss did not improve from 5.38706\n",
"Epoch 29/100\n",
"\n",
"Epoch 00029: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 157s 3s/step - loss: 5.4436 - val_loss: 9.0002\n",
"\n",
"Epoch 00029: val_loss did not improve from 5.38706\n",
"Epoch 30/100\n",
"\n",
"Epoch 00030: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 162s 3s/step - loss: 4.9780 - val_loss: 4.9993\n",
"\n",
"Epoch 00030: val_loss improved from 5.38706 to 4.99925, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 31/100\n",
"\n",
"Epoch 00031: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 140s 3s/step - loss: 4.9645 - val_loss: 5.6612\n",
"\n",
"Epoch 00031: val_loss did not improve from 4.99925\n",
"Epoch 32/100\n",
"\n",
"Epoch 00032: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 141s 3s/step - loss: 4.5982 - val_loss: 5.2083\n",
"\n",
"Epoch 00032: val_loss did not improve from 4.99925\n",
"Epoch 33/100\n",
"\n",
"Epoch 00033: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 143s 3s/step - loss: 4.3101 - val_loss: 6.4808\n",
"\n",
"Epoch 00033: val_loss did not improve from 4.99925\n",
"Epoch 34/100\n",
"\n",
"Epoch 00034: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 145s 3s/step - loss: 4.4252 - val_loss: 10.9472\n"
"27/50 [===============>..............] - ETA: 19s - loss: 10.4861"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Epoch 00034: val_loss did not improve from 4.99925\n",
"Epoch 35/100\n",
"\n",
"Epoch 00035: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 153s 3s/step - loss: 4.4998 - val_loss: 7.1254\n",
"\n",
"Epoch 00035: val_loss did not improve from 4.99925\n",
"Epoch 36/100\n",
"\n",
"Epoch 00036: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 153s 3s/step - loss: 4.8952 - val_loss: 7.0446\n",
"\n",
"Epoch 00036: val_loss did not improve from 4.99925\n",
"Epoch 37/100\n",
"\n",
"Epoch 00037: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 154s 3s/step - loss: 4.9868 - val_loss: 9.3251\n",
"\n",
"Epoch 00037: val_loss did not improve from 4.99925\n",
"Epoch 38/100\n",
"\n",
"Epoch 00038: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 148s 3s/step - loss: 4.8918 - val_loss: 5.1689\n",
"\n",
"Epoch 00038: val_loss did not improve from 4.99925\n",
"Epoch 39/100\n",
"\n",
"Epoch 00039: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 143s 3s/step - loss: 4.5572 - val_loss: 4.9839\n",
"\n",
"Epoch 00039: val_loss improved from 4.99925 to 4.98394, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 40/100\n",
"\n",
"Epoch 00040: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 150s 3s/step - loss: 4.4722 - val_loss: 5.7133\n",
"\n",
"Epoch 00040: val_loss did not improve from 4.98394\n",
"Epoch 41/100\n",
"\n",
"Epoch 00041: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 152s 3s/step - loss: 4.9414 - val_loss: 5.5843\n",
"\n",
"Epoch 00041: val_loss did not improve from 4.98394\n",
"Epoch 42/100\n",
"\n",
"Epoch 00042: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 148s 3s/step - loss: 4.5857 - val_loss: 5.1884\n",
"\n",
"Epoch 00042: val_loss did not improve from 4.98394\n",
"Epoch 43/100\n",
"\n",
"Epoch 00043: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 149s 3s/step - loss: 4.7094 - val_loss: 6.7545\n",
"\n",
"Epoch 00043: val_loss did not improve from 4.98394\n",
"Epoch 44/100\n",
"\n",
"Epoch 00044: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 151s 3s/step - loss: 5.0428 - val_loss: 5.2691\n",
"\n",
"Epoch 00044: val_loss did not improve from 4.98394\n",
"Epoch 45/100\n",
"\n",
"Epoch 00045: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 146s 3s/step - loss: 4.9842 - val_loss: 6.5112\n",
"\n",
"Epoch 00045: val_loss did not improve from 4.98394\n",
"Epoch 46/100\n",
"\n",
"Epoch 00046: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 147s 3s/step - loss: 4.9108 - val_loss: 6.0670\n",
"\n",
"Epoch 00046: val_loss did not improve from 4.98394\n",
"Epoch 47/100\n",
"\n",
"Epoch 00047: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 155s 3s/step - loss: 4.6837 - val_loss: 5.8351\n",
"\n",
"Epoch 00047: val_loss did not improve from 4.98394\n",
"Epoch 48/100\n",
"\n",
"Epoch 00048: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 149s 3s/step - loss: 5.1042 - val_loss: 5.1778\n",
"\n",
"Epoch 00048: val_loss did not improve from 4.98394\n",
"Epoch 49/100\n",
"\n",
"Epoch 00049: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 144s 3s/step - loss: 4.1312 - val_loss: 5.9606\n",
"\n",
"Epoch 00049: val_loss did not improve from 4.98394\n",
"Epoch 50/100\n",
"\n",
"Epoch 00050: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 122s 2s/step - loss: 4.5373 - val_loss: 5.4351\n",
"\n",
"Epoch 00050: val_loss did not improve from 4.98394\n",
"Epoch 51/100\n",
"\n",
"Epoch 00051: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 135s 3s/step - loss: 4.8955 - val_loss: 6.0315\n",
"\n",
"Epoch 00051: val_loss did not improve from 4.98394\n",
"Epoch 52/100\n",
"\n",
"Epoch 00052: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 150s 3s/step - loss: 4.9445 - val_loss: 5.7199\n",
"\n",
"Epoch 00052: val_loss did not improve from 4.98394\n",
"Epoch 53/100\n",
"\n",
"Epoch 00053: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 139s 3s/step - loss: 3.9748 - val_loss: 5.5974\n",
"\n",
"Epoch 00053: val_loss did not improve from 4.98394\n",
"Epoch 54/100\n",
"\n",
"Epoch 00054: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 153s 3s/step - loss: 4.8783 - val_loss: 8.6056\n",
"\n",
"Epoch 00054: val_loss did not improve from 4.98394\n",
"Epoch 55/100\n",
"\n",
"Epoch 00055: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 141s 3s/step - loss: 4.1649 - val_loss: 6.0042\n",
"\n",
"Epoch 00055: val_loss did not improve from 4.98394\n",
"Epoch 56/100\n",
"\n",
"Epoch 00056: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 149s 3s/step - loss: 4.8997 - val_loss: 9.1298\n",
"\n",
"Epoch 00056: val_loss did not improve from 4.98394\n",
"Epoch 57/100\n",
"\n",
"Epoch 00057: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 151s 3s/step - loss: 4.4433 - val_loss: 7.1151\n",
"\n",
"Epoch 00057: val_loss did not improve from 4.98394\n",
"Epoch 58/100\n",
"\n",
"Epoch 00058: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 147s 3s/step - loss: 4.5827 - val_loss: 5.4356\n",
"\n",
"Epoch 00058: val_loss did not improve from 4.98394\n",
"Epoch 59/100\n",
"\n",
"Epoch 00059: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 137s 3s/step - loss: 3.9437 - val_loss: 4.7926\n",
"\n",
"Epoch 00059: val_loss improved from 4.98394 to 4.79262, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 60/100\n",
"\n",
"Epoch 00060: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 125s 3s/step - loss: 4.0939 - val_loss: 5.7098\n",
"\n",
"Epoch 00060: val_loss did not improve from 4.79262\n",
"Epoch 61/100\n",
"\n",
"Epoch 00061: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 161s 3s/step - loss: 5.1152 - val_loss: 5.2079\n",
"\n",
"Epoch 00061: val_loss did not improve from 4.79262\n",
"Epoch 62/100\n",
"\n",
"Epoch 00062: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 144s 3s/step - loss: 4.2958 - val_loss: 4.9239\n",
"\n",
"Epoch 00062: val_loss did not improve from 4.79262\n",
"Epoch 63/100\n",
"\n",
"Epoch 00063: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 141s 3s/step - loss: 3.8241 - val_loss: 4.5443\n",
"\n",
"Epoch 00063: val_loss improved from 4.79262 to 4.54430, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 64/100\n",
"\n",
"Epoch 00064: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 134s 3s/step - loss: 4.7252 - val_loss: 5.9445\n",
"\n",
"Epoch 00064: val_loss did not improve from 4.54430\n",
"Epoch 65/100\n",
"\n",
"Epoch 00065: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 154s 3s/step - loss: 4.4455 - val_loss: 4.8326\n",
"\n",
"Epoch 00065: val_loss did not improve from 4.54430\n",
"Epoch 66/100\n",
"\n",
"Epoch 00066: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 145s 3s/step - loss: 4.4054 - val_loss: 5.6441\n",
"\n",
"Epoch 00066: val_loss did not improve from 4.54430\n",
"Epoch 67/100\n",
"\n",
"Epoch 00067: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 124s 2s/step - loss: 4.4165 - val_loss: 6.8159\n",
"\n",
"Epoch 00067: val_loss did not improve from 4.54430\n",
"Epoch 68/100\n",
"\n",
"Epoch 00068: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 162s 3s/step - loss: 5.0418 - val_loss: 4.8508\n",
"\n",
"Epoch 00068: val_loss did not improve from 4.54430\n",
"Epoch 69/100\n",
"\n",
"Epoch 00069: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 140s 3s/step - loss: 4.1512 - val_loss: 5.4053\n",
"\n",
"Epoch 00069: val_loss did not improve from 4.54430\n",
"Epoch 70/100\n",
"\n",
"Epoch 00070: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 148s 3s/step - loss: 4.6197 - val_loss: 5.2824\n",
"\n",
"Epoch 00070: val_loss did not improve from 4.54430\n",
"Epoch 71/100\n",
"\n",
"Epoch 00071: LearningRateScheduler setting learning rate to 0.001.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"50/50 [==============================] - 152s 3s/step - loss: 4.2807 - val_loss: 5.5992\n",
"\n",
"Epoch 00071: val_loss did not improve from 4.54430\n",
"Epoch 72/100\n",
"\n",
"Epoch 00072: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 143s 3s/step - loss: 4.5368 - val_loss: 6.5207\n",
"\n",
"Epoch 00072: val_loss did not improve from 4.54430\n",
"Epoch 73/100\n",
"\n",
"Epoch 00073: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 141s 3s/step - loss: 4.0598 - val_loss: 5.2421\n",
"\n",
"Epoch 00073: val_loss did not improve from 4.54430\n",
"Epoch 74/100\n",
"\n",
"Epoch 00074: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 150s 3s/step - loss: 4.4861 - val_loss: 5.4182\n",
"\n",
"Epoch 00074: val_loss did not improve from 4.54430\n",
"Epoch 75/100\n",
"\n",
"Epoch 00075: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 144s 3s/step - loss: 4.5263 - val_loss: 4.3774\n",
"\n",
"Epoch 00075: val_loss improved from 4.54430 to 4.37742, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 76/100\n",
"\n",
"Epoch 00076: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 148s 3s/step - loss: 3.8465 - val_loss: 4.5809\n",
"\n",
"Epoch 00076: val_loss did not improve from 4.37742\n",
"Epoch 77/100\n",
"\n",
"Epoch 00077: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 152s 3s/step - loss: 4.0495 - val_loss: 4.9745\n",
"\n",
"Epoch 00077: val_loss did not improve from 4.37742\n",
"Epoch 78/100\n",
"\n",
"Epoch 00078: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 152s 3s/step - loss: 4.6009 - val_loss: 13.4989\n",
"\n",
"Epoch 00078: val_loss did not improve from 4.37742\n",
"Epoch 79/100\n",
"\n",
"Epoch 00079: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 142s 3s/step - loss: 4.6687 - val_loss: 6.4490\n",
"\n",
"Epoch 00079: val_loss did not improve from 4.37742\n",
"Epoch 80/100\n",
"\n",
"Epoch 00080: LearningRateScheduler setting learning rate to 0.001.\n",
"50/50 [==============================] - 147s 3s/step - loss: 4.5297 - val_loss: 8.0478\n",
"\n",
"Epoch 00080: val_loss did not improve from 4.37742\n",
"Epoch 81/100\n",
"\n",
"Epoch 00081: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 141s 3s/step - loss: 4.2662 - val_loss: 5.7929\n",
"\n",
"Epoch 00081: val_loss did not improve from 4.37742\n",
"Epoch 82/100\n",
"\n",
"Epoch 00082: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 149s 3s/step - loss: 4.1048 - val_loss: 4.6117\n",
"\n",
"Epoch 00082: val_loss did not improve from 4.37742\n",
"Epoch 83/100\n",
"\n",
"Epoch 00083: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 156s 3s/step - loss: 3.9905 - val_loss: 4.5542\n",
"\n",
"Epoch 00083: val_loss did not improve from 4.37742\n",
"Epoch 84/100\n",
"\n",
"Epoch 00084: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 155s 3s/step - loss: 4.3129 - val_loss: 4.4676\n",
"\n",
"Epoch 00084: val_loss did not improve from 4.37742\n",
"Epoch 85/100\n",
"\n",
"Epoch 00085: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 156s 3s/step - loss: 3.7951 - val_loss: 4.4689\n",
"\n",
"Epoch 00085: val_loss did not improve from 4.37742\n",
"Epoch 86/100\n",
"\n",
"Epoch 00086: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 155s 3s/step - loss: 4.3618 - val_loss: 4.4048\n",
"\n",
"Epoch 00086: val_loss did not improve from 4.37742\n",
"Epoch 87/100\n",
"\n",
"Epoch 00087: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 156s 3s/step - loss: 4.3538 - val_loss: 4.6832\n",
"\n",
"Epoch 00087: val_loss did not improve from 4.37742\n",
"Epoch 88/100\n",
"\n",
"Epoch 00088: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 152s 3s/step - loss: 4.2076 - val_loss: 4.4796\n",
"\n",
"Epoch 00088: val_loss did not improve from 4.37742\n",
"Epoch 89/100\n",
"\n",
"Epoch 00089: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 146s 3s/step - loss: 4.1322 - val_loss: 4.5462\n",
"\n",
"Epoch 00089: val_loss did not improve from 4.37742\n",
"Epoch 90/100\n",
"\n",
"Epoch 00090: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 157s 3s/step - loss: 4.4995 - val_loss: 4.5660\n",
"\n",
"Epoch 00090: val_loss did not improve from 4.37742\n",
"Epoch 91/100\n",
"\n",
"Epoch 00091: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 158s 3s/step - loss: 4.2653 - val_loss: 4.5265\n",
"\n",
"Epoch 00091: val_loss did not improve from 4.37742\n",
"Epoch 92/100\n",
"\n",
"Epoch 00092: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 153s 3s/step - loss: 4.3702 - val_loss: 4.5276\n",
"\n",
"Epoch 00092: val_loss did not improve from 4.37742\n",
"Epoch 93/100\n",
"\n",
"Epoch 00093: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 153s 3s/step - loss: 3.7340 - val_loss: 4.5439\n",
"\n",
"Epoch 00093: val_loss did not improve from 4.37742\n",
"Epoch 94/100\n",
"\n",
"Epoch 00094: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 151s 3s/step - loss: 4.0253 - val_loss: 4.3250\n",
"\n",
"Epoch 00094: val_loss improved from 4.37742 to 4.32498, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 95/100\n",
"\n",
"Epoch 00095: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 143s 3s/step - loss: 4.0254 - val_loss: 4.6277\n",
"\n",
"Epoch 00095: val_loss did not improve from 4.32498\n",
"Epoch 96/100\n",
"\n",
"Epoch 00096: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 148s 3s/step - loss: 3.9857 - val_loss: 4.2953\n",
"\n",
"Epoch 00096: val_loss improved from 4.32498 to 4.29533, saving model to experimento_ssd7_panel_cell.h5\n",
"Epoch 97/100\n",
"\n",
"Epoch 00097: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 157s 3s/step - loss: 3.6750 - val_loss: 4.5637\n",
"\n",
"Epoch 00097: val_loss did not improve from 4.29533\n",
"Epoch 98/100\n",
"\n",
"Epoch 00098: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 154s 3s/step - loss: 3.7435 - val_loss: 4.3923\n",
"\n",
"Epoch 00098: val_loss did not improve from 4.29533\n",
"Epoch 99/100\n",
"\n",
"Epoch 00099: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 162s 3s/step - loss: 4.0930 - val_loss: 4.4010\n",
"\n",
"Epoch 00099: val_loss did not improve from 4.29533\n",
"Epoch 100/100\n",
"\n",
"Epoch 00100: LearningRateScheduler setting learning rate to 0.0001.\n",
"50/50 [==============================] - 134s 3s/step - loss: 3.8983 - val_loss: 4.4451\n",
"\n",
"Epoch 00100: val_loss did not improve from 4.29533\n"
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-eddb2cf7cd19>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0mvalidation_steps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mceil\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mval_dataset_size\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[0minitial_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 203\u001b[0;31m verbose = 1 if config['train']['debug'] else 2)\n\u001b[0m\u001b[1;32m 204\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mhistory_path\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'train'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'saved_weights_name'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'.'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m'_history'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/model/lib/python3.6/site-packages/keras/legacy/interfaces.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 89\u001b[0m warnings.warn('Update your `' + object_name + '` call to the ' +\n\u001b[1;32m 90\u001b[0m 'Keras 2 API: ' + signature, stacklevel=2)\n\u001b[0;32m---> 91\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 92\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_original_function\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 93\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/model/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit_generator\u001b[0;34m(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)\u001b[0m\n\u001b[1;32m 1416\u001b[0m \u001b[0muse_multiprocessing\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0muse_multiprocessing\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1417\u001b[0m \u001b[0mshuffle\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mshuffle\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1418\u001b[0;31m initial_epoch=initial_epoch)\n\u001b[0m\u001b[1;32m 1419\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1420\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0minterfaces\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlegacy_generator_methods_support\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/model/lib/python3.6/site-packages/keras/engine/training_generator.py\u001b[0m in \u001b[0;36mfit_generator\u001b[0;34m(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)\u001b[0m\n\u001b[1;32m 215\u001b[0m outs = model.train_on_batch(x, y,\n\u001b[1;32m 216\u001b[0m \u001b[0msample_weight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msample_weight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 217\u001b[0;31m class_weight=class_weight)\n\u001b[0m\u001b[1;32m 218\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 219\u001b[0m \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mto_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mouts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/model/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mtrain_on_batch\u001b[0;34m(self, x, y, sample_weight, class_weight)\u001b[0m\n\u001b[1;32m 1215\u001b[0m \u001b[0mins\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0my\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0msample_weights\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1216\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_make_train_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1217\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mins\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1218\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0munpack_singleton\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1219\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/model/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 2713\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_legacy_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2714\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2715\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2716\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2717\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mpy_any\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mis_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/model/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 2673\u001b[0m \u001b[0mfetched\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_callable_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0marray_vals\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_metadata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2674\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2675\u001b[0;31m \u001b[0mfetched\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_callable_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0marray_vals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2676\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mfetched\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2677\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/model/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1380\u001b[0m ret = tf_session.TF_SessionRunCallable(\n\u001b[1;32m 1381\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatus\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1382\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 1383\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1384\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
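
A hedged sketch of the training call behind the log and the fit_generator traceback above: the ModelCheckpoint and LearningRateScheduler callbacks are inferred from the log messages, and the generators and sizes (train_generator, val_generator, val_dataset_size, batch_size, initial_epoch) are assumptions except where they appear in the traceback.

from math import ceil
from keras.callbacks import ModelCheckpoint, LearningRateScheduler

# Checkpoint behaviour inferred from the "val_loss improved ... saving model to ..." messages.
callbacks = [
    ModelCheckpoint(config['train']['saved_weights_name'],
                    monitor='val_loss',
                    save_best_only=True,
                    verbose=1),
    LearningRateScheduler(schedule=lr_schedule, verbose=1),
]

history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=50,          # 50 steps per epoch, as in the log
                              epochs=100,
                              callbacks=callbacks,
                              validation_data=val_generator,
                              validation_steps=ceil(val_dataset_size / batch_size),
                              initial_epoch=initial_epoch,
                              verbose=1 if config['train']['debug'] else 2)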

27
config_300_fault_4.json Normal file
View File

@@ -0,0 +1,27 @@
{
"model" : {
"backend": "ssd300",
"input": 400,
"labels": ["4"]
},
"train": {
"train_image_folder": "../Train&Test_D/Train/images",
"train_annot_folder": "../Train&Test_D/Train/anns",
"train_image_set_filename": "../Train&Test_D/Train/train.txt",
"train_times": 1,
"batch_size": 12,
"learning_rate": 1e-4,
"warmup_epochs": 3,
"saved_weights_name": "../Result_ssd300_fault_4/experimento_ssd300_fault_1.h5",
"debug": true
},
"test": {
"test_image_folder": "Train&Test_D/Test/images",
"test_annot_folder": "Train&Test_D/Test/anns",
"test_image_set_filename": "Train&Test_D/Test/test.txt"
}
}
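
A small, hedged example of how this new config might be consumed; the field names come from the file above, while the consuming code itself is an assumption.

import json

with open('config_300_fault_4.json') as f:
    config = json.load(f)

labels = config['model']['labels']       # ["4"] -> the diode-fault class
n_classes = len(labels)                  # one positive class; SSD adds the background class itself

train_images = config['train']['train_image_folder']
weights_path = config['train']['saved_weights_name']
print(config['model']['backend'], config['model']['input'], labels, n_classes)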

49
config_full_yolo_fault_4.json Executable file
View File

@@ -0,0 +1,49 @@
{
"model" : {
"min_input_size": 400,
"max_input_size": 400,
"anchors": [5,7, 10,14, 15, 15, 26,32, 45,119, 54,18, 94,59, 109,183, 200,21],
"labels": ["4"],
"backend": "full_yolo_backend.h5"
},
"train": {
"train_image_folder": "../Train&Test_D/Train/images/",
"train_annot_folder": "../Train&Test_D/Train/anns/",
"cache_name": "../Resultados_yolo3_fault_4/experimento_fault_1_gpu.pkl",
"train_times": 1,
"batch_size": 2,
"learning_rate": 1e-4,
"nb_epochs": 200,
"warmup_epochs": 15,
"ignore_thresh": 0.5,
"gpus": "0,1",
"grid_scales": [1,1,1],
"obj_scale": 5,
"noobj_scale": 1,
"xywh_scale": 1,
"class_scale": 1,
"tensorboard_dir": "log_experimento_fault_gpu",
"saved_weights_name": "../Resultados_yolo3_fault_4/experimento_yolo3_full_fault.h5",
"debug": true
},
"valid": {
"valid_image_folder": "../Train&Test_D/Test/images/",
"valid_annot_folder": "../Train&Test_D/Test/anns/",
"cache_name": "../Resultados_yolo3_fault_4/val_fault_1.pkl",
"valid_times": 1
},
"test": {
"test_image_folder": "../Train&Test_D/Test/images/",
"test_annot_folder": "../Train&Test_D/Test/anns/",
"cache_name": "../Resultados_yolo3_fault_4/test_fault_1.pkl",
"test_times": 1
}
}
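
A hedged sketch of how the flat anchors list in this YOLOv3-style config is usually interpreted: nine width/height pairs, three per output scale, following the keras-yolo3 convention this config format resembles; the grouping and the consuming code are assumptions.

import json

with open('config_full_yolo_fault_4.json') as f:
    cfg = json.load(f)

flat = cfg['model']['anchors']                 # 18 numbers -> 9 (width, height) anchor pairs
pairs = list(zip(flat[0::2], flat[1::2]))

# Three anchors per output scale (small / medium / large objects) is the usual convention.
for name, group in zip(['small', 'medium', 'large'], [pairs[0:3], pairs[3:6], pairs[6:9]]):
    print(name, group)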