1897 lines
161 KiB
Plaintext
1897 lines
161 KiB
Plaintext
|
|
{
|
||
|
|
"cells": [
|
||
|
|
{
|
||
|
|
"cell_type": "markdown",
|
||
|
|
"metadata": {},
|
||
|
|
"source": [
|
||
|
|
"Detector de Paneles"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": [
|
||
|
|
"\n",
|
||
|
|
"\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "markdown",
|
||
|
|
"metadata": {},
|
||
|
|
"source": [
|
||
|
|
"\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "markdown",
|
||
|
|
"metadata": {},
|
||
|
|
"source": []
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": []
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "markdown",
|
||
|
|
"metadata": {},
|
||
|
|
"source": [
|
||
|
|
"Cargar el modelo ssd7 \n",
|
||
|
|
"(https://github.com/pierluigiferrari/ssd_keras#how-to-fine-tune-one-of-the-trained-models-on-your-own-dataset)\n",
|
||
|
|
"\n",
|
||
|
|
"Training del SSD7 (modelo reducido de SSD). Parámetros en config_7.json y descargar VGG_ILSVRC_16_layers_fc_reduced.h5\n",
|
||
|
|
"\n",
|
||
|
|
"\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": 1,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [
|
||
|
|
{
|
||
|
|
"name": "stderr",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"Using TensorFlow backend.\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"\n",
|
||
|
|
"Training on: \t{'panel': 1}\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"Loading pretrained weights.\n",
|
||
|
|
"\n",
|
||
|
|
"WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
|
||
|
|
"Instructions for updating:\n",
|
||
|
|
"Colocations handled automatically by placer.\n",
|
||
|
|
"WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
|
||
|
|
"Instructions for updating:\n",
|
||
|
|
"Use tf.cast instead.\n",
|
||
|
|
"WARNING:tensorflow:From /home/dl-desktop/Desktop/Rentadrone/ssd_keras-master/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
|
||
|
|
"Instructions for updating:\n",
|
||
|
|
"Use tf.cast instead.\n",
|
||
|
|
"WARNING:tensorflow:From /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
|
||
|
|
"Instructions for updating:\n",
|
||
|
|
"Deprecated in favor of operator or tf.math.divide.\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"ename": "ResourceExhaustedError",
|
||
|
|
"evalue": "OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n\nCaused by op 'training/Adam/Variable_6/Assign', defined at:\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in <module>\n app.launch_new_instance()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in <lambda>\n lambda f: self._run_callback(functools.partial(callback, future))\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, in execute_request\n user_expressions, allow_stdin,\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py
|
||
|
|
"output_type": "error",
|
||
|
|
"traceback": [
|
||
|
|
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||
|
|
"\u001b[0;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;31mResourceExhaustedError\u001b[0m: OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[{{node training/Adam/Variable_6/Assign}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n",
|
||
|
|
"\nDuring handling of the above exception, another exception occurred:\n",
|
||
|
|
"\u001b[0;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)",
|
||
|
|
"\u001b[0;32m<ipython-input-1-53d5db8f4328>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 122\u001b[0m model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n\u001b[1;32m 123\u001b[0m \u001b[0;34m'L2Normalization'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mL2Normalization\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m 'compute_loss': ssd_loss.compute_loss})\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\u001b[0m in \u001b[0;36mload_model\u001b[0;34m(filepath, custom_objects, compile)\u001b[0m\n\u001b[1;32m 417\u001b[0m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh5dict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'r'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 418\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 419\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_deserialize_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcustom_objects\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcompile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 420\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 421\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mopened_new_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/engine/saving.py\u001b[0m in \u001b[0;36m_deserialize_model\u001b[0;34m(f, custom_objects, compile)\u001b[0m\n\u001b[1;32m 323\u001b[0m optimizer_weight_names]\n\u001b[1;32m 324\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 325\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moptimizer_weight_values\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 326\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 327\u001b[0m warnings.warn('Error in loading the saved optimizer '\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/optimizers.py\u001b[0m in \u001b[0;36mset_weights\u001b[0;34m(self, weights)\u001b[0m\n\u001b[1;32m 124\u001b[0m 'of the optimizer (' + str(len(params)) + ')')\n\u001b[1;32m 125\u001b[0m \u001b[0mweight_value_tuples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 126\u001b[0;31m \u001b[0mparam_values\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mK\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatch_get_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 127\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mpv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mw\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparam_values\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mpv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mw\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36mbatch_get_value\u001b[0;34m(ops)\u001b[0m\n\u001b[1;32m 2418\u001b[0m \"\"\"\n\u001b[1;32m 2419\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2420\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mget_session\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2421\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2422\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36mget_session\u001b[0;34m()\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_keras_initialized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0muninitialized_vars\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0msession\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvariables_initializer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muninitialized_vars\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;31m# hack for list_devices() function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# list_devices() function is not available under tensorflow r1.3.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;32m~/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||
|
|
"\u001b[0;31mResourceExhaustedError\u001b[0m: OOM when allocating tensor with shape[48] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc\n\t [[node training/Adam/Variable_6/Assign (defined at /home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:402) ]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.\n\n\nCaused by op 'training/Adam/Variable_6/Assign', defined at:\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in <module>\n app.launch_new_instance()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in <lambda>\n lambda f: self._run_callback(functools.partial(callback, 
future))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, in execute_request\n user_expressions, allow_stdin,\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File 
\"/home/dl-desktop/anaconda3/envs/tf_gpu/lib/python3.6/site-packa
|
||
|
|
]
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"source": [
|
||
|
|
"from keras.optimizers import Adam, SGD\n",
|
||
|
|
"from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n",
|
||
|
|
"from keras import backend as K\n",
|
||
|
|
"from keras.models import load_model\n",
|
||
|
|
"from math import ceil\n",
|
||
|
|
"import numpy as np\n",
|
||
|
|
"from matplotlib import pyplot as plt\n",
|
||
|
|
"import os\n",
|
||
|
|
"import json\n",
|
||
|
|
"import xml.etree.cElementTree as ET\n",
|
||
|
|
"\n",
|
||
|
|
"import sys\n",
|
||
|
|
"sys.path += [os.path.abspath('../../ssd_keras-master')]\n",
|
||
|
|
"\n",
|
||
|
|
"from keras_loss_function.keras_ssd_loss import SSDLoss\n",
|
||
|
|
"from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\n",
|
||
|
|
"from keras_layers.keras_layer_DecodeDetections import DecodeDetections\n",
|
||
|
|
"from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n",
|
||
|
|
"from keras_layers.keras_layer_L2Normalization import L2Normalization\n",
|
||
|
|
"from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n",
|
||
|
|
"from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n",
|
||
|
|
"from data_generator.object_detection_2d_data_generator import DataGenerator\n",
|
||
|
|
"from data_generator.object_detection_2d_geometric_ops import Resize\n",
|
||
|
|
"from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\n",
|
||
|
|
"from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n",
|
||
|
|
"from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n",
|
||
|
|
"from eval_utils.average_precision_evaluator import Evaluator\n",
|
||
|
|
"from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\n",
|
||
|
|
"from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"def makedirs(path):\n",
|
||
|
|
" try:\n",
|
||
|
|
" os.makedirs(path)\n",
|
||
|
|
" except OSError:\n",
|
||
|
|
" if not os.path.isdir(path):\n",
|
||
|
|
" raise\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"K.tensorflow_backend._get_available_gpus()\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"def lr_schedule(epoch):\n",
|
||
|
|
" if epoch < 80:\n",
|
||
|
|
" return 0.001\n",
|
||
|
|
" elif epoch < 100:\n",
|
||
|
|
" return 0.0001\n",
|
||
|
|
" else:\n",
|
||
|
|
" return 0.00001\n",
|
||
|
|
"\n",
|
||
|
|
"config_path = 'config_7_panel.json'\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"with open(config_path) as config_buffer:\n",
|
||
|
|
" config = json.loads(config_buffer.read())\n",
|
||
|
|
"\n",
|
||
|
|
"###############################\n",
|
||
|
|
"# Parse the annotations\n",
|
||
|
|
"###############################\n",
|
||
|
|
"path_imgs_training = config['train']['train_image_folder']\n",
|
||
|
|
"path_anns_training = config['train']['train_annot_folder']\n",
|
||
|
|
"path_imgs_val = config['test']['test_image_folder']\n",
|
||
|
|
"path_anns_val = config['test']['test_annot_folder']\n",
|
||
|
|
"labels = config['model']['labels']\n",
|
||
|
|
"categories = {}\n",
|
||
|
|
"#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} #la categoría 0 es la background\n",
|
||
|
|
"for i in range(len(labels)): categories[labels[i]] = i+1\n",
|
||
|
|
"print('\\nTraining on: \\t' + str(categories) + '\\n')\n",
|
||
|
|
"\n",
|
||
|
|
"####################################\n",
|
||
|
|
"# Parameters\n",
|
||
|
|
"###################################\n",
|
||
|
|
" #%%\n",
|
||
|
|
"img_height = config['model']['input'] # Height of the model input images\n",
|
||
|
|
"img_width = config['model']['input'] # Width of the model input images\n",
|
||
|
|
"img_channels = 3 # Number of color channels of the model input images\n",
|
||
|
|
"mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\n",
|
||
|
|
"swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\n",
|
||
|
|
"n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
|
||
|
|
"scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\n",
|
||
|
|
"#scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\n",
|
||
|
|
"scales = scales_pascal\n",
|
||
|
|
"aspect_ratios = [[1.0, 2.0, 0.5],\n",
|
||
|
|
" [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
|
||
|
|
" [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
|
||
|
|
" [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n",
|
||
|
|
" [1.0, 2.0, 0.5],\n",
|
||
|
|
" [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\n",
|
||
|
|
"two_boxes_for_ar1 = True\n",
|
||
|
|
"steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\n",
"offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\n",
"clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\n",
"variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\n",
"normalize_coords = True\n",
"\n",
"K.clear_session() # Clear previous models from memory.\n",
"\n",
"model_path = config['train']['saved_weights_name']\n",
"\n",
"# SSD7 uses a smaller anchor configuration than the SSD300 defaults above,\n",
"# so override it before either loading or building the model.\n",
"if config['model']['backend'] == 'ssd7':\n",
"    scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
"    aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
"    two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
"    steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
"    offsets = None\n",
"\n",
"if os.path.exists(model_path):\n",
"    # Resume from previously saved weights: the loader needs the custom\n",
"    # layers and the custom loss to deserialize the model.\n",
"    print(\"\\nLoading pretrained weights.\\n\")\n",
"    # We need to create an SSDLoss object in order to pass that to the model loader.\n",
"    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
"    K.clear_session() # Clear previous models from memory.\n",
"    model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
"                                                   'L2Normalization': L2Normalization,\n",
"                                                   'compute_loss': ssd_loss.compute_loss})\n",
"else:\n",
"    ####################################\n",
"    # Build the Keras model.\n",
"    ####################################\n",
"    if config['model']['backend'] == 'ssd300':\n",
"        from models.keras_ssd300 import ssd_300 as ssd\n",
"        # The import above aliases ssd_300 as `ssd`; call the alias so the\n",
"        # name actually exists in this scope (bugfix: `ssd_300(...)` here\n",
"        # would raise NameError).\n",
"        model = ssd(image_size=(img_height, img_width, img_channels),\n",
"                    n_classes=n_classes,\n",
"                    mode='training',\n",
"                    l2_regularization=0.0005,\n",
"                    scales=scales,\n",
"                    aspect_ratios_per_layer=aspect_ratios,\n",
"                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
"                    steps=steps,\n",
"                    offsets=offsets,\n",
"                    clip_boxes=clip_boxes,\n",
"                    variances=variances,\n",
"                    normalize_coords=normalize_coords,\n",
"                    subtract_mean=mean_color,\n",
"                    swap_channels=swap_channels)\n",
"    elif config['model']['backend'] == 'ssd7':\n",
"        from models.keras_ssd7 import build_model as ssd\n",
"        scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\n",
"        aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\n",
"        two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\n",
"        steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\n",
"        offsets = None\n",
"        model = ssd(image_size=(img_height, img_width, img_channels),\n",
"                    n_classes=n_classes,\n",
"                    mode='training',\n",
"                    l2_regularization=0.0005,\n",
"                    scales=scales,\n",
"                    aspect_ratios_global=aspect_ratios,\n",
"                    aspect_ratios_per_layer=None,\n",
"                    two_boxes_for_ar1=two_boxes_for_ar1,\n",
"                    steps=steps,\n",
"                    offsets=offsets,\n",
"                    clip_boxes=clip_boxes,\n",
"                    variances=variances,\n",
"                    normalize_coords=normalize_coords,\n",
"                    subtract_mean=None,\n",
"                    divide_by_stddev=None)\n",
"    else:\n",
"        print('Wrong Backend')\n",
"\n",
"    print('OK create model')\n",
"\n",
"    # TODO: Set the path to the weights you want to load. only for ssd300 or ssd512\n",
"    weights_path = '../ssd_keras-master/VGG_ILSVRC_16_layers_fc_reduced.h5'\n",
"    print(\"\\nLoading pretrained weights VGG.\\n\")\n",
"    model.load_weights(weights_path, by_name=True)\n",
"\n",
"    # 3: Instantiate an optimizer and the SSD loss function and compile the model.\n",
"    # If you want to follow the original Caffe implementation, use the preset SGD\n",
"    # optimizer, otherwise I'd recommend the Adam optimizer used below.\n",
"    #sgd = SGD(lr=config['train']['learning_rate'], momentum=0.9, decay=0.0, nesterov=False)\n",
"    optimizer = Adam(lr=config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n",
"    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
"    model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)\n",
"\n",
"    model.summary()\n"
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "markdown",
|
||
|
|
"metadata": {},
|
||
|
|
"source": [
|
||
|
|
"Instanciar los generadores de datos y entrenamiento del modelo.\n",
|
||
|
|
"\n",
|
||
|
|
"*Cambio realizado para leer png y jpg. keras-ssd-master/data_generator/object_detection_2d_data_generator.py función parse_xml\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": 2,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"Processing image set 'train.txt': 100%|██████████| 1/1 [00:00<00:00, 3.02it/s]\n",
|
||
|
|
"Processing image set 'test.txt': 100%|██████████| 1/1 [00:00<00:00, 2.48it/s]\n",
|
||
|
|
"panel : 69\n",
|
||
|
|
"cell : 423\n",
|
||
|
|
"Number of images in the training dataset:\t 1\n",
|
||
|
|
"Number of images in the validation dataset:\t 1\n",
|
||
|
|
"Epoch 1/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00001: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 200s 4s/step - loss: 13.2409 - val_loss: 9.9807\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00001: val_loss improved from inf to 9.98075, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 2/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00002: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 238s 5s/step - loss: 9.8864 - val_loss: 11.1452\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00002: val_loss did not improve from 9.98075\n",
|
||
|
|
"Epoch 3/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00003: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 226s 5s/step - loss: 8.8060 - val_loss: 8.3006\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00003: val_loss improved from 9.98075 to 8.30060, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 4/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00004: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 199s 4s/step - loss: 7.4999 - val_loss: 8.9384\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00004: val_loss did not improve from 8.30060\n",
|
||
|
|
"Epoch 5/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00005: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 187s 4s/step - loss: 7.4727 - val_loss: 7.9512\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00005: val_loss improved from 8.30060 to 7.95121, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 6/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00006: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 213s 4s/step - loss: 6.8813 - val_loss: 11.2544\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00006: val_loss did not improve from 7.95121\n",
|
||
|
|
"Epoch 7/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00007: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 195s 4s/step - loss: 6.4775 - val_loss: 6.9093\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00007: val_loss improved from 7.95121 to 6.90929, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 8/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00008: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 212s 4s/step - loss: 6.9758 - val_loss: 8.6997\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00008: val_loss did not improve from 6.90929\n",
|
||
|
|
"Epoch 9/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00009: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 199s 4s/step - loss: 6.1539 - val_loss: 10.9586\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00009: val_loss did not improve from 6.90929\n",
|
||
|
|
"Epoch 10/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00010: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 206s 4s/step - loss: 5.9307 - val_loss: 8.4361\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00010: val_loss did not improve from 6.90929\n",
|
||
|
|
"Epoch 11/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00011: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 197s 4s/step - loss: 5.3895 - val_loss: 5.9796\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00011: val_loss improved from 6.90929 to 5.97960, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 12/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00012: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 184s 4s/step - loss: 5.0889 - val_loss: 5.9283\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00012: val_loss improved from 5.97960 to 5.92832, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 13/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00013: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 193s 4s/step - loss: 5.7916 - val_loss: 6.7706\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00013: val_loss did not improve from 5.92832\n",
|
||
|
|
"Epoch 14/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00014: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 222s 4s/step - loss: 5.3010 - val_loss: 7.8910\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00014: val_loss did not improve from 5.92832\n",
|
||
|
|
"Epoch 15/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00015: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 179s 4s/step - loss: 4.9873 - val_loss: 6.0389\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00015: val_loss did not improve from 5.92832\n",
|
||
|
|
"Epoch 16/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00016: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 182s 4s/step - loss: 5.4664 - val_loss: 6.4125\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00016: val_loss did not improve from 5.92832\n",
|
||
|
|
"Epoch 17/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00017: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 166s 3s/step - loss: 6.0094 - val_loss: 9.2918\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00017: val_loss did not improve from 5.92832\n",
|
||
|
|
"Epoch 18/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00018: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 181s 4s/step - loss: 5.1737 - val_loss: 7.6806\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00018: val_loss did not improve from 5.92832\n",
|
||
|
|
"Epoch 19/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00019: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 159s 3s/step - loss: 5.2708 - val_loss: 7.1096\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00019: val_loss did not improve from 5.92832\n",
|
||
|
|
"Epoch 20/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00020: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 173s 3s/step - loss: 5.4765 - val_loss: 5.4921\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00020: val_loss improved from 5.92832 to 5.49211, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 21/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00021: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 170s 3s/step - loss: 4.6517 - val_loss: 6.6033\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00021: val_loss did not improve from 5.49211\n",
|
||
|
|
"Epoch 22/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00022: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 191s 4s/step - loss: 5.1432 - val_loss: 5.6549\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00022: val_loss did not improve from 5.49211\n",
|
||
|
|
"Epoch 23/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00023: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 159s 3s/step - loss: 5.4830 - val_loss: 5.8758\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00023: val_loss did not improve from 5.49211\n",
|
||
|
|
"Epoch 24/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00024: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 150s 3s/step - loss: 5.3366 - val_loss: 5.3871\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00024: val_loss improved from 5.49211 to 5.38706, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 25/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00025: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 138s 3s/step - loss: 5.7189 - val_loss: 8.0760\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00025: val_loss did not improve from 5.38706\n",
|
||
|
|
"Epoch 26/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00026: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 144s 3s/step - loss: 6.0929 - val_loss: 12.6163\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00026: val_loss did not improve from 5.38706\n",
|
||
|
|
"Epoch 27/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00027: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 147s 3s/step - loss: 5.2239 - val_loss: 9.8536\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00027: val_loss did not improve from 5.38706\n",
|
||
|
|
"Epoch 28/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00028: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 158s 3s/step - loss: 5.4414 - val_loss: 6.4950\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00028: val_loss did not improve from 5.38706\n",
|
||
|
|
"Epoch 29/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00029: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 157s 3s/step - loss: 5.4436 - val_loss: 9.0002\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00029: val_loss did not improve from 5.38706\n",
|
||
|
|
"Epoch 30/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00030: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 162s 3s/step - loss: 4.9780 - val_loss: 4.9993\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00030: val_loss improved from 5.38706 to 4.99925, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 31/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00031: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 140s 3s/step - loss: 4.9645 - val_loss: 5.6612\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00031: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 32/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00032: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 141s 3s/step - loss: 4.5982 - val_loss: 5.2083\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00032: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 33/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00033: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 143s 3s/step - loss: 4.3101 - val_loss: 6.4808\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00033: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 34/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00034: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 145s 3s/step - loss: 4.4252 - val_loss: 10.9472\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"\n",
|
||
|
|
"Epoch 00034: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 35/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00035: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 153s 3s/step - loss: 4.4998 - val_loss: 7.1254\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00035: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 36/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00036: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 153s 3s/step - loss: 4.8952 - val_loss: 7.0446\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00036: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 37/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00037: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 154s 3s/step - loss: 4.9868 - val_loss: 9.3251\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00037: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 38/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00038: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 148s 3s/step - loss: 4.8918 - val_loss: 5.1689\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00038: val_loss did not improve from 4.99925\n",
|
||
|
|
"Epoch 39/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00039: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 143s 3s/step - loss: 4.5572 - val_loss: 4.9839\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00039: val_loss improved from 4.99925 to 4.98394, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 40/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00040: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 150s 3s/step - loss: 4.4722 - val_loss: 5.7133\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00040: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 41/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00041: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 152s 3s/step - loss: 4.9414 - val_loss: 5.5843\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00041: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 42/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00042: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 148s 3s/step - loss: 4.5857 - val_loss: 5.1884\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00042: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 43/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00043: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 149s 3s/step - loss: 4.7094 - val_loss: 6.7545\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00043: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 44/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00044: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 151s 3s/step - loss: 5.0428 - val_loss: 5.2691\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00044: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 45/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00045: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 146s 3s/step - loss: 4.9842 - val_loss: 6.5112\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00045: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 46/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00046: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 147s 3s/step - loss: 4.9108 - val_loss: 6.0670\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00046: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 47/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00047: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 155s 3s/step - loss: 4.6837 - val_loss: 5.8351\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00047: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 48/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00048: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 149s 3s/step - loss: 5.1042 - val_loss: 5.1778\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00048: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 49/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00049: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 144s 3s/step - loss: 4.1312 - val_loss: 5.9606\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00049: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 50/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00050: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 122s 2s/step - loss: 4.5373 - val_loss: 5.4351\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00050: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 51/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00051: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 135s 3s/step - loss: 4.8955 - val_loss: 6.0315\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00051: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 52/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00052: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 150s 3s/step - loss: 4.9445 - val_loss: 5.7199\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00052: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 53/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00053: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 139s 3s/step - loss: 3.9748 - val_loss: 5.5974\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00053: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 54/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00054: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 153s 3s/step - loss: 4.8783 - val_loss: 8.6056\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00054: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 55/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00055: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 141s 3s/step - loss: 4.1649 - val_loss: 6.0042\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00055: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 56/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00056: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 149s 3s/step - loss: 4.8997 - val_loss: 9.1298\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00056: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 57/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00057: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 151s 3s/step - loss: 4.4433 - val_loss: 7.1151\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00057: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 58/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00058: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 147s 3s/step - loss: 4.5827 - val_loss: 5.4356\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00058: val_loss did not improve from 4.98394\n",
|
||
|
|
"Epoch 59/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00059: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 137s 3s/step - loss: 3.9437 - val_loss: 4.7926\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00059: val_loss improved from 4.98394 to 4.79262, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 60/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00060: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 125s 3s/step - loss: 4.0939 - val_loss: 5.7098\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00060: val_loss did not improve from 4.79262\n",
|
||
|
|
"Epoch 61/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00061: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 161s 3s/step - loss: 5.1152 - val_loss: 5.2079\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00061: val_loss did not improve from 4.79262\n",
|
||
|
|
"Epoch 62/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00062: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 144s 3s/step - loss: 4.2958 - val_loss: 4.9239\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00062: val_loss did not improve from 4.79262\n",
|
||
|
|
"Epoch 63/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00063: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 141s 3s/step - loss: 3.8241 - val_loss: 4.5443\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00063: val_loss improved from 4.79262 to 4.54430, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 64/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00064: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 134s 3s/step - loss: 4.7252 - val_loss: 5.9445\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00064: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 65/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00065: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 154s 3s/step - loss: 4.4455 - val_loss: 4.8326\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00065: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 66/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00066: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 145s 3s/step - loss: 4.4054 - val_loss: 5.6441\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00066: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 67/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00067: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 124s 2s/step - loss: 4.4165 - val_loss: 6.8159\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00067: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 68/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00068: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 162s 3s/step - loss: 5.0418 - val_loss: 4.8508\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00068: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 69/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00069: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 140s 3s/step - loss: 4.1512 - val_loss: 5.4053\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00069: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 70/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00070: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 148s 3s/step - loss: 4.6197 - val_loss: 5.2824\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00070: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 71/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00071: LearningRateScheduler setting learning rate to 0.001.\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"50/50 [==============================] - 152s 3s/step - loss: 4.2807 - val_loss: 5.5992\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00071: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 72/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00072: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 143s 3s/step - loss: 4.5368 - val_loss: 6.5207\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00072: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 73/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00073: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 141s 3s/step - loss: 4.0598 - val_loss: 5.2421\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00073: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 74/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00074: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 150s 3s/step - loss: 4.4861 - val_loss: 5.4182\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00074: val_loss did not improve from 4.54430\n",
|
||
|
|
"Epoch 75/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00075: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 144s 3s/step - loss: 4.5263 - val_loss: 4.3774\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00075: val_loss improved from 4.54430 to 4.37742, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 76/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00076: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 148s 3s/step - loss: 3.8465 - val_loss: 4.5809\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00076: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 77/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00077: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 152s 3s/step - loss: 4.0495 - val_loss: 4.9745\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00077: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 78/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00078: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 152s 3s/step - loss: 4.6009 - val_loss: 13.4989\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00078: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 79/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00079: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 142s 3s/step - loss: 4.6687 - val_loss: 6.4490\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00079: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 80/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00080: LearningRateScheduler setting learning rate to 0.001.\n",
|
||
|
|
"50/50 [==============================] - 147s 3s/step - loss: 4.5297 - val_loss: 8.0478\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00080: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 81/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00081: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 141s 3s/step - loss: 4.2662 - val_loss: 5.7929\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00081: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 82/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00082: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 149s 3s/step - loss: 4.1048 - val_loss: 4.6117\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00082: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 83/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00083: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 156s 3s/step - loss: 3.9905 - val_loss: 4.5542\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00083: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 84/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00084: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 155s 3s/step - loss: 4.3129 - val_loss: 4.4676\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00084: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 85/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00085: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 156s 3s/step - loss: 3.7951 - val_loss: 4.4689\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00085: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 86/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00086: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 155s 3s/step - loss: 4.3618 - val_loss: 4.4048\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00086: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 87/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00087: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 156s 3s/step - loss: 4.3538 - val_loss: 4.6832\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00087: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 88/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00088: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 152s 3s/step - loss: 4.2076 - val_loss: 4.4796\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00088: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 89/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00089: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 146s 3s/step - loss: 4.1322 - val_loss: 4.5462\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00089: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 90/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00090: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 157s 3s/step - loss: 4.4995 - val_loss: 4.5660\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00090: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 91/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00091: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 158s 3s/step - loss: 4.2653 - val_loss: 4.5265\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00091: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 92/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00092: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 153s 3s/step - loss: 4.3702 - val_loss: 4.5276\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00092: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 93/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00093: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 153s 3s/step - loss: 3.7340 - val_loss: 4.5439\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00093: val_loss did not improve from 4.37742\n",
|
||
|
|
"Epoch 94/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00094: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 151s 3s/step - loss: 4.0253 - val_loss: 4.3250\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00094: val_loss improved from 4.37742 to 4.32498, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 95/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00095: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 143s 3s/step - loss: 4.0254 - val_loss: 4.6277\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00095: val_loss did not improve from 4.32498\n",
|
||
|
|
"Epoch 96/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00096: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 148s 3s/step - loss: 3.9857 - val_loss: 4.2953\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00096: val_loss improved from 4.32498 to 4.29533, saving model to experimento_ssd7_panel_cell.h5\n",
|
||
|
|
"Epoch 97/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00097: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 157s 3s/step - loss: 3.6750 - val_loss: 4.5637\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00097: val_loss did not improve from 4.29533\n",
|
||
|
|
"Epoch 98/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00098: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 154s 3s/step - loss: 3.7435 - val_loss: 4.3923\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00098: val_loss did not improve from 4.29533\n",
|
||
|
|
"Epoch 99/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00099: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 162s 3s/step - loss: 4.0930 - val_loss: 4.4010\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00099: val_loss did not improve from 4.29533\n",
|
||
|
|
"Epoch 100/100\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00100: LearningRateScheduler setting learning rate to 0.0001.\n",
|
||
|
|
"50/50 [==============================] - 134s 3s/step - loss: 3.8983 - val_loss: 4.4451\n",
|
||
|
|
"\n",
|
||
|
|
"Epoch 00100: val_loss did not improve from 4.29533\n"
|
||
|
|
]
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"source": [
|
||
|
|
"#ENTRENAMIENTO DE MODELO\n",
|
||
|
|
"#####################################################################\n",
|
||
|
|
"# Instantiate two `DataGenerator` objects: One for training, one for validation.\n",
|
||
|
|
"######################################################################\n",
|
||
|
|
"# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n",
|
||
|
|
"\n",
|
||
|
|
"train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
|
||
|
|
"val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
|
||
|
|
"\n",
|
||
|
|
"# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"# The XML parser needs to know which object class names to look for and in which order to map them to integers.\n",
|
||
|
|
"classes = ['background' ] + labels\n",
|
||
|
|
"\n",
|
||
|
|
"train_dataset.parse_xml(images_dirs= [config['train']['train_image_folder']],\n",
|
||
|
|
" image_set_filenames=[config['train']['train_image_set_filename']],\n",
|
||
|
|
" annotations_dirs=[config['train']['train_annot_folder']],\n",
|
||
|
|
" classes=classes,\n",
|
||
|
|
" include_classes='all',\n",
|
||
|
|
" #classes = ['background', 'panel', 'cell'], \n",
|
||
|
|
" #include_classes=classes,\n",
|
||
|
|
" exclude_truncated=False,\n",
|
||
|
|
" exclude_difficult=False,\n",
|
||
|
|
" ret=False)\n",
|
||
|
|
"\n",
|
||
|
|
"val_dataset.parse_xml(images_dirs= [config['test']['test_image_folder']],\n",
|
||
|
|
" image_set_filenames=[config['test']['test_image_set_filename']],\n",
|
||
|
|
" annotations_dirs=[config['test']['test_annot_folder']],\n",
|
||
|
|
" classes=classes,\n",
|
||
|
|
" include_classes='all',\n",
|
||
|
|
" #classes = ['background', 'panel', 'cell'], \n",
|
||
|
|
" #include_classes=classes,\n",
|
||
|
|
" exclude_truncated=False,\n",
|
||
|
|
" exclude_difficult=False,\n",
|
||
|
|
" ret=False)\n",
|
||
|
|
"\n",
|
||
|
|
"#########################\n",
|
||
|
|
"# 3: Set the batch size.\n",
|
||
|
|
"#########################\n",
|
||
|
|
"batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n",
|
||
|
|
"\n",
|
||
|
|
"##########################\n",
|
||
|
|
"# 4: Set the image transformations for pre-processing and data augmentation options.\n",
|
||
|
|
"##########################\n",
|
||
|
|
"# For the training generator:\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"# For the validation generator:\n",
|
||
|
|
"convert_to_3_channels = ConvertTo3Channels()\n",
|
||
|
|
"resize = Resize(height=img_height, width=img_width)\n",
|
||
|
|
"\n",
|
||
|
|
"#########################################\n",
|
||
|
|
"# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n",
|
||
|
|
"#########################################\n",
|
||
|
|
"# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\n",
|
||
|
|
"if config['model']['backend'] == 'ssd300':\n",
|
||
|
|
" predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('fc7_mbox_conf').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n",
|
||
|
|
" ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
|
||
|
|
" img_width=img_width,\n",
|
||
|
|
" n_classes=n_classes,\n",
|
||
|
|
" predictor_sizes=predictor_sizes,\n",
|
||
|
|
" scales=scales,\n",
|
||
|
|
" aspect_ratios_per_layer=aspect_ratios,\n",
|
||
|
|
" two_boxes_for_ar1=two_boxes_for_ar1,\n",
|
||
|
|
" steps=steps,\n",
|
||
|
|
" offsets=offsets,\n",
|
||
|
|
" clip_boxes=clip_boxes,\n",
|
||
|
|
" variances=variances,\n",
|
||
|
|
" matching_type='multi',\n",
|
||
|
|
" pos_iou_threshold=0.5,\n",
|
||
|
|
" neg_iou_limit=0.5,\n",
|
||
|
|
" normalize_coords=normalize_coords)\n",
|
||
|
|
"\n",
|
||
|
|
"elif config['model']['backend'] == 'ssd7':\n",
|
||
|
|
" predictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('classes5').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('classes6').output_shape[1:3],\n",
|
||
|
|
" model.get_layer('classes7').output_shape[1:3]]\n",
|
||
|
|
" ssd_input_encoder = SSDInputEncoder(img_height=img_height,\n",
|
||
|
|
" img_width=img_width,\n",
|
||
|
|
" n_classes=n_classes,\n",
|
||
|
|
" predictor_sizes=predictor_sizes,\n",
|
||
|
|
" scales=scales,\n",
|
||
|
|
" aspect_ratios_global=aspect_ratios,\n",
|
||
|
|
" two_boxes_for_ar1=two_boxes_for_ar1,\n",
|
||
|
|
" steps=steps,\n",
|
||
|
|
" offsets=offsets,\n",
|
||
|
|
" clip_boxes=clip_boxes,\n",
|
||
|
|
" variances=variances,\n",
|
||
|
|
" matching_type='multi',\n",
|
||
|
|
" pos_iou_threshold=0.5,\n",
|
||
|
|
" neg_iou_limit=0.3,\n",
|
||
|
|
" normalize_coords=normalize_coords)\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
" \n",
|
||
|
|
"data_augmentation_chain = DataAugmentationVariableInputSize(resize_height = img_height,\n",
|
||
|
|
" resize_width = img_width,\n",
|
||
|
|
" random_brightness=(-48, 48, 0.5),\n",
|
||
|
|
" random_contrast=(0.5, 1.8, 0.5),\n",
|
||
|
|
" random_saturation=(0.5, 1.8, 0.5),\n",
|
||
|
|
" random_hue=(18, 0.5),\n",
|
||
|
|
" random_flip=0.5,\n",
|
||
|
|
" n_trials_max=3,\n",
|
||
|
|
" clip_boxes=True,\n",
|
||
|
|
" overlap_criterion='area',\n",
|
||
|
|
" bounds_box_filter=(0.3, 1.0),\n",
|
||
|
|
" bounds_validator=(0.5, 1.0),\n",
|
||
|
|
" n_boxes_min=1,\n",
|
||
|
|
" background=(0,0,0))\n",
|
||
|
|
"#######################\n",
|
||
|
|
"# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n",
|
||
|
|
"#######################\n",
|
||
|
|
"\n",
|
||
|
|
"train_generator = train_dataset.generate(batch_size=batch_size,\n",
|
||
|
|
" shuffle=True,\n",
|
||
|
|
" transformations= [data_augmentation_chain],\n",
|
||
|
|
" label_encoder=ssd_input_encoder,\n",
|
||
|
|
" returns={'processed_images',\n",
|
||
|
|
" 'encoded_labels'},\n",
|
||
|
|
" keep_images_without_gt=False)\n",
|
||
|
|
"\n",
|
||
|
|
"val_generator = val_dataset.generate(batch_size=batch_size,\n",
|
||
|
|
" shuffle=False,\n",
|
||
|
|
" transformations=[convert_to_3_channels,\n",
|
||
|
|
" resize],\n",
|
||
|
|
" label_encoder=ssd_input_encoder,\n",
|
||
|
|
" returns={'processed_images',\n",
|
||
|
|
" 'encoded_labels'},\n",
|
||
|
|
" keep_images_without_gt=False)\n",
|
||
|
|
"\n",
|
||
|
|
"# Summary instance training\n",
|
||
|
|
"category_train_list = []\n",
|
||
|
|
"for image_label in train_dataset.labels:\n",
|
||
|
|
" category_train_list += [i[0] for i in image_label]\n",
|
||
|
|
"summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n",
|
||
|
|
"for i in summary_category_training.keys():\n",
|
||
|
|
" print(i, ': {:.0f}'.format(summary_category_training[i]))\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"# Get the number of samples in the training and validations datasets.\n",
|
||
|
|
"train_dataset_size = train_dataset.get_dataset_size()\n",
|
||
|
|
"val_dataset_size = val_dataset.get_dataset_size()\n",
|
||
|
|
"\n",
|
||
|
|
"print(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\n",
|
||
|
|
"print(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"##########################\n",
|
||
|
|
"# Define model callbacks.\n",
|
||
|
|
"#########################\n",
|
||
|
|
"\n",
|
||
|
|
"# TODO: Set the filepath under which you want to save the model.\n",
|
||
|
|
"model_checkpoint = ModelCheckpoint(filepath= config['train']['saved_weights_name'],\n",
|
||
|
|
" monitor='val_loss',\n",
|
||
|
|
" verbose=1,\n",
|
||
|
|
" save_best_only=True,\n",
|
||
|
|
" save_weights_only=False,\n",
|
||
|
|
" mode='auto',\n",
|
||
|
|
" period=1)\n",
|
||
|
|
"#model_checkpoint.best =\n",
|
||
|
|
"\n",
|
||
|
|
"csv_logger = CSVLogger(filename='log.csv',\n",
|
||
|
|
" separator=',',\n",
|
||
|
|
" append=True)\n",
|
||
|
|
"\n",
|
||
|
|
"learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n",
|
||
|
|
" verbose=1)\n",
|
||
|
|
"\n",
|
||
|
|
"terminate_on_nan = TerminateOnNaN()\n",
|
||
|
|
"\n",
|
||
|
|
"callbacks = [model_checkpoint,\n",
|
||
|
|
" csv_logger,\n",
|
||
|
|
" learning_rate_scheduler,\n",
|
||
|
|
" terminate_on_nan]\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"batch_images, batch_labels = next(train_generator)\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"initial_epoch = 0\n",
|
||
|
|
"final_epoch = 100 #config['train']['nb_epochs']\n",
|
||
|
|
"steps_per_epoch = 50\n",
|
||
|
|
"\n",
|
||
|
|
"history = model.fit_generator(generator=train_generator,\n",
|
||
|
|
" steps_per_epoch=steps_per_epoch,\n",
|
||
|
|
" epochs=final_epoch,\n",
|
||
|
|
" callbacks=callbacks,\n",
|
||
|
|
" validation_data=val_generator,\n",
|
||
|
|
" validation_steps=ceil(val_dataset_size/batch_size),\n",
|
||
|
|
" initial_epoch=initial_epoch,\n",
|
||
|
|
" verbose = 1 if config['train']['debug'] else 2)\n",
|
||
|
|
"\n",
|
||
|
|
"history_path = config['train']['saved_weights_name'].split('.')[0] + '_history'\n",
|
||
|
|
"\n",
|
||
|
|
"np.save(history_path, history.history)"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": []
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": 3,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"dict_keys(['val_loss', 'loss', 'lr'])\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"data": {
|
||
|
|
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzsnXd4m9W9xz9Hw5Z3HO/YGQ7ZCdmELPbes6xCW2bpAm5bKNzuXm5LCx0UKGXTUhouZe8VEkIWIXuH7DjLM463LUvn/nHeV8uyLTmyJFvn8zx5tF6975Ejne/5zSOklGg0Go0mcbHEegAajUajiS1aCDQajSbB0UKg0Wg0CY4WAo1Go0lwtBBoNBpNgqOFQKPRaBIcLQQaTRcIIZ4XQtwf4rF7hBBnHut5NJpoo4VAo9FoEhwtBBqNRpPgaCHQ9HkMl8zdQoj1QohGIcQzQogCIcT7Qoh6IcQnQohsn+MvFkJsEkLUCiEWCiHG+rw2RQix2njf/wGOgGtdKIRYa7x3qRBiYg/HfKsQYocQokYI8ZYQYpDxvBBC/FkIUSGEOGp8pgnGa+cLITYbYzsghPhxj/5gGk0AWgg0/YUrgLOAUcBFwPvAfwO5qO/5HQBCiFHAPOAuIA94D3hbCJEkhEgC3gBeAAYC/zHOi/HeqcCzwLeBHOAJ4C0hRHI4AxVCnA78DrgKKAL2Ai8ZL58NnGx8jgHA1UC18dozwLellBnABODTcK6r0XSGFgJNf+ERKWW5lPIA8DnwhZRyjZSyFXgdmGIcdzXwrpTyYymlE3gISAFmAzMBO/AXKaVTSvkK8KXPNW4FnpBSfiGldEkp/wG0Gu8Lh68Dz0opVxvjuw+YJYQYBjiBDGAMIKSUW6SUh4z3OYFxQohMKeURKeXqMK+r0QRFC4Gmv1Duc785yON04/4g1AocACmlGygDio3XDkj/Tox7fe4PBX5kuIVqhRC1wGDjfeEQOIYG1Kq/WEr5KfAo8BhQLoR4UgiRaRx6BXA+sFcI8ZkQYlaY19VogqKFQJNoHERN6IDyyaMm8wPAIaDYeM5kiM/9MuB/pZQDfP6lSinnHeMY0lCupgMAUsq/SimnAeNRLqK7jee/lFJeAuSjXFgvh3ldjSYoWgg0icbLwAVCiDOEEHbgRyj3zlJgGdAO3CGEsAkhLgdm+Lz3KeB2IcSJRlA3TQhxgRAiI8wx/Bu4UQgx2Ygv/BblytojhDjBOL8daARaAJcRw/i6ECLLcGnVAa5j+DtoNB60EGgSCinlNuB64BGgChVYvkhK2SalbAMuB74FHEHFE17zee9KVJzgUeP1Hcax4Y5hPvBz4FWUFXIccI3xciZKcI6g3EfVqDgGwA3AHiFEHXC78Tk0mmNG6I1pNBqNJrHRFoFGo9EkOFoINBqNJsHRQqDRaDQJjhYCjUajSXBssR5AKOTm5sphw4bFehgajUbTp1i1alWVlDKvu+P6hBAMGzaMlStXxnoYGo1G06cQQuzt/ijtGtJoNJqERwuBRqPRJDhaCDQajSbB6RMxgmA4nU72799PS0tLrIfSqzgcDkpKSrDb7bEeikaj6af0WSHYv38/GRkZDBs2DP9mkf0HKSXV1dXs37+f0tLSWA9Ho9H0U/qsa6ilpYWcnJx+KwIAQghycnL6vdWj0WhiS58VAqBfi4BJInxGjUYTW/q0EHRLy1GoPxzrUWg0Gk1c07+FoLVBCUEvtNqura3lb3/7W9jvO//886mtrY34eDQajaan9G8hsKcAEtoj72PvTAhcrq43jXrvvfcYMGBAxMej0Wg0PaXPZg2FhD1F3TqbvfcjxL333svOnTuZPHkydrud9PR0ioqKWLt2LZs3b+bSSy+lrKyMlpYW7rzzTm677TbA2y6joaGB8847j7lz57J06VKKi4t58803SUmJ7Dg1Go2mO/qFEPz67U1sPlgX/MW2BrAeBWtyWOccNyiTX1
40vtPXH3jgATZu3MjatWtZuHAhF1xwARs3bvSkeT777LMMHDiQ5uZmTjjhBK644gpycnL8zrF9+3bmzZvHU089xVVXXcWrr77K9dfr3Qc1Gk106RdC0CXCAm43WHv3MjNmzPDL9f/rX//K66+/DkBZWRnbt2/vIASlpaVMnjwZgGnTprFnz57eHaRGo9EEoV8IQVcrd2r3QXMtFB4PvZiKmZaW5rm/cOFCPvnkE5YtW0Zqaiqnnnpq0FqA5GSvlWK1Wmlubu618Wk0Gk1n9O9gMajYgHSByxnR02ZkZFBfXx/0taNHj5KdnU1qaipbt25l+fLlEb22RqPRRJJ+YRF0ic0IvrY3gy0pYqfNyclhzpw5TJgwgZSUFAoKCjyvnXvuufz9739n4sSJjB49mpkzZ0bsuhqNRhNphOyFHHsAIcSzwIVAhZRyQsBrPwYeBPKklFXdnWv69OkycGOaLVu2MHbs2O4H4nbB4fWQUQgZRWF8gvgh5M+q0Wg0PgghVkkpp3d3XG+6hp4Hzg18UggxGDgL2NeL1/ZisaqMIaf2v2s0Gk0wek0IpJSLgJogL/0ZuAfoHVMkGPYULQQajUbTCVENFgshLgYOSCnXhXDsbUKIlUKIlZWVlcd2YXsKuNrA3X5s59FoNJp+SNSEQAiRCvwU+EUox0spn5RSTpdSTs/Lyzu2i9tT1a22CjQajaYD0bQIjgNKgXVCiD1ACbBaCFHY61f2bTWh0Wg0Gj+ilj4qpdwA5JuPDTGYHkrW0DFjtYPFpoVAo9FogtBrFoEQYh6wDBgthNgvhLi5t64VEhEOGPe0DTXAX/7yF5qamiI2Fo1GozkWejNr6FopZZGU0i6lLJFSPhPw+rCoWAMmthTVjjpCdRNaCDQaTX+h/1cWm1isgFRCEIGeQ75tqM866yzy8/N5+eWXaW1t5bLLLuPXv/41jY2NXHXVVezfvx+Xy8XPf/5zysvLOXjwIKeddhq5ubksWLDg2D+bRqPRHAP9QwjevxcOb+j6GFcbuFohKR0IQQgKj4fzHuj0Zd821B999BGvvPIKK1asQErJxRdfzKJFi6isrGTQoEG8++67gOpBlJWVxZ/+9CcWLFhAbm5uGB9So9Foeof+33TOxLQCeqGlxkcffcRHH33ElClTmDp1Klu3bmX79u0cf/zxfPLJJ/zkJz/h888/JysrK+LX1mg0mmOlf1gEXazcPTQfgSN7IG9MxHcrk1Jy33338e1vf7vDa6tWreK9997jvvvu4+yzz+YXvwipjEKj0WiiRgJZBMZHle6InM63DfU555zDs88+S0NDAwAHDhygoqKCgwcPkpqayvXXX8+Pf/xjVq9e3eG9Go1GE2v6h0UQChEWAt821Oeddx7XXXcds2bNAiA9PZ1//etf7Nixg7vvvhuLxYLdbufxxx8H4LbbbuO8886jqKhIB4s1Gk3M6bU21JHkmNpQm7Q1QtVXMHA4OPqWr163odZoND0hHtpQxxcRtgg0Go2mv5B4QuDWQqDRaDS+9GkhCMut1Uctgr7gutNoNH2bPisEDoeD6urq0CdKjxC4em9QEUZKSXV1NQ6HI9ZD0Wg0/Zg+mzVUUlLC/v37CWvTmtoKcLSC40jvDSzCOBwOSkpKYj0MjUbTj+mzQmC32yktLQ3vTf97Bkz7Fpz7214Zk0aj0fRF+qxrqEckpYGzMdaj0Gg0mrgiwYQgFdp0+2eNRqPxJbGEwJ4GTi0EGo1G40tiCUFSqqow1mg0Go2HxBICe6q2CDQajSaAxBKCpDQdI9BoNJoAEksI7Kk6a0ij0WgC6DUhEEI8K4SoEEJs9HnuQSHEViHEeiHE60KIAb11/aDorCGNRqPpQG9aBM8D5wY89zEwQUo5EfgKuK8Xr98RnTWk0Wg0Heg1IZBSLgJqAp77SErZbjxcDkS3d4KZNaQbuWk0Go2HWMYIbgLe7+xFIcRtQoiVQoiVYfUT6gp7qmo652qLzPk0Go2mHxATIRBC/B
RoB17s7Bgp5ZNSyulSyul5eXmRuXBSmrrVtQQajUbjIepN54QQ3wQuBM6Q0W62b09Vt84mYGBUL63RaDTxSlSFQAh
|
||
|
|
"text/plain": [
|
||
|
|
"<Figure size 432x288 with 1 Axes>"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
"metadata": {
|
||
|
|
"needs_background": "light"
|
||
|
|
},
|
||
|
|
"output_type": "display_data"
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"experimento_ssd7_panel.h5\n"
|
||
|
|
]
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"source": [
|
||
|
|
"#Graficar aprendizaje\n",
|
||
|
|
"\n",
|
||
|
|
"history_path =config['train']['saved_weights_name'].split('.')[0] + '_history'\n",
|
||
|
|
"\n",
|
||
|
|
"hist_load = np.load(history_path + '.npy',allow_pickle=True).item()\n",
|
||
|
|
"\n",
|
||
|
|
"print(hist_load.keys())\n",
|
||
|
|
"\n",
|
||
|
|
"# summarize history for loss\n",
|
||
|
|
"plt.plot(hist_load['loss'])\n",
|
||
|
|
"plt.plot(hist_load['val_loss'])\n",
|
||
|
|
"plt.title('model loss')\n",
|
||
|
|
"plt.ylabel('loss')\n",
|
||
|
|
"plt.xlabel('epoch')\n",
|
||
|
|
"plt.legend(['train', 'test'], loc='upper left')\n",
|
||
|
|
"plt.show()\n",
|
||
|
|
"\n",
|
||
|
|
"print(config['train']['saved_weights_name'])"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "markdown",
|
||
|
|
"metadata": {},
|
||
|
|
"source": [
|
||
|
|
"Evaluación del Modelo"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": [
|
||
|
|
"\n",
|
||
|
|
"config_path = 'config_7_panel.json'\n",
|
||
|
|
"\n",
|
||
|
|
"with open(config_path) as config_buffer:\n",
|
||
|
|
" config = json.loads(config_buffer.read())\n",
|
||
|
|
"\n",
|
||
|
|
" \n",
|
||
|
|
"model_mode = 'training'\n",
|
||
|
|
"# TODO: Set the path to the `.h5` file of the model to be loaded.\n",
|
||
|
|
"model_path = config['train']['saved_weights_name']\n",
|
||
|
|
"\n",
|
||
|
|
"# We need to create an SSDLoss object in order to pass that to the model loader.\n",
|
||
|
|
"ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
|
||
|
|
"\n",
|
||
|
|
"K.clear_session() # Clear previous models from memory.\n",
|
||
|
|
"\n",
|
||
|
|
"model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
|
||
|
|
" 'L2Normalization': L2Normalization,\n",
|
||
|
|
" 'DecodeDetections': DecodeDetections,\n",
|
||
|
|
" 'compute_loss': ssd_loss.compute_loss})\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
" \n",
|
||
|
|
"train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
|
||
|
|
"val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n",
|
||
|
|
"\n",
|
||
|
|
"# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"# The XML parser needs to know which object class names to look for and in which order to map them to integers.\n",
|
||
|
|
"classes = ['background' ] + labels\n",
|
||
|
|
"\n",
|
||
|
|
"train_dataset.parse_xml(images_dirs= [config['train']['train_image_folder']],\n",
|
||
|
|
" image_set_filenames=[config['train']['train_image_set_filename']],\n",
|
||
|
|
" annotations_dirs=[config['train']['train_annot_folder']],\n",
|
||
|
|
" classes=classes,\n",
|
||
|
|
" include_classes='all',\n",
|
||
|
|
" #classes = ['background', 'panel', 'cell'], \n",
|
||
|
|
" #include_classes=classes,\n",
|
||
|
|
" exclude_truncated=False,\n",
|
||
|
|
" exclude_difficult=False,\n",
|
||
|
|
" ret=False)\n",
|
||
|
|
"\n",
|
||
|
|
"val_dataset.parse_xml(images_dirs= [config['test']['test_image_folder']],\n",
|
||
|
|
" image_set_filenames=[config['test']['test_image_set_filename']],\n",
|
||
|
|
" annotations_dirs=[config['test']['test_annot_folder']],\n",
|
||
|
|
" classes=classes,\n",
|
||
|
|
" include_classes='all',\n",
|
||
|
|
" #classes = ['background', 'panel', 'cell'], \n",
|
||
|
|
" #include_classes=classes,\n",
|
||
|
|
" exclude_truncated=False,\n",
|
||
|
|
" exclude_difficult=False,\n",
|
||
|
|
" ret=False)\n",
|
||
|
|
"\n",
|
||
|
|
"#########################\n",
|
||
|
|
"# 3: Set the batch size.\n",
|
||
|
|
"#########################\n",
|
||
|
|
"batch_size = config['train']['batch_size'] # Change the batch size if you like, or if you run into GPU memory issues.\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"evaluator = Evaluator(model=model,\n",
|
||
|
|
" n_classes=n_classes,\n",
|
||
|
|
" data_generator=val_dataset,\n",
|
||
|
|
" model_mode='training')\n",
|
||
|
|
"\n",
|
||
|
|
"results = evaluator(img_height=img_height,\n",
|
||
|
|
" img_width=img_width,\n",
|
||
|
|
" batch_size=4,\n",
|
||
|
|
" data_generator_mode='resize',\n",
|
||
|
|
" round_confidences=False,\n",
|
||
|
|
" matching_iou_threshold=0.5,\n",
|
||
|
|
" border_pixels='include',\n",
|
||
|
|
" sorting_algorithm='quicksort',\n",
|
||
|
|
" average_precision_mode='sample',\n",
|
||
|
|
" num_recall_points=11,\n",
|
||
|
|
" ignore_neutral_boxes=True,\n",
|
||
|
|
" return_precisions=True,\n",
|
||
|
|
" return_recalls=True,\n",
|
||
|
|
" return_average_precisions=True,\n",
|
||
|
|
" verbose=True)\n",
|
||
|
|
"\n",
|
||
|
|
"mean_average_precision, average_precisions, precisions, recalls = results\n",
|
||
|
|
"total_instances = []\n",
|
||
|
|
"precisions = []\n",
|
||
|
|
"\n",
|
||
|
|
"for i in range(1, len(average_precisions)):\n",
|
||
|
|
" \n",
|
||
|
|
" print('{:.0f} instances of class'.format(len(recalls[i])),\n",
|
||
|
|
" classes[i], 'with average precision: {:.4f}'.format(average_precisions[i]))\n",
|
||
|
|
" total_instances.append(len(recalls[i]))\n",
|
||
|
|
" precisions.append(average_precisions[i])\n",
|
||
|
|
"\n",
|
||
|
|
"if sum(total_instances) == 0:\n",
|
||
|
|
" \n",
|
||
|
|
" print('No test instances found.')\n",
|
||
|
|
"\n",
|
||
|
|
"else:\n",
|
||
|
|
"\n",
|
||
|
|
" print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))\n",
|
||
|
|
" print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))\n",
|
||
|
|
"\n",
|
||
|
|
" for i in range(1, len(average_precisions)):\n",
|
||
|
|
" print(\"{:<14}{:<6}{}\".format(classes[i], 'AP', round(average_precisions[i], 3)))\n",
|
||
|
|
" print()\n",
|
||
|
|
" print(\"{:<14}{:<6}{}\".format('','mAP', round(mean_average_precision, 3)))"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "markdown",
|
||
|
|
"metadata": {},
|
||
|
|
"source": [
|
||
|
|
"Cargar nuevamente el modelo desde los pesos.\n",
|
||
|
|
"Predicción"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": 8,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"\n",
|
||
|
|
"Training on: \t{'panel': 1}\n",
|
||
|
|
"\n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"Layer (type) Output Shape Param # Connected to \n",
|
||
|
|
"==================================================================================================\n",
|
||
|
|
"input_1 (InputLayer) (None, 400, 400, 3) 0 \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv1 (Conv2D) (None, 400, 400, 32) 2432 identity_layer[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn1 (BatchNormalization) (None, 400, 400, 32) 128 conv1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu1 (ELU) (None, 400, 400, 32) 0 bn1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool1 (MaxPooling2D) (None, 200, 200, 32) 0 elu1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv2 (Conv2D) (None, 200, 200, 48) 13872 pool1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn2 (BatchNormalization) (None, 200, 200, 48) 192 conv2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu2 (ELU) (None, 200, 200, 48) 0 bn2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool2 (MaxPooling2D) (None, 100, 100, 48) 0 elu2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv3 (Conv2D) (None, 100, 100, 64) 27712 pool2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn3 (BatchNormalization) (None, 100, 100, 64) 256 conv3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu3 (ELU) (None, 100, 100, 64) 0 bn3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool3 (MaxPooling2D) (None, 50, 50, 64) 0 elu3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv4 (Conv2D) (None, 50, 50, 64) 36928 pool3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn4 (BatchNormalization) (None, 50, 50, 64) 256 conv4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu4 (ELU) (None, 50, 50, 64) 0 bn4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool4 (MaxPooling2D) (None, 25, 25, 64) 0 elu4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv5 (Conv2D) (None, 25, 25, 48) 27696 pool4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn5 (BatchNormalization) (None, 25, 25, 48) 192 conv5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu5 (ELU) (None, 25, 25, 48) 0 bn5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool5 (MaxPooling2D) (None, 12, 12, 48) 0 elu5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv6 (Conv2D) (None, 12, 12, 48) 20784 pool5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn6 (BatchNormalization) (None, 12, 12, 48) 192 conv6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu6 (ELU) (None, 12, 12, 48) 0 bn6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool6 (MaxPooling2D) (None, 6, 6, 48) 0 elu6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv7 (Conv2D) (None, 6, 6, 32) 13856 pool6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn7 (BatchNormalization) (None, 6, 6, 32) 128 conv7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu7 (ELU) (None, 6, 6, 32) 0 bn7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes4 (Conv2D) (None, 50, 50, 8) 4616 elu4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes5 (Conv2D) (None, 25, 25, 8) 3464 elu5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes6 (Conv2D) (None, 12, 12, 8) 3464 elu6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes7 (Conv2D) (None, 6, 6, 8) 2312 elu7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes4 (Conv2D) (None, 50, 50, 16) 9232 elu4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes5 (Conv2D) (None, 25, 25, 16) 6928 elu5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes6 (Conv2D) (None, 12, 12, 16) 6928 elu6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes7 (Conv2D) (None, 6, 6, 16) 4624 elu7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes4_reshape (Reshape) (None, 10000, 2) 0 classes4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes5_reshape (Reshape) (None, 2500, 2) 0 classes5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes6_reshape (Reshape) (None, 576, 2) 0 classes6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes7_reshape (Reshape) (None, 144, 2) 0 classes7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors4 (AnchorBoxes) (None, 50, 50, 4, 8) 0 boxes4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors5 (AnchorBoxes) (None, 25, 25, 4, 8) 0 boxes5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors6 (AnchorBoxes) (None, 12, 12, 4, 8) 0 boxes6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors7 (AnchorBoxes) (None, 6, 6, 4, 8) 0 boxes7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes_concat (Concatenate) (None, 13220, 2) 0 classes4_reshape[0][0] \n",
|
||
|
|
" classes5_reshape[0][0] \n",
|
||
|
|
" classes6_reshape[0][0] \n",
|
||
|
|
" classes7_reshape[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes4_reshape (Reshape) (None, 10000, 4) 0 boxes4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes5_reshape (Reshape) (None, 2500, 4) 0 boxes5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes6_reshape (Reshape) (None, 576, 4) 0 boxes6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes7_reshape (Reshape) (None, 144, 4) 0 boxes7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors4_reshape (Reshape) (None, 10000, 8) 0 anchors4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors5_reshape (Reshape) (None, 2500, 8) 0 anchors5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors6_reshape (Reshape) (None, 576, 8) 0 anchors6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors7_reshape (Reshape) (None, 144, 8) 0 anchors7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes_softmax (Activation) (None, 13220, 2) 0 classes_concat[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes_concat (Concatenate) (None, 13220, 4) 0 boxes4_reshape[0][0] \n",
|
||
|
|
" boxes5_reshape[0][0] \n",
|
||
|
|
" boxes6_reshape[0][0] \n",
|
||
|
|
" boxes7_reshape[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors_concat (Concatenate) (None, 13220, 8) 0 anchors4_reshape[0][0] \n",
|
||
|
|
" anchors5_reshape[0][0] \n",
|
||
|
|
" anchors6_reshape[0][0] \n",
|
||
|
|
" anchors7_reshape[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"predictions (Concatenate) (None, 13220, 14) 0 classes_softmax[0][0] \n",
|
||
|
|
" boxes_concat[0][0] \n",
|
||
|
|
" anchors_concat[0][0] \n",
|
||
|
|
"==================================================================================================\n",
|
||
|
|
"Total params: 186,192\n",
|
||
|
|
"Trainable params: 185,520\n",
|
||
|
|
"Non-trainable params: 672\n",
|
||
|
|
"__________________________________________________________________________________________________\n"
|
||
|
|
]
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"source": [
|
||
|
|
"#############################\n",
|
||
|
|
"####Prediction\n",
|
||
|
|
"#############################\n",
|
||
|
|
"\n",
|
||
|
|
"from imageio import imread\n",
|
||
|
|
"from keras.preprocessing import image\n",
|
||
|
|
"import time\n",
|
||
|
|
"\n",
|
||
|
|
"def makedirs(path):\n",
|
||
|
|
" try:\n",
|
||
|
|
" os.makedirs(path)\n",
|
||
|
|
" except OSError:\n",
|
||
|
|
" if not os.path.isdir(path):\n",
|
||
|
|
" raise\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"config_path = 'config_7_panel.json'\n",
|
||
|
|
"input_path = ['panel_jpg/Mision_1/', 'panel_jpg/Mision_2/']\n",
|
||
|
|
"output_path = 'result_ssd7_panel/'\n",
|
||
|
|
"\n",
|
||
|
|
"with open(config_path) as config_buffer:\n",
|
||
|
|
" config = json.loads(config_buffer.read())\n",
|
||
|
|
"\n",
|
||
|
|
"makedirs(output_path)\n",
|
||
|
|
"###############################\n",
|
||
|
|
"# Parse the annotations\n",
|
||
|
|
"###############################\n",
|
||
|
|
"score_threshold = 0.5\n",
|
||
|
|
"score_threshold_iou = 0.5\n",
|
||
|
|
"labels = config['model']['labels']\n",
|
||
|
|
"categories = {}\n",
|
||
|
|
"#categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} #la categoría 0 es la background\n",
|
||
|
|
"for i in range(len(labels)): categories[labels[i]] = i+1\n",
|
||
|
|
"print('\\nTraining on: \\t' + str(categories) + '\\n')\n",
|
||
|
|
"\n",
|
||
|
|
"img_height = config['model']['input'] # Height of the model input images\n",
|
||
|
|
"img_width = config['model']['input'] # Width of the model input images\n",
|
||
|
|
"img_channels = 3 # Number of color channels of the model input images\n",
|
||
|
|
"n_classes = len(labels) # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\n",
|
||
|
|
"classes = ['background'] + labels\n",
|
||
|
|
"\n",
|
||
|
|
"model_mode = 'training'\n",
|
||
|
|
"# TODO: Set the path to the `.h5` file of the model to be loaded.\n",
|
||
|
|
"model_path = config['train']['saved_weights_name']\n",
|
||
|
|
"\n",
|
||
|
|
"# We need to create an SSDLoss object in order to pass that to the model loader.\n",
|
||
|
|
"ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n",
|
||
|
|
"\n",
|
||
|
|
"K.clear_session() # Clear previous models from memory.\n",
|
||
|
|
"\n",
|
||
|
|
"model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n",
|
||
|
|
" 'L2Normalization': L2Normalization,\n",
|
||
|
|
" 'DecodeDetections': DecodeDetections,\n",
|
||
|
|
" 'compute_loss': ssd_loss.compute_loss})\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"model.summary()\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": 9,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"Tiempo Total: 1.040\n",
|
||
|
|
"Tiempo promedio por imagen: 0.104\n",
|
||
|
|
"OK\n"
|
||
|
|
]
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"source": [
|
||
|
|
"image_paths = []\n",
|
||
|
|
"for inp in input_path:\n",
|
||
|
|
" if os.path.isdir(inp):\n",
|
||
|
|
" for inp_file in os.listdir(inp):\n",
|
||
|
|
" image_paths += [inp + inp_file]\n",
|
||
|
|
" else:\n",
|
||
|
|
" image_paths += [inp]\n",
|
||
|
|
"\n",
|
||
|
|
"image_paths = [inp_file for inp_file in image_paths if os.path.splitext(inp_file)[1].lower() in ['.jpg', '.jpeg', '.png']]  # case-insensitive; also accepts .JPG/.JPEG/.PNG\n",
|
||
|
|
"times = []\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"for img_path in image_paths:\n",
|
||
|
|
" orig_images = [] # Store the images here.\n",
|
||
|
|
" input_images = [] # Store resized versions of the images here.\n",
|
||
|
|
" #print(img_path)\n",
|
||
|
|
"\n",
|
||
|
|
" # preprocess image for network\n",
|
||
|
|
" orig_images.append(imread(img_path))\n",
|
||
|
|
" img = image.load_img(img_path, target_size=(img_height, img_width))\n",
|
||
|
|
" img = image.img_to_array(img)\n",
|
||
|
|
" input_images.append(img)\n",
|
||
|
|
" input_images = np.array(input_images)\n",
|
||
|
|
" # process image\n",
|
||
|
|
" start = time.time()\n",
|
||
|
|
" y_pred = model.predict(input_images)\n",
|
||
|
|
" y_pred_decoded = decode_detections(y_pred,\n",
|
||
|
|
" confidence_thresh=score_threshold,\n",
|
||
|
|
" iou_threshold=score_threshold_iou,\n",
|
||
|
|
" top_k=200,\n",
|
||
|
|
" normalize_coords=True,\n",
|
||
|
|
" img_height=img_height,\n",
|
||
|
|
" img_width=img_width)\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
" #print(\"processing time: \", time.time() - start)\n",
|
||
|
|
" times.append(time.time() - start)\n",
|
||
|
|
" # correct for image scale\n",
|
||
|
|
"\n",
|
||
|
|
" # visualize detections\n",
|
||
|
|
" # Set the colors for the bounding boxes\n",
|
||
|
|
" colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()\n",
|
||
|
|
"\n",
|
||
|
|
" plt.figure(figsize=(20,12))\n",
|
||
|
|
" plt.imshow(orig_images[0],cmap = 'gray')\n",
|
||
|
|
"\n",
|
||
|
|
" current_axis = plt.gca()\n",
|
||
|
|
" #print(y_pred)\n",
|
||
|
|
" for box in y_pred_decoded[0]:\n",
|
||
|
|
" # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.\n",
|
||
|
|
"\n",
|
||
|
|
" xmin = box[2] * orig_images[0].shape[1] / img_width\n",
|
||
|
|
" ymin = box[3] * orig_images[0].shape[0] / img_height\n",
|
||
|
|
" xmax = box[4] * orig_images[0].shape[1] / img_width\n",
|
||
|
|
" ymax = box[5] * orig_images[0].shape[0] / img_height\n",
|
||
|
|
"\n",
|
||
|
|
" color = colors[int(box[0])]\n",
|
||
|
|
" label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n",
|
||
|
|
" current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n",
|
||
|
|
" current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n",
|
||
|
|
"\n",
|
||
|
|
" #plt.figure(figsize=(15, 15))\n",
|
||
|
|
" #plt.axis('off')\n",
|
||
|
|
" save_path = output_path + img_path.split('/')[-1]\n",
|
||
|
|
" plt.savefig(save_path)\n",
|
||
|
|
" plt.close()\n",
|
||
|
|
" \n",
|
||
|
|
"# Use a context manager so the file handle is closed even if write() raises.\n",
"with open(output_path + 'time.txt', 'w') as time_file:\n",
"    time_file.write('Tiempo promedio:' + str(np.mean(times)))\n",
"\n",
|
||
|
|
"print('Tiempo Total: {:.3f}'.format(np.sum(times)))\n",
|
||
|
|
"print('Tiempo promedio por imagen: {:.3f}'.format(np.mean(times)))\n",
|
||
|
|
"print('OK')"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": 6,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"panel : 69\n",
|
||
|
|
"cell : 423\n"
|
||
|
|
]
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"source": [
"\n",
"# Summary of training instances per category.\n",
"# Bug fix: the loop previously read train_dataset.labels[0] on every\n",
"# iteration (loop variable image_label was never used), so only the first\n",
"# image's labels were counted, once per image. Iterate each image's labels.\n",
"category_train_list = []\n",
"for image_label in train_dataset.labels:\n",
"    category_train_list += [i[0] for i in image_label]\n",
"summary_category_training = {train_dataset.classes[i]: category_train_list.count(i) for i in list(set(category_train_list))}\n",
"for i in summary_category_training.keys():\n",
"    print(i, ': {:.0f}'.format(summary_category_training[i]))\n"
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": 28,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [
|
||
|
|
{
|
||
|
|
"name": "stdout",
|
||
|
|
"output_type": "stream",
|
||
|
|
"text": [
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"Layer (type) Output Shape Param # Connected to \n",
|
||
|
|
"==================================================================================================\n",
|
||
|
|
"input_1 (InputLayer) (None, 400, 400, 3) 0 \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"identity_layer (Lambda) (None, 400, 400, 3) 0 input_1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv1 (Conv2D) (None, 400, 400, 32) 2432 identity_layer[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn1 (BatchNormalization) (None, 400, 400, 32) 128 conv1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu1 (ELU) (None, 400, 400, 32) 0 bn1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool1 (MaxPooling2D) (None, 200, 200, 32) 0 elu1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv2 (Conv2D) (None, 200, 200, 48) 13872 pool1[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn2 (BatchNormalization) (None, 200, 200, 48) 192 conv2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu2 (ELU) (None, 200, 200, 48) 0 bn2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool2 (MaxPooling2D) (None, 100, 100, 48) 0 elu2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv3 (Conv2D) (None, 100, 100, 64) 27712 pool2[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn3 (BatchNormalization) (None, 100, 100, 64) 256 conv3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu3 (ELU) (None, 100, 100, 64) 0 bn3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool3 (MaxPooling2D) (None, 50, 50, 64) 0 elu3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv4 (Conv2D) (None, 50, 50, 64) 36928 pool3[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn4 (BatchNormalization) (None, 50, 50, 64) 256 conv4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu4 (ELU) (None, 50, 50, 64) 0 bn4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool4 (MaxPooling2D) (None, 25, 25, 64) 0 elu4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv5 (Conv2D) (None, 25, 25, 48) 27696 pool4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn5 (BatchNormalization) (None, 25, 25, 48) 192 conv5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu5 (ELU) (None, 25, 25, 48) 0 bn5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool5 (MaxPooling2D) (None, 12, 12, 48) 0 elu5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv6 (Conv2D) (None, 12, 12, 48) 20784 pool5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn6 (BatchNormalization) (None, 12, 12, 48) 192 conv6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu6 (ELU) (None, 12, 12, 48) 0 bn6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"pool6 (MaxPooling2D) (None, 6, 6, 48) 0 elu6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"conv7 (Conv2D) (None, 6, 6, 32) 13856 pool6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"bn7 (BatchNormalization) (None, 6, 6, 32) 128 conv7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"elu7 (ELU) (None, 6, 6, 32) 0 bn7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes4 (Conv2D) (None, 50, 50, 12) 6924 elu4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes5 (Conv2D) (None, 25, 25, 12) 5196 elu5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes6 (Conv2D) (None, 12, 12, 12) 5196 elu6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes7 (Conv2D) (None, 6, 6, 12) 3468 elu7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes4 (Conv2D) (None, 50, 50, 16) 9232 elu4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes5 (Conv2D) (None, 25, 25, 16) 6928 elu5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes6 (Conv2D) (None, 12, 12, 16) 6928 elu6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes7 (Conv2D) (None, 6, 6, 16) 4624 elu7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes4_reshape (Reshape) (None, 10000, 3) 0 classes4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes5_reshape (Reshape) (None, 2500, 3) 0 classes5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes6_reshape (Reshape) (None, 576, 3) 0 classes6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes7_reshape (Reshape) (None, 144, 3) 0 classes7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors4 (AnchorBoxes) (None, 50, 50, 4, 8) 0 boxes4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors5 (AnchorBoxes) (None, 25, 25, 4, 8) 0 boxes5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors6 (AnchorBoxes) (None, 12, 12, 4, 8) 0 boxes6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors7 (AnchorBoxes) (None, 6, 6, 4, 8) 0 boxes7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes_concat (Concatenate) (None, 13220, 3) 0 classes4_reshape[0][0] \n",
|
||
|
|
" classes5_reshape[0][0] \n",
|
||
|
|
" classes6_reshape[0][0] \n",
|
||
|
|
" classes7_reshape[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes4_reshape (Reshape) (None, 10000, 4) 0 boxes4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes5_reshape (Reshape) (None, 2500, 4) 0 boxes5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes6_reshape (Reshape) (None, 576, 4) 0 boxes6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes7_reshape (Reshape) (None, 144, 4) 0 boxes7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors4_reshape (Reshape) (None, 10000, 8) 0 anchors4[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors5_reshape (Reshape) (None, 2500, 8) 0 anchors5[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors6_reshape (Reshape) (None, 576, 8) 0 anchors6[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors7_reshape (Reshape) (None, 144, 8) 0 anchors7[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"classes_softmax (Activation) (None, 13220, 3) 0 classes_concat[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"boxes_concat (Concatenate) (None, 13220, 4) 0 boxes4_reshape[0][0] \n",
|
||
|
|
" boxes5_reshape[0][0] \n",
|
||
|
|
" boxes6_reshape[0][0] \n",
|
||
|
|
" boxes7_reshape[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"anchors_concat (Concatenate) (None, 13220, 8) 0 anchors4_reshape[0][0] \n",
|
||
|
|
" anchors5_reshape[0][0] \n",
|
||
|
|
" anchors6_reshape[0][0] \n",
|
||
|
|
" anchors7_reshape[0][0] \n",
|
||
|
|
"__________________________________________________________________________________________________\n",
|
||
|
|
"predictions (Concatenate) (None, 13220, 15) 0 classes_softmax[0][0] \n",
|
||
|
|
" boxes_concat[0][0] \n",
|
||
|
|
" anchors_concat[0][0] \n",
|
||
|
|
"==================================================================================================\n",
|
||
|
|
"Total params: 193,120\n",
|
||
|
|
"Trainable params: 192,448\n",
|
||
|
|
"Non-trainable params: 672\n",
|
||
|
|
"__________________________________________________________________________________________________\n"
|
||
|
|
]
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"source": [
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"\n",
|
||
|
|
"model.summary()"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": []
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": [
|
||
|
|
"\n"
|
||
|
|
]
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": []
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"cell_type": "code",
|
||
|
|
"execution_count": null,
|
||
|
|
"metadata": {},
|
||
|
|
"outputs": [],
|
||
|
|
"source": []
|
||
|
|
}
|
||
|
|
],
|
||
|
|
"metadata": {
|
||
|
|
"kernelspec": {
|
||
|
|
"display_name": "Python 3",
|
||
|
|
"language": "python",
|
||
|
|
"name": "python3"
|
||
|
|
},
|
||
|
|
"language_info": {
|
||
|
|
"codemirror_mode": {
|
||
|
|
"name": "ipython",
|
||
|
|
"version": 3
|
||
|
|
},
|
||
|
|
"file_extension": ".py",
|
||
|
|
"mimetype": "text/x-python",
|
||
|
|
"name": "python",
|
||
|
|
"nbconvert_exporter": "python",
|
||
|
|
"pygments_lexer": "ipython3",
|
||
|
|
"version": "3.6.8"
|
||
|
|
}
|
||
|
|
},
|
||
|
|
"nbformat": 4,
|
||
|
|
"nbformat_minor": 2
|
||
|
|
}
|