Daniel Saavedra
2020-10-08 22:09:30 -03:00
parent 5d11697e85
commit 0248207c91
5 changed files with 79009 additions and 0 deletions

GPS_Panel/Classifier/.gitignore vendored Normal file

@@ -0,0 +1,6 @@
dataset_panel/
dataset_prueba_1/
model/
preview/


@@ -0,0 +1,263 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# import the packages actually used in this notebook\n",
"import os\n",
"\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"from tensorflow.keras.layers import Dense, GlobalAveragePooling2D\n",
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping"
]
},
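{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check, a minimal sketch assuming TensorFlow 2.1+ (not part of\n",
"# the original flow): confirm the TF version and whether a GPU is visible\n",
"# before starting a long training run.\n",
"print('TensorFlow', tf.__version__)\n",
"print('GPUs:', tf.config.list_physical_devices('GPU'))"
]
},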
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# walk the working directory to sanity-check the dataset layout\n",
"rootDir = '.'\n",
"for dirName, subdirList, fileList in os.walk(rootDir):\n",
"    print('Found directory: %s' % dirName)\n",
"    for fname in fileList:\n",
"        print('\\t%s' % fname)\n",
"\n",
"# fix seeds for reproducible results (only reliable on CPU, not GPU)\n",
"seed = 9\n",
"np.random.seed(seed=seed)\n",
"tf.random.set_seed(seed=seed)\n",
"\n",
"# hyperparameters for the model\n",
"nb_classes = 10  # number of classes\n",
"based_model_last_block_layer_number = 126  # index of the first layer to unfreeze when fine-tuning; depends on the base model selected\n",
"img_width, img_height = 80, 80  # change based on the shape/structure of your images\n",
"batch_size = 8  # try 4, 8, 16, 32, 64, 128, 256 depending on CPU/GPU memory capacity (powers of 2)\n",
"nb_epoch = 500  # maximum number of training epochs\n",
"learn_rate = 1e-4  # SGD learning rate (unused below; the model is compiled with 'nadam')\n",
"momentum = .9  # SGD momentum to escape local minima (unused below)\n",
"transformation_ratio = .1  # how aggressive the data augmentation/transformations will be\n",
"patience = 10  # epochs without val_accuracy improvement before stopping early\n",
"\n",
"train_data_dir = './dataset_panel/'  # inside, each class should have its own folder\n",
"# validation_data_dir = './dataset/'  # unused: validation comes from validation_split in the data generator\n",
"model_path = './model/'"
]
},
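{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch (not part of the original flow): count the images available\n",
"# per class, assuming the directory-per-class layout described above. Helps\n",
"# confirm that nb_classes matches what is actually on disk.\n",
"if os.path.isdir(train_data_dir):\n",
"    counts = {d: len(os.listdir(os.path.join(train_data_dir, d)))\n",
"              for d in sorted(os.listdir(train_data_dir))\n",
"              if os.path.isdir(os.path.join(train_data_dir, d))}\n",
"    for cls, n in counts.items():\n",
"        print('%s: %d images' % (cls, n))\n",
"    print('classes found:', len(counts), '(nb_classes =', nb_classes, ')')"
]
},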
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# build the transfer-learning model: Xception convolutional base + new classification head\n",
"base_model = tf.keras.applications.Xception(input_shape=(img_width, img_height, 3), weights='imagenet', include_top=False)\n",
"\n",
"# top model block\n",
"x = base_model.output\n",
"x = GlobalAveragePooling2D()(x)\n",
"predictions = Dense(nb_classes, activation='softmax')(x)\n",
"\n",
"# attach the new top block to the base model\n",
"model = tf.keras.Model(base_model.input, predictions)\n",
"\n",
"# # let's visualize layer names and indices to see how many layers/blocks to re-train;\n",
"# # uncomment when choosing based_model_last_block_layer_number\n",
"# for i, layer in enumerate(model.layers):\n",
"#     print(i, layer.name)\n",
"\n",
"# first: train only the top layers (which were randomly initialized),\n",
"# i.e. freeze all layers of the pre-trained base model\n",
"for layer in base_model.layers:\n",
"    layer.trainable = False\n",
"\n",
"print(model.summary())\n",
"\n",
"# read the data and augment it: make sure to select augmentations appropriate for your images.\n",
"# Note: rotation_range is in degrees while shear_range/zoom_range are fractions, so the same\n",
"# transformation_ratio gives a very small rotation; cval only applies with fill_mode='constant'\n",
"# (the default is 'nearest'), so it has no effect here.\n",
"train_datagen = ImageDataGenerator(rescale=1. / 255,\n",
"                                   rotation_range=transformation_ratio,\n",
"                                   shear_range=transformation_ratio,\n",
"                                   zoom_range=transformation_ratio,\n",
"                                   cval=transformation_ratio,\n",
"                                   horizontal_flip=True,\n",
"                                   vertical_flip=True,\n",
"                                   validation_split=0.1)\n",
"\n",
"train_generator = train_datagen.flow_from_directory(train_data_dir,\n",
"                                                    target_size=(img_height, img_width),\n",
"                                                    batch_size=batch_size,\n",
"                                                    class_mode='categorical',  # one-hot labels, to match categorical_crossentropy\n",
"                                                    subset='training')  # set as training data\n",
"\n",
"validation_generator = train_datagen.flow_from_directory(train_data_dir,  # same directory as the training data\n",
"                                                         target_size=(img_height, img_width),\n",
"                                                         batch_size=batch_size,\n",
"                                                         class_mode='categorical',\n",
"                                                         subset='validation')  # set as validation data\n",
"\n",
"model.compile(optimizer='nadam',\n",
"              loss='categorical_crossentropy',  # categorical_crossentropy for a multi-class classifier\n",
"              metrics=['accuracy'])\n",
"\n",
"# save the weights of the best training epoch: monitor either val_loss or val_accuracy\n",
"os.makedirs(model_path, exist_ok=True)  # ModelCheckpoint does not create the directory\n",
"top_weights_path = os.path.join(os.path.abspath(model_path), 'top_model_weights.h5')\n",
"callbacks_list = [\n",
"    ModelCheckpoint(top_weights_path, monitor='val_accuracy', verbose=1, save_best_only=True),\n",
"    EarlyStopping(monitor='val_accuracy', patience=patience, verbose=0)\n",
"]\n",
"\n",
"# train only the new top block (the base model stays frozen)\n",
"hist = model.fit(train_generator,\n",
"                 steps_per_epoch=int(train_generator.samples / batch_size * .8),  # use ~80% of the batches per epoch\n",
"                 epochs=int(nb_epoch / 5),\n",
"                 validation_data=validation_generator,\n",
"                 validation_steps=int(validation_generator.samples / batch_size * .8),\n",
"                 validation_freq=1,\n",
"                 callbacks=callbacks_list)"
]
},
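{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch (assuming matplotlib is installed; not part of the original\n",
"# flow): plot the training history from the cell above to see when\n",
"# val_accuracy plateaus, which helps when tuning nb_epoch and the\n",
"# EarlyStopping patience. The keys follow the metric names used at compile time.\n",
"import matplotlib.pyplot as plt\n",
"\n",
"plt.plot(hist.history['accuracy'], label='train accuracy')\n",
"plt.plot(hist.history['val_accuracy'], label='val accuracy')\n",
"plt.xlabel('epoch')\n",
"plt.legend()\n",
"plt.show()"
]
},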
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# verbose\n",
"print(\"\\nStarting to Fine Tune Model\\n\")\n",
"\n",
"# load the best weights from the top-model training:\n",
"# at this point we have the pre-trained weights of the base model and the trained weights of the new top block;\n",
"# we re-load the weights so the best epoch is used, not the last one\n",
"model.load_weights(top_weights_path)\n",
"\n",
"print(model.summary())\n",
"\n",
"# based_model_last_block_layer_number points to the first layer in the model you want to re-train.\n",
"# For example, to train the last block of a 19-layer VGG16 model this should be 15;\n",
"# to train the last two blocks of an Inception model it should be 172.\n",
"# Layers before this index keep the pre-trained weights; layers from this index on\n",
"# will be re-trained on the new data.\n",
"for layer in model.layers[:based_model_last_block_layer_number]:\n",
"    layer.trainable = False\n",
"for layer in model.layers[based_model_last_block_layer_number:]:\n",
"    layer.trainable = True\n",
"\n",
"# re-compile so the new trainable flags take effect\n",
"# (here with 'nadam'; a low-learning-rate SGD with momentum is a common alternative when fine-tuning)\n",
"model.compile(optimizer='nadam',\n",
"              loss='categorical_crossentropy',\n",
"              metrics=['accuracy'])\n",
"\n",
"# save the weights of the best training epoch: monitor either val_loss or val_accuracy\n",
"final_weights_path = os.path.join(os.path.abspath(model_path), 'model_weights.h5')\n",
"callbacks_list = [\n",
"    ModelCheckpoint(final_weights_path, monitor='val_accuracy', verbose=1, save_best_only=True),\n",
"    EarlyStopping(monitor='val_accuracy', patience=patience, verbose=0)\n",
"]\n",
"\n",
"# fine-tune the model (steps must be integers)\n",
"hist = model.fit(train_generator,\n",
"                 steps_per_epoch=int(train_generator.samples / batch_size * .8),\n",
"                 epochs=nb_epoch,\n",
"                 validation_data=validation_generator,\n",
"                 validation_steps=int(validation_generator.samples / batch_size * .8),\n",
"                 callbacks=callbacks_list)\n",
"\n",
"# save the model architecture as JSON (the weights are saved by ModelCheckpoint)\n",
"model_json = model.to_json()\n",
"with open(os.path.join(os.path.abspath(model_path), 'model.json'), 'w') as json_file:\n",
"    json_file.write(model_json)"
]
},
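{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal inference sketch (the image path is a placeholder, not from the\n",
"# original notebook): classify one image with the fine-tuned model, using the\n",
"# same preprocessing as training (resize to the training size, rescale 1/255).\n",
"from tensorflow.keras.preprocessing import image\n",
"\n",
"img = image.load_img('example.jpg', target_size=(img_height, img_width))  # placeholder path\n",
"x = image.img_to_array(img) / 255.\n",
"x = np.expand_dims(x, axis=0)  # add the batch dimension\n",
"\n",
"probs = model.predict(x)[0]\n",
"idx_to_class = {v: k for k, v in train_generator.class_indices.items()}\n",
"print('predicted:', idx_to_class[int(np.argmax(probs))], 'p=%.3f' % probs.max())"
]
},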
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import classification_report, confusion_matrix\n",
"\n",
"# reload the saved architecture and the best fine-tuned weights\n",
"json_file = open(os.path.join(os.path.abspath(model_path), 'model.json'), 'r')\n",
"loaded_model_json = json_file.read()\n",
"json_file.close()\n",
"loaded_model = tf.keras.models.model_from_json(loaded_model_json)\n",
"loaded_model.load_weights(final_weights_path)  # model_from_json restores the architecture only\n",
"\n",
"# Confusion matrix and classification report.\n",
"# Use a non-shuffled generator so the prediction order matches .classes;\n",
"# with the default shuffle=True the report would compare predictions against the wrong labels.\n",
"eval_generator = train_datagen.flow_from_directory(train_data_dir,\n",
"                                                   target_size=(img_height, img_width),\n",
"                                                   batch_size=batch_size,\n",
"                                                   class_mode='categorical',\n",
"                                                   subset='validation',\n",
"                                                   shuffle=False)\n",
"Y_pred = loaded_model.predict(eval_generator, steps=int(np.ceil(eval_generator.samples / batch_size)))\n",
"y_pred = np.argmax(Y_pred, axis=1)\n",
"print('Confusion Matrix')\n",
"print(confusion_matrix(eval_generator.classes, y_pred))\n",
"print('Classification Report')\n",
"target_names = list(eval_generator.class_indices.keys())\n",
"print(classification_report(eval_generator.classes, y_pred, target_names=target_names))"
]
},
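{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch (the filename is a placeholder): save the architecture,\n",
"# weights and optimizer state in a single HDF5 file, as an alternative to the\n",
"# separate model.json + weights files used above.\n",
"model.save(os.path.join(os.path.abspath(model_path), 'full_model.h5'))\n",
"# ...which can later be restored in one call:\n",
"# restored = tf.keras.models.load_model(os.path.join(os.path.abspath(model_path), 'full_model.h5'))"
]
}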
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long