Compare commits

1 commit: master...dependabot

| Author | SHA1 | Date |
|---|---|---|
|  | 26acdd7c67 |  |

.gitignore (vendored): 1 change
@@ -8,7 +8,6 @@ Train&Test_4/
Train&Test_D/
Train&Test_C/
Train&Test_A/
Train&Test_A2/
Train&Test_S/
result_ssd7_panel_cell/
Thermal/
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -239,7 +239,7 @@ def rgb2hsv(rgb):

def doubleMADsfromMedian(y, thresh=3.5):
    # warning: this function does not check for NAs
    # nor does it address issues when
    # more than 50% of your data have identical values
    m = np.median(y)
    abs_dev = np.abs(y - m)
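The hunk above stops right after `abs_dev`, so the rest of `doubleMADsfromMedian` is not visible in this diff. For orientation, here is a minimal sketch of the standard double-MAD outlier rule the function name refers to; the left/right MAD split, the 0.6745 scaling, and the final threshold test are assumptions based on that standard formulation, not the repository's exact code.

```python
import numpy as np

def double_mads_from_median_sketch(y, thresh=3.5):
    """Flag outliers with the double-MAD rule (illustrative sketch only)."""
    y = np.asarray(y, dtype=float)
    m = np.median(y)
    abs_dev = np.abs(y - m)
    # Separate MADs for the left and right halves of the distribution,
    # which handles skewed data better than a single MAD.
    left_mad = np.median(abs_dev[y <= m])
    right_mad = np.median(abs_dev[y >= m])
    y_mad = np.where(y <= m, left_mad, right_mad)
    # 0.6745 makes the score comparable to a z-score for normal data.
    # As the original comments warn, a zero MAD (more than 50% identical
    # values) is not handled here and would divide by zero.
    modified_z = 0.6745 * abs_dev / y_mad
    modified_z[y == m] = 0.0
    return modified_z > thresh
```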
@@ -255,7 +255,7 @@ def doubleMADsfromMedian(y,thresh=3.5):

def watershed_marked(thresh, min_Area=100, threshold_median_Area=3):
    ## Thresh is the segmentation image used to watershed
    ##

    # Perform the distance transform algorithm
    dist = cv2.distanceTransform(thresh, cv2.DIST_L2, 3)
    # Normalize the distance image for range = {0.0, 1.0}
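Only the opening lines of `watershed_marked` appear in this hunk. The sketch below shows one common way to go from the distance transform to labeled markers with OpenCV (peak thresholding, connected components as seeds, watershed, then an area filter); the 0.4 peak threshold, the seeding strategy, and the helper name are assumptions for illustration, not the repository's implementation.

```python
import cv2
import numpy as np

def watershed_markers_sketch(thresh, min_area=100):
    """Marker-based watershed on a binary 8-bit mask (illustrative only)."""
    # Distance transform peaks sit near the center of each object.
    dist = cv2.distanceTransform(thresh, cv2.DIST_L2, 3)
    cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
    # Keep only strong peaks as seed regions (0.4 is an assumed cut-off).
    _, peaks = cv2.threshold(dist, 0.4, 1.0, cv2.THRESH_BINARY)
    dist_8u = (peaks * 255).astype(np.uint8)
    n_labels, markers = cv2.connectedComponents(dist_8u)
    # cv2.watershed expects a 3-channel 8-bit image and int32 markers.
    color = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
    markers = cv2.watershed(color, markers.astype(np.int32))
    # Area bookkeeping, mirroring the min_Area filter in the next hunk.
    areas = [(lab, int(np.sum(markers == lab))) for lab in range(1, n_labels)]
    for lab, area in areas:
        if area < min_area:
            markers[markers == lab] = 0
    kept = np.array([(lab, a) for lab, a in areas if a >= min_area],
                    dtype=int).reshape(-1, 2)
    return kept, dist_8u, markers
```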
@@ -300,14 +300,14 @@ def watershed_marked(thresh, min_Area = 100, threshold_median_Area = 3):
    for i, Logic in zip(Areas[:,0], L_Areas):
        if Logic:
            markers[markers == i] = 0

    return Areas[L_Areas,:], dist_8u, markers


def pixel2gps(points, geot):
    # transform pixel to gps coordinate
    return np.vstack(gr.map_pixel_inv(points[:,1], points[:,0], geot[1], geot[-1], geot[0], geot[3])).T


def gps2pixel(points_coord, geot):
    # transform gps coordinate to pixel
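`pixel2gps` and `gps2pixel` rely on `gr.map_pixel_inv`, where `gr` is presumably the georasters package imported elsewhere in the script. As a dependency-free point of comparison, the same conversion can be written directly against a GDAL-style six-element geotransform; the function names below are hypothetical, and a north-up image (no rotation terms) is assumed.

```python
import numpy as np

def pixel2gps_sketch(points, geot):
    """Map (row, col) pixel indices to map coordinates via the geotransform."""
    rows, cols = points[:, 0], points[:, 1]
    x = geot[0] + cols * geot[1] + rows * geot[2]
    y = geot[3] + cols * geot[4] + rows * geot[5]
    return np.column_stack([x, y])

def gps2pixel_sketch(coords, geot):
    """Inverse mapping, valid when geot[2] == geot[4] == 0 (north-up)."""
    cols = (coords[:, 0] - geot[0]) / geot[1]
    rows = (coords[:, 1] - geot[3]) / geot[5]
    return np.column_stack([rows, cols]).astype(int)
```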
Binary file not shown.
@@ -9,8 +9,4 @@ matplotlib==3.2.2
scipy==1.4.1
ipython==7.19.0
scikit_learn==0.23.2
geopandas
rtree==0.9.3
simplekml
urbanaccess
pandana
cenpy
@@ -1,4 +1,3 @@
original: https://github.com/RentadroneCL/Photovoltaic_Fault_Detector

# Photovoltaic Fault Detector

@@ -7,11 +6,11 @@ original: https://github.com/RentadroneCL/Photovoltaic_Fault_Detector
[](https://www.codetriage.com/rentadronecl/photovoltaic_fault_detector)
[](https://coveralls.io/github/RentadroneCL/Photovoltaic_Fault_Detector)

[SimpleMap.io](https://simplemap.io/)
[Rentadrone.cl](https://rentadronecl.github.io)

## Forum

This project is part of the [UNICEF Innovation Fund Discourse community](https://unicef-if.discourse.group/c/projects/rentadrone/10). You can post comments or questions about each category of [SimpleMap.io Open-Source Initiative](https://rentadronecl.github.io) algorithms. We encourage users to participate in the forum and to engage with fellow users.
This project is part of the [UNICEF Innovation Fund Discourse community](https://unicef-if.discourse.group/c/projects/rentadrone/10). You can post comments or questions about each category of [Rentadrone Developers](https://rentadrone.cl/developers/) algorithms. We encourage users to participate in the forum and to engage with fellow users.

## Summary
@@ -210,7 +209,7 @@ The configuration file for SSD300 is a json file, which looks like this (exampl
`python train_ssd.py -c config.json -o /path/to/result`

or

`python train_yolo.py -c config.json -o /path/to/result`
`python train_ssd.py -c config.json -o /path/to/result`

By the end of this process, the code will write the weights of the best model to the file best_weights.h5 (or whatever name is specified in the "saved_weights_name" setting in the config.json file). The training process stops when the loss on the validation set has not improved for 20 consecutive epochs.
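As a small, hedged illustration of what happens with the saved weights afterwards (the path comes from the `saved_weights_name` entry in the config files later in this diff; the repository's own prediction scripts may load the model differently):

```python
# Sketch only: assumes TensorFlow/Keras 2.x and the saved_weights_name path
# from config.json; compile=False avoids the "no training configuration"
# warning seen in the training logs below.
from tensorflow.keras.models import load_model

infer_model = load_model("Result_yolo3_panel2/yolo3_full_panel.h5", compile=False)
infer_model.summary()
```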
@@ -265,5 +264,6 @@ Before sending your pull requests, make sure you followed this list.
- Read [Code of Conduct](CODE_OF_CONDUCT.md).
- Check if my changes are consistent with the [guidelines](https://github.com/RentadroneCL/model-definition/blob/master/CONTRIBUTING.md#general-guidelines-and-philosophy-for-contribution).
- Changes are consistent with the [Coding Style](https://github.com/RentadroneCL/model-definition/blob/master/CONTRIBUTING.md#c-coding-style).
- Run [Unit Tests](https://github.com/RentadroneCL/model-definition/CONTRIBUTING.md#running-unit-tests).

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1 +0,0 @@
Tiempo promedio:nan
Binary file not shown.
Binary file not shown.
@@ -1,29 +0,0 @@
2020-12-14 22:27:04.267611: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libnvinfer.so.6'; dlerror: libnvinfer.so.6: cannot open shared object file: No such file or directory
2020-12-14 22:27:04.267749: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libnvinfer_plugin.so.6'; dlerror: libnvinfer_plugin.so.6: cannot open shared object file: No such file or directory
2020-12-14 22:27:04.267767: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:30] Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2020-12-14 22:27:05.268036: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2020-12-14 22:27:05.268076: E tensorflow/stream_executor/cuda/cuda_driver.cc:351] failed call to cuInit: UNKNOWN ERROR (303)
2020-12-14 22:27:05.268102: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (dlsaavedra-X406UAR): /proc/driver/nvidia/version does not exist
2020-12-14 22:27:05.268334: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2020-12-14 22:27:05.311492: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 1800000000 Hz
2020-12-14 22:27:05.313101: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55f4be3033d0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-12-14 22:27:05.313143: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
WARNING:tensorflow:AutoGraph could not transform <bound method YoloLayer.call of <yolo.YoloLayer object at 0x7fb92423a8d0>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 144)
WARNING:tensorflow:AutoGraph could not transform <bound method YoloLayer.call of <yolo.YoloLayer object at 0x7fb904726710>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 144)
WARNING:tensorflow:AutoGraph could not transform <bound method YoloLayer.call of <yolo.YoloLayer object at 0x7fb9044f2c10>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 144)
WARNING:tensorflow:ModelCheckpoint mode 1 is unknown, fallback to auto mode.
WARNING:tensorflow:Model failed to serialize as JSON. Ignoring... Layers with arguments in `__init__` must override `get_config`.
2020-12-14 22:27:16.470550: I tensorflow/core/profiler/lib/profiler_session.cc:225] Profiler session started.
2020-12-14 22:27:16.470928: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libcupti.so.10.1'; dlerror: libcupti.so.10.1: cannot open shared object file: No such file or directory
2020-12-14 22:27:16.470972: E tensorflow/core/profiler/internal/gpu/cupti_tracer.cc:1307] function cupti_interface_->Subscribe( &subscriber_, (CUpti_CallbackFunc)ApiCallback, this)failed with error CUPTI could not be loaded or symbol could not be found.
2020-12-14 22:27:16.470988: E tensorflow/core/profiler/internal/gpu/cupti_tracer.cc:1346] function cupti_interface_->ActivityRegisterCallbacks( AllocCuptiActivityBuffer, FreeCuptiActivityBuffer)failed with error CUPTI could not be loaded or symbol could not be found.
2020-12-14 22:27:20.300351: E tensorflow/core/profiler/internal/gpu/cupti_tracer.cc:1329] function cupti_interface_->EnableCallback( 0 , subscriber_, CUPTI_CB_DOMAIN_DRIVER_API, cbid)failed with error CUPTI could not be loaded or symbol could not be found.
2020-12-14 22:27:20.300441: I tensorflow/core/profiler/internal/gpu/device_tracer.cc:88] GpuTracer has collected 0 callback api events and 0 activity events.
2020-12-14 22:28:18.604124: W tensorflow/core/kernels/data/generator_dataset_op.cc:103] Error occurred when finalizing GeneratorDataset iterator: Cancelled: Operation was cancelled
2020-12-14 22:29:29.713989: W tensorflow/core/kernels/data/generator_dataset_op.cc:103] Error occurred when finalizing GeneratorDataset iterator: Cancelled: Operation was cancelled
@@ -1,22 +0,0 @@
Seen labels: {'panel': 854}

Given labels: ['panel']

Training on: ['panel']

multi_gpu:1

Loading pretrained weights.

Train for 12 steps, validate for 1 steps
Epoch 1/2

Epoch 00001: loss improved from inf to 40.76296, saving model to Result_yolo3_panel2/yolo3_full_panel.h5
12/12 - 68s - loss: 40.7630 - yolo_layer_loss: 12.8884 - yolo_layer_1_loss: 27.8505 - yolo_layer_2_loss: 0.0241 - val_loss: 3793.7336 - val_yolo_layer_loss: 1322.7670 - val_yolo_layer_1_loss: 2470.9607 - val_yolo_layer_2_loss: 0.0060
Epoch 2/2

Epoch 00002: loss did not improve from 40.76296
12/12 - 71s - loss: 46.9910 - yolo_layer_loss: 20.9780 - yolo_layer_1_loss: 26.0097 - yolo_layer_2_loss: 0.0033 - val_loss: 72523.8125 - val_yolo_layer_loss: 4534.5332 - val_yolo_layer_1_loss: 67989.2812 - val_yolo_layer_2_loss: 0.0000e+00
94 instances of class panel with average precision: 0.0000
mAP using the weighted average of precisions among classes: 0.0000
mAP: 0.0000
@@ -1,11 +0,0 @@
2020-12-14 22:29:36.919265: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libnvinfer.so.6'; dlerror: libnvinfer.so.6: cannot open shared object file: No such file or directory
2020-12-14 22:29:36.919408: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libnvinfer_plugin.so.6'; dlerror: libnvinfer_plugin.so.6: cannot open shared object file: No such file or directory
2020-12-14 22:29:36.919425: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:30] Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2020-12-14 22:29:38.653634: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2020-12-14 22:29:38.653676: E tensorflow/stream_executor/cuda/cuda_driver.cc:351] failed call to cuInit: UNKNOWN ERROR (303)
2020-12-14 22:29:38.653711: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (dlsaavedra-X406UAR): /proc/driver/nvidia/version does not exist
2020-12-14 22:29:38.653939: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2020-12-14 22:29:38.678825: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 1800000000 Hz
2020-12-14 22:29:38.679672: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x5569ee6e0e00 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-12-14 22:29:38.679731: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
WARNING:tensorflow:No training configuration found in save file: the model was *not* compiled. Compile it manually.
@@ -1,4 +0,0 @@
dict_items([(0, (0.0, 94.0))])
94 instances of class panel with average precision: 0.0000
mAP using the weighted average of precisions among classes: 0.0000
mAP: 0.0000
@@ -1,49 +0,0 @@
{
    "model" : {
        "min_input_size": 400,
        "max_input_size": 400,
        "anchors": [0,0, 0,0, 0,0, 4,175, 4,92, 5,92, 6,190, 7,87, 30,5],
        "labels": ["panel"],
        "backend": "keras-yolo3-master/full_yolo_backend.h5"
    },

    "train": {
        "train_image_folder": "Train&Test_A2/Train/images/",
        "train_annot_folder": "Train&Test_A2/Train/anns/",
        "cache_name": "Result_yolo3_panel2/train_panel.pkl",

        "train_times": 3,

        "batch_size": 2,
        "learning_rate": 1e-3,
        "nb_epochs": 500,
        "warmup_epochs": 15,
        "ignore_thresh": 0.5,
        "gpus": "0",

        "grid_scales": [1,1,1],
        "obj_scale": 5,
        "noobj_scale": 1,
        "xywh_scale": 1,
        "class_scale": 1,

        "tensorboard_dir": "Result_yolo3_panel2/log_experimento_panel_gpu",
        "saved_weights_name": "Result_yolo3_panel2/yolo3_full_panel.h5",
        "debug": true
    },

    "valid": {
        "valid_image_folder": "Train&Test_A2/Train/images/",
        "valid_annot_folder": "Train&Test_A2/Train/anns/",
        "cache_name": "Result_yolo3_panel2/val_panel_2.pkl",

        "valid_times": 1
    },
    "test": {
        "test_image_folder": "Train&Test_A2/Train/images/",
        "test_annot_folder": "Train&Test_A2/Train/anns/",
        "cache_name": "Result_yolo3_panel2/test_panel_2.pkl",

        "test_times": 1
    }
}
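For context, a brief sketch of how a config with this layout is typically consumed; the exact parsing lives in train_yolo.py, and the snippet only assumes the keys visible in the JSON above.

```python
import json

# Load the experiment configuration (keys as in the JSON above).
with open("config.json") as f:
    config = json.load(f)

labels = config["model"]["labels"]        # e.g. ["panel"]
anchors = config["model"]["anchors"]      # flattened list of 9 (w, h) pairs
batch_size = config["train"]["batch_size"]
weights_path = config["train"]["saved_weights_name"]
print(labels, len(anchors) // 2, batch_size, weights_path)
```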
@@ -1,49 +0,0 @@
{
    "model" : {
        "min_input_size": 400,
        "max_input_size": 400,
        "anchors": [0,0, 0,0, 0,0, 4,175, 4,92, 5,92, 6,190, 7,87, 30,5],
        "labels": ["panel"],
        "backend": "keras-yolo3-master/full_yolo_backend.h5"
    },

    "train": {
        "train_image_folder": "Train&Test_A2/Train/images/",
        "train_annot_folder": "Train&Test_A2/Train/anns/",
        "cache_name": "Result_yolo3_panel2/train_panel.pkl",

        "train_times": 3,

        "batch_size": 2,
        "learning_rate": 1e-3,
        "nb_epochs": 2,
        "warmup_epochs": 15,
        "ignore_thresh": 0.5,
        "gpus": "0",

        "grid_scales": [1,1,1],
        "obj_scale": 5,
        "noobj_scale": 1,
        "xywh_scale": 1,
        "class_scale": 1,

        "tensorboard_dir": "Result_yolo3_panel2/log_experimento_panel_gpu",
        "saved_weights_name": "Result_yolo3_panel2/yolo3_full_panel.h5",
        "debug": true
    },

    "valid": {
        "valid_image_folder": "Train&Test_A2/Test/images/",
        "valid_annot_folder": "Train&Test_A2/Test/anns/",
        "cache_name": "Result_yolo3_panel2/val_panel.pkl",

        "valid_times": 1
    },
    "test": {
        "test_image_folder": "Train&Test_A2/Test/images/",
        "test_annot_folder": "Train&Test_A2/Test/anns/",
        "cache_name": "Result_yolo3_panel2/test_panel.pkl",

        "test_times": 1
    }
}
@@ -272,7 +272,11 @@ def _main_(args):
        callbacks        = callbacks
    )

    train_model.load_weights(config['train']['saved_weights_name'])

    # make a GPU version of infer_model for evaluation
    #if multi_gpu > 1:
    #    infer_model = load_model(config['train']['saved_weights_name'])
    infer_model.load_weights(config['train']['saved_weights_name'])
    infer_model.save(config['train']['saved_weights_name'])

    ###############################
    # Run the evaluation
@@ -4,14 +4,14 @@ gast==0.2.2
grpcio
h5py
Markdown
numpy==1.22
numpy
opencv-contrib-python
opt-einsum
protobuf
PyYAML
scipy
six
tensorflow==2.7.2
tensorflow==2.4.0
termcolor
tqdm
Werkzeug
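A quick, hedged way to confirm that an environment matches the pins after this bump (assumes Python 3.8+ for importlib.metadata; package names as they appear in requirements.txt):

```python
from importlib.metadata import PackageNotFoundError, version

# Print installed versions for packages touched or pinned in requirements.txt.
for pkg in ("tensorflow", "numpy", "h5py", "opencv-contrib-python"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} is not installed")
```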