Supervisor and Jurse ready for production #1
@@ -7,13 +7,16 @@ detail: |
   generic. Dynamic loading of the protocol must be added, and the
   supervisor's behaviour then reworked to respect the universal spirit
   of minigrida.
-protocol: Jurse
+protocol:
+  name: Jurse
+  package: protocols.jurse
 expe:
   ground_truth:
     raster: ./Data/ground_truth/2018_IEEE_GRSS_DFC_GT_TR.tif
-    meta_labels: GT/jurse_idx.csv
+    meta_labels: ./Data/ground_truth/jurse_idx.csv
   descriptors_script:
-    name: Descriptors.dfc_aps
+    name: dfc_aps
+    package: descriptors
     parameters:
       areas:
       - 100
@@ -26,23 +29,25 @@ expe:
       - ./Data/phase1_rasters/DEM_C123_3msr/UH17_GEG051_TR.tif
       treshold: 1e4
   cross_validation:
-    name: CrossValidationGenerator.APsCVG
+    name: APsCVG
+    package: CVGenerators
     parameters:
       n_test: 2
   classifier:
-    name: sklearn.ensemble.RandomForestClassifier
+    name: RandomForestClassifier
+    package: sklearn.ensemble
     parameters:
       min_samples_leaf: 10
       n_estimators: 50
       n_jobs: -1
       random_state: 0
-expe_hashes:
+hashes:
   ground_truth: 2c5ecaddcb8c4a1c8863bc65e7440de4a1b4962c
   descriptors_script: cfdcc84d9d9c47177930257f286d850db446812b
   cross_validation: 4a61b34fda812fe717890b25d75430023335a7a6
   classifier: 40e6741ef8cc4b4fbe188b8ca0563eb5195b88ad
   global: b8219fab322bf11ec1aac14a1f51466dd94ddbdd
-expe_report:
+report:
   supervisor: thecomedian
   start_date: Le 27/07/2018 à 16:28:52
   end_date: Le 27/07/2018 à 16:29:54
@@ -52,10 +57,10 @@ expe_report:
     description: 0.6744262149950373
     classification: 168.82905034400028
     metrics: 1.1557443889978458
-expe_results:
-  classification: test_b8219f.tif
-  dimensions: 42
-  scores:
+results:
+  classification: ./Enrichment/Results/test_b8219f.tif
+  metrics:
+    dimensions: 42
     overall_accuracy: 0.5550408093111998
     cohen_kappa: 0.41714275852261407
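Splitting `protocol: Jurse` into a name/package pair is what lets the supervisor resolve any protocol class at runtime instead of hard-coding `protocols.jurse`. A minimal sketch of that resolution, mirroring the `importlib` call added in supervisor.py below (the helper name `load_class` is illustrative, not part of the codebase):

    import importlib

    def load_class(entry):
        # entry is a parsed YAML mapping such as
        # {'name': 'Jurse', 'package': 'protocols.jurse'}
        module = importlib.import_module(entry['package'])
        return getattr(module, entry['name'])

    # protocol_cls = load_class(test['protocol'])
    # experience = protocol_cls(test['expe'])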
supervisor.py
@@ -53,7 +53,7 @@ def update_queue():
     tmp_queue = list()
     for child in TEST_DIR.iterdir():
         if child.is_file() and child.suffix == '.yml':
-            tmp_queue.append({'expe_file': child,
+            tmp_queue.append({'expe_file': ExpePath(child),
                               'priority': get_priority(child)})

     queue = sorted(tmp_queue, key=itemgetter('priority'))
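The body of get_priority() is not part of this diff; given the `priority: 1` key carried by test_mockup.yml below, a plausible sketch (hypothetical, unconfirmed) is simply:

    def get_priority(yml_file):
        # read the 'priority' key from the test YAML; lower sorts first
        with open(yml_file) as f:
            return yaml.safe_load(f).get('priority', 0)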
@@ -67,30 +67,56 @@ def get_priority(yml_file):


 def run(expe_file):
+    start_time = time.time()
     log.info('Run test {}'.format(expe_file))
-    with open(expe_file) as f:
-        test = OrderedDict(yaml.safe_load(f))
+    test = expe_file.read()

     ### Stage test
+    expe_file.stage(test)

     ### Load protocol
-    protocol = getattr(importlib.import_module('protocols.jurse'), test['protocol'])
-    experience = protocol(test['expe'])
+    try:
+        protocol = getattr(importlib.import_module(test['protocol']['package']),
+                           test['protocol']['name'])
+        experience = protocol(test['expe'])
+    except Exception as e:
+        err = 'Could not load protocol from test {}'.format(expe_file)
+        log.warning(err)
+        expe_file.error(test, 'loading protocol', e)
+        raise TestError(err)
     log.info('{} test protocol loaded'.format(experience))

     ### Write hashes
-    hashes = experience.get_hashes()
-    log.info(hashes)
+    test['hashes'] = experience.get_hashes()
+    test['report'] = create_report(start_time)
+    expe_file.stage(test)

     ### Run test
+    try:
+        experience.run()
+    except Exception as e:
+        err = 'Experience error'
+        log.warning(err)
+        expe_file.error(test, 'testing', e)
+        raise TestError(err)

-    ### Write report
+    end_time = time.time()

+    ### Write complete report
+    report = create_report(start_time, end_time)
+    ressources = OrderedDict()
+    ressources['ram'] = None
+    ressources['process_time'] = experience.get_process_time()
+    report['ressources'] = ressources
+    test['report'] = report

     ### Write results
+    test['results'] = experience.get_results()
+    expe_file.result(test)
+    log.info('Additional results in {}'.format(expe_file.get_result_path()))

     ### End of test
+    log.info('Test complete')
     return

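From the calls in run(), a protocol class such as Jurse must provide at least a constructor taking the `expe` mapping plus get_hashes(), run(), get_process_time() and get_results(). A hedged skeleton of that interface; the method bodies are placeholders, not the actual Jurse implementation, and the return types are inferred from the result file layout above:

    from collections import OrderedDict

    class ProtocolSkeleton:
        """Placeholder showing the interface run() relies on."""

        def __init__(self, expe):
            # 'expe' is the parsed 'expe' section of the test YAML
            self._expe = expe

        def get_hashes(self):
            # must include a 'global' entry: ExpePath._check_hash() reads it
            return OrderedDict([('global', '0' * 40)])

        def run(self):
            pass  # description, classification and metrics stages go here

        def get_process_time(self):
            return OrderedDict()  # per-stage process times

        def get_results(self):
            return OrderedDict()  # e.g. classification path and metrics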
@@ -149,20 +175,71 @@ def run(expe_file):
     (STAGING_DIR / oname_yml).unlink()
     write_expe_file(RESULT_DIR / oname_yml, expe, expe_hashes, expe_report, oname_tif, metrics)

-    log.info('Test complete')

+class ExpePath:
+    """Utility wrapper for expe files.
+
+    Extends pathlib's Path with stage, result and error methods to move the
+    test report through the Enrichment center.
+    """
+    def __init__(self, path, hash_length=6):
+        self._actual = Path(path)
+        self._base_name = self._actual.stem
+        self._hash_length = hash_length
+        self._hash = None
+
+    def __str__(self):
+        return self._get_complete_name()
+
+    def read(self):
+        with open(self._actual) as f:
+            return OrderedDict(yaml.safe_load(f))
+
+    def _get_hash_name(self):
+        return '{}{}'.format(self._base_name,
+                             '_' + self._hash[:self._hash_length] if self._hash is not None else '')
+
+    def _get_complete_name(self):
+        return self._get_hash_name() + '.yml'
+
+    def stage(self, expe):
+        log.info('Staging {}'.format(self._base_name))
+        self._check_hash(expe)
+        self._write(STAGING_DIR, expe)
+
+    def result(self, expe):
+        log.info('Write results for test {}'.format(self._base_name))
+        self._check_hash(expe)
+        self._write(RESULT_DIR, expe)
+
+    def error(self, expe, when='', e=Exception):
+        error = OrderedDict()
+        error['when'] = when
+        error['what'] = str(e)
+        error['where'] = traceback.format_exc()
+        expe['error'] = error
+        self._write(FAILED_DIR, expe)
+
+    def get_result_path(self):
+        return Path(RESULT_DIR) / self._get_hash_name()
+
+    def _check_hash(self, expe):
+        if self._hash is None:
+            if 'hashes' in expe:
+                self._hash = expe['hashes']['global']
+
+    def _write(self, path, expe):
+        new_path = Path(path) / self._get_complete_name()
+        with open(new_path, 'w') as of:
+            yaml.dump(expe, of,
+                      default_flow_style=False,
+                      encoding=None,
+                      allow_unicode=True)
+        self._actual.unlink()
+        self._actual = new_path
+
+
-def write_error(file, expe, hashes=None, report=None, when='', e=Exception):
-    error = OrderedDict()
-    error['when'] = when
-    error['what'] = str(e)
-    error['where'] = traceback.format_exc()
-    with open(file, 'w') as of:
-        yaml.dump(OrderedDict({'expe': expe,
-                               'expe_hashes': hashes,
-                               'expe_report': report,
-                               'expe_error': error}),
-                  of, default_flow_style=False, encoding=None, allow_unicode=True)


 def write_expe_file(file, expe, hashes=None, report=None, classification=None, results=None):
     with open(file, 'w') as of:
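How an expe file travels through the directories, pieced together from run() and ExpePath: the YAML moves between folders, and its name gains the first six characters of the global hash once hashes are known. A usage sketch; the 'Enrichment/Tests' path is an assumption, since TEST_DIR's value is not shown in this diff:

    expe_file = ExpePath('Enrichment/Tests/test_mockup.yml')

    test = expe_file.read()   # OrderedDict from yaml.safe_load
    expe_file.stage(test)     # -> STAGING_DIR / 'test_mockup.yml' (no hash yet)

    test['hashes'] = {'global': 'b8219fab322bf11ec1aac14a1f51466dd94ddbdd'}
    expe_file.stage(test)     # -> STAGING_DIR / 'test_mockup_b8219f.yml'

    expe_file.result(test)    # -> RESULT_DIR / 'test_mockup_b8219f.yml'
    # on failure at any point:
    # expe_file.error(test, 'testing', exc)  -> FAILED_DIR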
@@ -186,24 +263,41 @@ def compute_hashes(expe):
     expe_hashes['global'] = glob.hexdigest()
     return expe_hashes

-def create_report(kronos):
+def create_report(stime=None, etime=None):
     expe_report = OrderedDict()

     expe_report['supervisor'] = os.uname()[1]
-    for timev, datek in zip((kronos.get_start_date(), kronos.get_end_date()), ('start_date', 'end_date')):
+    for datek, timev in zip(('start_date', 'end_date'), (stime, etime)):
         expe_report[datek] = datetime.datetime.fromtimestamp(timev).strftime('Le %d/%m/%Y à %H:%M:%S') if timev is not None else None

-    ressources = kronos.get_times()
-    ressources['ram'] = None

-    expe_report['ressources'] = ressources
     return expe_report


 def watch_folder():
     log.info('Waiting for test')
     while not list(TEST_DIR.glob('*.yml')):
         time.sleep(10)

+
+class Kronos(object):
+    def __init__(self):
+        self._times = OrderedDict()
+        self._pt = time.process_time()
+        self._stime = time.time()
+        self._etime = None
+
+    def time(self, name):
+        self._times[name + '_process_time'] = time.process_time() - self._pt
+        self._pt = time.process_time()
+        self._etime = time.time()
+
+    def get_times(self):
+        return self._times
+
+    def get_start_date(self):
+        return self._stime
+
+    def get_end_date(self):
+        return self._etime
+
+
 def main():
     while True:
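Kronos is added here but nothing in this diff instantiates it; a plausible (unconfirmed) use is inside a protocol, which would explain both `experience.get_process_time()` in run() and the description/classification/metrics timings seen in the result file. A short usage sketch under that assumption:

    kronos = Kronos()             # starts both wall and process clocks

    # ... build descriptors ...
    kronos.time('description')    # stores 'description_process_time'
    # ... fit / predict ...
    kronos.time('classification')
    # ... compute scores ...
    kronos.time('metrics')

    # what a protocol could hand back to run() for report['ressources']:
    process_times = kronos.get_times()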
test_mockup.yml (new file)
@@ -0,0 +1,43 @@
+name: First experiment
+date: 9 July 2018
+priority: 1
+detail: |
+  Mockup for the creation of the minigrida supervisor. Compared to the
+  legacy version of the LD2DAPs project, the choice of the test protocol is
+  generic. Dynamic loading of the protocol must be added, and the
+  supervisor's behaviour then reworked to respect the universal spirit
+  of minigrida.
+protocol:
+  name: Jurse
+  package: protocols.jurse
+expe:
+  ground_truth:
+    raster: ./Data/ground_truth/2018_IEEE_GRSS_DFC_GT_TR.tif
+    meta_labels: ./Data/ground_truth/jurse_idx.csv
+  descriptors_script:
+    name: dfc_aps
+    package: descriptors
+    parameters:
+      areas:
+      - 100
+      - 1000
+      moi:
+      - 0.5
+      - 0.9
+      rasters:
+      - ./Data/phase1_rasters/DEM+B_C123/UH17_GEM051_TR.tif
+      - ./Data/phase1_rasters/DEM_C123_3msr/UH17_GEG051_TR.tif
+      treshold: 1e4
+  cross_validation:
+    name: APsCVG
+    package: CVGenerators
+    parameters:
+      n_test: 2
+  classifier:
+    name: RandomForestClassifier
+    package: sklearn.ensemble
+    parameters:
+      min_samples_leaf: 10
+      n_estimators: 50
+      n_jobs: -1
+      random_state: 0
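The classifier entry follows the same name/package convention as the protocol, so the Jurse protocol can presumably instantiate it generically from its parameters. A hedged sketch of what that resolution could look like; this instantiation step itself is not shown anywhere in the diff:

    import importlib
    import yaml

    with open('test_mockup.yml') as f:
        test = yaml.safe_load(f)

    clf_entry = test['expe']['classifier']
    clf_cls = getattr(importlib.import_module(clf_entry['package']),
                      clf_entry['name'])
    clf = clf_cls(**clf_entry['parameters'])
    # -> RandomForestClassifier(min_samples_leaf=10, n_estimators=50,
    #                           n_jobs=-1, random_state=0)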