Commit c8fed172 authored by Stefano Borini, committed by GitHub

Merge pull request #77 from force-h2020/unify-data-source-and-kpi-in-evaluation

Removed difference in KPI vs DataSource - Step 1
parents 18d58bbf a35e1c0d
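The substance of the change: the two near-identical evaluation loops, _compute_ds_results and _compute_kpi_results, collapse into a single generic _compute_layer_results that receives the evaluator models plus the name of the factory method that creates the evaluator. A minimal sketch of that getattr-based dispatch, using hypothetical stand-in factories rather than the real BDSS classes:

class NullDataSourceFactory:
    name = "null_ds"

    def create_data_source(self):
        return "a data source instance"


class NullKPICalculatorFactory:
    name = "null_kpic"

    def create_kpi_calculator(self):
        return "a kpi calculator instance"


for factory, creator_method_name in [
        (NullDataSourceFactory(), "create_data_source"),
        (NullKPICalculatorFactory(), "create_kpi_calculator")]:
    # getattr looks up the bound creator method by name; calling it
    # builds the evaluator without the loop knowing the concrete type.
    evaluator = getattr(factory, creator_method_name)()
    print(factory.name, "->", evaluator)

The diff below applies exactly this pattern inside the driver.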
@@ -36,31 +36,57 @@ class CoreEvaluationDriver(BaseCoreDriver):
         mco_data_values = self._get_data_values_from_mco(mco_model,
                                                          mco_communicator)
 
-        ds_results = self._compute_ds_results(
+        ds_results = self._compute_layer_results(
             mco_data_values,
-            workflow)
+            workflow.data_sources,
+            "create_data_source"
+        )
 
-        kpi_results = self._compute_kpi_results(
+        kpi_results = self._compute_layer_results(
             ds_results + mco_data_values,
-            workflow)
+            workflow.kpi_calculators,
+            "create_kpi_calculator"
+        )
 
         mco_communicator.send_to_mco(mco_model, kpi_results)
 
-    def _compute_ds_results(self, environment_data_values, workflow):
+    def _compute_layer_results(self,
+                               environment_data_values,
+                               evaluator_models,
+                               creator_method_name
+                               ):
         """Helper routine.
-        Performs the evaluation of the DataSources, passing the current
-        environment data values (the MCO data)
+        Performs the evaluation of a single layer.
+        At the moment we have a single layer of DataSources followed
+        by a single layer of KPI calculators.
+
+        Parameters
+        ----------
+        environment_data_values: list
+            A list of data values to submit to the evaluators.
+
+        evaluator_models: list
+            A list of the models for all the evaluators (data source
+            or kpi calculator)
+
+        creator_method_name: str
+            A string of the creator method for the evaluator on the
+            factory (e.g. create_kpi_calculator)
+
+            NOTE: The above parameter is going to go away as soon as
+            we move to unlimited layers and remove the distinction
+            between data sources and KPI calculators.
         """
-        ds_results = []
-        for ds_model in workflow.data_sources:
-            ds_factory = ds_model.factory
-            data_source = ds_factory.create_data_source()
+        results = []
+        for model in evaluator_models:
+            factory = model.factory
+            evaluator = getattr(factory, creator_method_name)()
 
             # Get the slots for this data source. These must be matched to
             # the appropriate values in the environment data values.
             # Matching is by position.
-            in_slots, out_slots = data_source.slots(ds_model)
+            in_slots, out_slots = evaluator.slots(model)
 
             # Binding performs the extraction of the specified data values
             # satisfying the above input slots from the environment data values
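The loop above asks each evaluator for its slots via evaluator.slots(model) and matches them to data values by position. The slot protocol itself is not part of this diff; the following is a rough sketch under that assumption, with Slot and AdditionDataSource as hypothetical stand-ins:

from collections import namedtuple

# Hypothetical slot descriptor: just a declared value type.
Slot = namedtuple("Slot", ["type"])


class AdditionDataSource:
    def slots(self, model):
        # Two numeric inputs in, one numeric output out. The driver
        # fills the inputs, and checks the outputs, purely by position.
        return ((Slot("NUMBER"), Slot("NUMBER")), (Slot("NUMBER"),))

    def run(self, model, data_values):
        # data_values arrives already bound in slot order.
        return [data_values[0] + data_values[1]]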
@@ -71,36 +97,36 @@ class CoreEvaluationDriver(BaseCoreDriver):
             # needed by the input slots.
             passed_data_values = self._bind_data_values(
                 environment_data_values,
-                ds_model.input_slot_maps,
+                model.input_slot_maps,
                 in_slots)
 
             # execute data source, passing only relevant data values.
             logging.info("Evaluating for Data Source {}".format(
-                ds_factory.name))
+                factory.name))
 
-            res = data_source.run(ds_model, passed_data_values)
+            res = evaluator.run(model, passed_data_values)
 
             if len(res) != len(out_slots):
                 error_txt = (
                     "The number of data values ({} values) returned"
-                    " by the DataSource '{}' does not match the number"
+                    " by '{}' does not match the number"
                     " of output slots it specifies ({} values)."
-                    " This is likely a DataSource plugin error.").format(
-                    len(res), ds_factory.name, len(out_slots)
+                    " This is likely a plugin error.").format(
+                    len(res), factory.name, len(out_slots)
                 )
 
                 logging.error(error_txt)
                 raise RuntimeError(error_txt)
 
-            if len(res) != len(ds_model.output_slot_names):
+            if len(res) != len(model.output_slot_names):
                 error_txt = (
                     "The number of data values ({} values) returned"
-                    " by the DataSource '{}' does not match the number"
+                    " by '{}' does not match the number"
                     " of user-defined names specified ({} values)."
-                    " This is either a DataSource plugin error or a file"
+                    " This is either a plugin error or a file"
                     " error.").format(
                     len(res),
-                    ds_factory.name,
-                    len(ds_model.output_slot_names)
+                    factory.name,
+                    len(model.output_slot_names)
                 )
 
                 logging.error(error_txt)
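_bind_data_values, called at the top of this hunk, is also outside the diff. A speculative sketch of the positional binding the comments describe, assuming (hypothetically) that each entry of input_slot_maps names the environment value that fills the input slot at the same position:

def bind_data_values(environment_data_values, input_slot_maps, in_slots):
    # Index the environment values by name for lookup.
    by_name = {dv.name: dv for dv in environment_data_values}
    # Positional matching: the i-th slot map names the environment
    # value that fills the i-th input slot.
    return [by_name[slot_map.name]
            for slot_map, _slot in zip(input_slot_maps, in_slots)]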
@@ -108,67 +134,14 @@ class CoreEvaluationDriver(BaseCoreDriver):
             # At this point, the returned data values are unnamed.
             # Add the names as specified by the user.
-            for dv, output_slot_name in zip(res, ds_model.output_slot_names):
+            for dv, output_slot_name in zip(res, model.output_slot_names):
                 dv.name = output_slot_name
 
-            ds_results.extend(res)
+            results.extend(res)
 
-        # Finally, return all the computed data values from all data sources,
+        # Finally, return all the computed data values from all evaluators,
         # properly named.
-        return ds_results
-
-    def _compute_kpi_results(self, environment_data_values, workflow):
-        """Perform evaluation of all KPI calculators.
-        environment_data_values contains all data values provided from
-        the MCO and data sources.
-        """
-        kpi_results = []
-        for kpic_model in workflow.kpi_calculators:
-            kpic_factory = kpic_model.factory
-            kpi_calculator = kpic_factory.create_kpi_calculator()
-
-            in_slots, out_slots = kpi_calculator.slots(kpic_model)
-
-            passed_data_values = self._bind_data_values(
-                environment_data_values,
-                kpic_model.input_slot_maps,
-                in_slots)
-
-            logging.info("Evaluating for KPICalculator {}".format(
-                kpic_factory.name))
-
-            res = kpi_calculator.run(kpic_model, passed_data_values)
-
-            if len(res) != len(out_slots):
-                error_txt = (
-                    "The number of data values ({} values) returned by"
-                    " the KPICalculator '{}' does not match the"
-                    " number of output slots ({} values). This is"
-                    " likely a KPICalculator plugin error."
-                ).format(len(res), kpic_factory.name, len(out_slots))
-
-                logging.error(error_txt)
-                raise RuntimeError(error_txt)
-
-            if len(res) != len(kpic_model.output_slot_names):
-                error_txt = (
-                    "The number of data values ({} values) returned by"
-                    " the KPICalculator '{}' does not match the"
-                    " number of user-defined names specified ({} values)."
-                    " This is either an input file error or a plugin"
-                    " error."
-                ).format(len(res), kpic_factory.name,
-                         len(kpic_model.output_slot_names))
-
-                logging.error(error_txt)
-                raise RuntimeError(error_txt)
-
-            for kpi, output_slot_name in zip(
-                    res, kpic_model.output_slot_names):
-                kpi.name = output_slot_name
-
-            kpi_results.extend(res)
-
-        return kpi_results
+        return results
 
     def _get_data_values_from_mco(self, model, communicator):
         """Helper method.
         ...
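The NOTE in the new docstring anticipates Step 2: unlimited layers and no DataSource/KPI distinction. Under that assumption, evaluation would become a fold over layers in which each layer sees everything produced so far, mirroring how the KPI layer already receives ds_results + mco_data_values. A hedged sketch (layers and create_evaluator are hypothetical names, not current API):

def compute_all_layers(driver, mco_data_values, layers):
    # "layers" is a hypothetical list of lists of evaluator models, and
    # "create_evaluator" a hypothetical unified creator method name.
    environment = list(mco_data_values)
    for layer in layers:
        # Each layer sees everything produced so far, just as KPI
        # calculators currently see ds_results + mco_data_values.
        environment += driver._compute_layer_results(
            environment, layer, "create_evaluator")
    return environment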
@@ -228,7 +228,7 @@ class TestCoreEvaluationDriver(unittest.TestCase):
         with self.assertRaisesRegexp(
                 RuntimeError,
                 "The number of data values \(1 values\)"
-                " returned by the DataSource 'null_ds' does not match"
+                " returned by 'null_ds' does not match"
                 " the number of output slots"):
             driver.application_started()
@@ -243,7 +243,7 @@ class TestCoreEvaluationDriver(unittest.TestCase):
         with self.assertRaisesRegexp(
                 RuntimeError,
                 "The number of data values \(1 values\)"
-                " returned by the DataSource 'null_ds' does not match"
+                " returned by 'null_ds' does not match"
                 " the number of user-defined names"):
             driver.application_started()
@@ -258,7 +258,7 @@ class TestCoreEvaluationDriver(unittest.TestCase):
         with self.assertRaisesRegexp(
                 RuntimeError,
                 "The number of data values \(1 values\)"
-                " returned by the KPICalculator 'null_kpic' does not match"
+                " returned by 'null_kpic' does not match"
                 " the number of output slots"):
             driver.application_started()
@@ -273,6 +273,6 @@ class TestCoreEvaluationDriver(unittest.TestCase):
         with self.assertRaisesRegexp(
                 RuntimeError,
                 "The number of data values \(1 values\)"
-                " returned by the KPICalculator 'null_kpic' does not match"
+                " returned by 'null_kpic' does not match"
                 " the number of user-defined names"):
             driver.application_started()
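The test updates only shorten the expected messages. Note that assertRaisesRegexp treats its second argument as a regular expression, which is why the parentheses are escaped; under Python 3 the method is spelled assertRaisesRegex, with Regexp kept as a deprecated alias. A self-contained illustration of the matching:

import unittest


class RegexpMatchExample(unittest.TestCase):
    def test_message_is_matched_as_regex(self):
        # The pattern is searched within the exception message, so a
        # prefix of the new, shorter wording is enough to match.
        with self.assertRaisesRegex(
                RuntimeError,
                r"The number of data values \(1 values\)"
                r" returned by 'null_ds' does not match"):
            raise RuntimeError(
                "The number of data values (1 values) returned by"
                " 'null_ds' does not match the number of output slots"
                " it specifies (3 values).")


if __name__ == "__main__":
    unittest.main()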