diff --git a/backend_tests/z_input_files/v2/solver_Biofuel_NIS_translation.xlsx b/backend_tests/z_input_files/v2/solver_Biofuel_NIS_translation.xlsx
new file mode 100644
index 0000000..0f7410b
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_Biofuel_NIS_translation.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_Netherlandsv1ToNISHierarchy2.xlsx b/backend_tests/z_input_files/v2/solver_Netherlandsv1ToNISHierarchy2.xlsx
new file mode 100644
index 0000000..0bd0999
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_Netherlandsv1ToNISHierarchy2.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_Netherlandsv1ToNISHierarchy2_new.xlsx b/backend_tests/z_input_files/v2/solver_Netherlandsv1ToNISHierarchy2_new.xlsx
new file mode 100644
index 0000000..4e913e2
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_Netherlandsv1ToNISHierarchy2_new.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseLabourMultifunctional_withParams.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseLabourMultifunctional_withParams.xlsx
new file mode 100644
index 0000000..82911ba
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseLabourMultifunctional_withParams.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseLabour_errorAstEvaluator.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseLabour_errorAstEvaluator.xlsx
new file mode 100644
index 0000000..d081ed8
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseLabour_errorAstEvaluator.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseLabour_nonlinear.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseLabour_nonlinear.xlsx
new file mode 100644
index 0000000..414eab6
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseLabour_nonlinear.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseWaterBehaveAs.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterBehaveAs.xlsx
new file mode 100644
index 0000000..59c0b3a
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterBehaveAs.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseWaterConflictsMulti.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterConflictsMulti.xlsx
new file mode 100644
index 0000000..9ecdf57
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterConflictsMulti.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseWaterConflicts_problems.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterConflicts_problems.xlsx
new file mode 100644
index 0000000..ce1a206
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterConflicts_problems.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseWaterManyConflicts.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterManyConflicts.xlsx
new file mode 100644
index 0000000..7cfe20b
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterManyConflicts.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCaseWaterObservers.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterObservers.xlsx
new file mode 100644
index 0000000..b8f013c
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCaseWaterObservers.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_SimpleCase_OliveOil_LocalExternal.xlsx b/backend_tests/z_input_files/v2/solver_SimpleCase_OliveOil_LocalExternal.xlsx
new file mode 100644
index 0000000..9456e9e
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_SimpleCase_OliveOil_LocalExternal.xlsx differ
diff --git a/backend_tests/z_input_files/v2/solver_howTo BackInterface.xlsx b/backend_tests/z_input_files/v2/solver_howTo BackInterface.xlsx
new file mode 100644
index 0000000..396dd06
Binary files /dev/null and b/backend_tests/z_input_files/v2/solver_howTo BackInterface.xlsx differ
diff --git a/nexinfosys/command_definitions.py b/nexinfosys/command_definitions.py
index 9ae4a35..cb9c4b0 100644
--- a/nexinfosys/command_definitions.py
+++ b/nexinfosys/command_definitions.py
@@ -127,6 +127,10 @@
             cmd_type=CommandType.analysis,
             execution_class_name="nexinfosys.command_executors.version2.matrix_indicators_command.MatrixIndicatorsCommand"),
 
+    Command(name="lcia_methods", allowed_names=["LCIAMethods"], is_v2=True,
+            cmd_type=CommandType.analysis,
+            execution_class_name="nexinfosys.command_executors.version2.lcia_methods_command.LCIAMethodsCommand"),
+
     Command(name="datasetdata", allowed_names=["DatasetData"], is_v2=True, cmd_type=CommandType.input,
             execution_class_name="nexinfosys.command_executors.version2.dataset_data_command.DatasetDataCommand",
diff --git a/nexinfosys/command_executors/version2/lcia_methods_command.py b/nexinfosys/command_executors/version2/lcia_methods_command.py
new file mode 100644
index 0000000..debd515
--- /dev/null
+++ b/nexinfosys/command_executors/version2/lcia_methods_command.py
@@ -0,0 +1,48 @@
+import json
+from typing import Optional, Dict, Any
+
+from nexinfosys.command_generators import Issue, IssueLocation, IType
+from nexinfosys.common.helper import strcmp, first, PartialRetrievalDictionary
+from nexinfosys.model_services import IExecutableCommand, get_case_study_registry_objects
+from nexinfosys.models.musiasem_concepts import Observer, FactorTypesRelationUnidirectionalLinearTransformObservation, \
+    FactorType, Processor, Indicator
+from nexinfosys.command_executors import BasicCommand, CommandExecutionError, subrow_issue_message
+from nexinfosys.command_field_definitions import get_command_fields_from_class
+from nexinfosys.models.musiasem_concepts_helper import find_or_create_observer, find_processor_by_name
+
+
+class LCIAMethodsCommand(BasicCommand):
+    def __init__(self, name: str):
+        BasicCommand.__init__(self, name, get_command_fields_from_class(self.__class__))
+
+    def _process_row(self, fields: Dict[str, Any], subrow=None) -> None:
+        """
+        Process one row of the LCIAMethods command
+
+        :param fields: dictionary with the parsed values of the row
+        :param subrow: subrow index, used when reporting issues
+        :return: None
+        """
+        # Interface (Type) must exist
+        interface_type = self._get_factor_type_from_field(None, fields["interface"])
+        # (LCIA) Indicator must exist
+        indicator = self._glb_idx.get(Indicator.partial_key(fields["lcia_indicator"]))
+        if len(indicator) == 1:
+            pass
+        elif len(indicator) == 0:
+            self._add_issue(IType.ERROR,
+                            f"Indicator with name '{fields['lcia_indicator']}' not found" + subrow_issue_message(subrow))
+            return
+        else:
+            self._add_issue(IType.WARNING,
+                            f"Indicator with name '{fields['lcia_indicator']}' found {len(indicator)} times" + subrow_issue_message(subrow))
+            return
+
+        # Store LCIA Methods as a new variable.
+        # TODO Use it to prepare a pd.DataFrame previous to calculating Indicators (after solving). Use "to_pickable"
+        lcia_methods = self._state.get("_lcia_methods")
+        if not lcia_methods:
+            lcia_methods = PartialRetrievalDictionary()
+            self._state.set("_lcia_methods", lcia_methods)
+
+        lcia_methods.put(dict(m=fields["lcia_method"],
+                              i=fields["lcia_indicator"],
+                              h=fields["lcia_horizon"]),
+                         (fields["interface"], fields["interface_unit"], fields["lcia_coefficient"]))
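Editor's note: the command above stores each coefficient in a PartialRetrievalDictionary under the "_lcia_methods" state key and leaves the DataFrame preparation as a TODO. A minimal sketch of what that step could look like, assuming the stored entries are re-read as (method, indicator, horizon, interface, unit, coefficient) tuples; the helper name and column labels are illustrative, not part of this changeset:

    # Illustrative sketch only (not part of the diff): the pd.DataFrame the TODO refers to.
    from typing import List, Tuple
    import pandas as pd

    def lcia_methods_to_dataframe(rows: List[Tuple[str, str, str, str, str, float]]) -> pd.DataFrame:
        """Each row: (method, indicator, horizon, interface, interface_unit, coefficient)."""
        df = pd.DataFrame(rows, columns=["Method", "Indicator", "Horizon",
                                         "Interface", "InterfaceUnit", "Coefficient"])
        # One coefficient is expected per (Method, Indicator, Horizon, Interface) combination
        return df.set_index(["Method", "Indicator", "Horizon", "Interface"]).sort_index()

    # Hypothetical coefficients of an invented method
    example = lcia_methods_to_dataframe([
        ("ClimateChangeMethod", "GWP100", "100 years", "CO2Emission", "kg", 1.0),
        ("ClimateChangeMethod", "GWP100", "100 years", "CH4Emission", "kg", 28.0),
    ])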
Use "to_pickable" + lcia_methods = self._state.get("_lcia_methods") + if not lcia_methods: + lcia_methods = PartialRetrievalDictionary() + self._state.set("_lcia_methods", lcia_methods) + lcia_methods.put(dict(m=fields["lcia_method"], + i=fields["lcia_indicator"], + h=fields["lcia_horizon"]), + (fields["interface"], fields["interface_unit"], fields["lcia_coefficient"]) + ) diff --git a/nexinfosys/command_executors/version2/scalar_indicators_command.py b/nexinfosys/command_executors/version2/scalar_indicators_command.py index a03c571..d22fd61 100644 --- a/nexinfosys/command_executors/version2/scalar_indicators_command.py +++ b/nexinfosys/command_executors/version2/scalar_indicators_command.py @@ -45,5 +45,9 @@ def _process_row(self, fields: Dict[str, Any], subrow=None) -> None: benchmarks, IndicatorCategories.factors_expression if strcmp(fields.get("local"), "Yes") else IndicatorCategories.case_study, - fields.get("description")) + fields.get("description"), + fields["indicators_group"], + fields["unit"], + fields["unit_label"], + fields["source"]) self._glb_idx.put(indicator.key(), indicator) diff --git a/nexinfosys/command_field_definitions.py b/nexinfosys/command_field_definitions.py index 0f6d8d8..9b2c40f 100644 --- a/nexinfosys/command_field_definitions.py +++ b/nexinfosys/command_field_definitions.py @@ -10,7 +10,7 @@ time_expression, indicator_expression, code_string, simple_h_name, domain_definition, unit_name, url_parser, \ processor_names, value, list_simple_ident, reference, processor_name, processors_selector_expression, \ interfaces_list_expression, attributes_list_expression, indicators_list_expression, number_interval, pair_numbers, \ - external_ds_name, level_name, expression_with_parameters_or_list_simple_ident + external_ds_name, level_name, expression_with_parameters_or_list_simple_ident, signed_float from nexinfosys.common.constants import SubsystemType, Scope from nexinfosys.common.helper import first, class_full_name from nexinfosys.model_services import IExecutableCommand @@ -114,17 +114,18 @@ "interface_types": [ CommandField(allowed_names=["InterfaceTypeHierarchy"], name="interface_type_hierarchy", parser=simple_ident), CommandField(allowed_names=["InterfaceType"], name="interface_type", mandatory=True, parser=simple_ident), + CommandField(allowed_names=["ParentInterfaceType"], name="parent_interface_type", parser=simple_ident), CommandField(allowed_names=["Sphere"], name="sphere", mandatory=True, allowed_values=spheres, parser=simple_ident), CommandField(allowed_names=["RoegenType"], name="roegen_type", mandatory=True, allowed_values=roegen_types, parser=simple_ident), - CommandField(allowed_names=["ParentInterfaceType"], name="parent_interface_type", parser=simple_ident), - CommandField(allowed_names=["Level"], name="level", parser=level_name, attribute_of=FactorType), - CommandField(allowed_names=["Formula", "Expression"], name="formula", parser=unquoted_string), CommandField(allowed_names=["Description"], name="description", parser=unquoted_string), + CommandField(allowed_names=["Source"], name="qq_source", parser=reference), # Cristina (in "MuSIASEM Interface List" worksheet) CommandField(allowed_names=["Unit"], name="unit", mandatory=True, parser=unit_name), CommandField(allowed_names=["OppositeSubsystemType", "OppositeProcessorType"], name="opposite_processor_type", - default_value=SubsystemType.get_names()[0], allowed_values=SubsystemType.get_names(), parser=simple_ident), + allowed_values=SubsystemType.get_names(), parser=simple_ident), + 
CommandField(allowed_names=["Level"], name="level", parser=level_name, attribute_of=FactorType), + CommandField(allowed_names=["Formula", "Expression"], name="formula", parser=unquoted_string), CommandField(allowed_names=[attributeRegex], name="attributes", many_appearances=True, parser=value), CommandField(allowed_names=["Attributes"], name="attributes", parser=key_value_list) ], @@ -189,7 +190,6 @@ CommandField(allowed_names=["Uncertainty"], name="uncertainty", parser=unquoted_string), CommandField(allowed_names=["Assessment"], name="assessment", parser=unquoted_string), # TODO - #CommandField(allowed_names=["PedigreeMatrix"], name="pedigree_matrix", parser=reference_name), #CommandField(allowed_names=["Pedigree"], name="pedigree", parser=pedigree_code), #CommandField(allowed_names=["RelativeTo"], name="relative_to", parser=simple_ident_plus_unit_name), CommandField(allowed_names=["PedigreeMatrix"], name="pedigree_matrix", parser=reference), @@ -328,6 +328,30 @@ CommandField(allowed_names=["Year"], name="year", mandatory="entry_type in ('article', 'book', 'inbook', 'incollection', 'inproceedings', 'mastersthesis', 'phdthesis', 'proceedings', 'techreport')", parser=unquoted_string) ], + # Used only for help elaboration + "datasetqry": [ + CommandField(allowed_names=["InputDataset"], name="inputdataset", parser=external_ds_name), + CommandField(allowed_names=["AvailableAtDateTime"], name="availableatdatetime", parser=unquoted_string), + CommandField(allowed_names=["StartTime"], name="starttime", parser=time_expression), + CommandField(allowed_names=["EndTime"], name="endtime", parser=time_expression), + CommandField(allowed_names=["ResultDimensions"], name="resultdimensions", parser=simple_ident), + CommandField(allowed_names=["ResultMeasures"], name="resultmeasures", parser=simple_ident), + CommandField(allowed_names=["ResultMeasuresAggregation"], name="resultmeasuresaggregation", + default_value=aggregators_list[0], allowed_values=aggregators_list, parser=simple_ident), + CommandField(allowed_names=["ResultMeasureName"], name="resultmeasurename", parser=simple_ident), + CommandField(allowed_names=["OutputDataset"], name="outputdataset", parser=simple_ident), + ], + + # Analysis commands + + "problem_statement": [ + CommandField(allowed_names=["Scenario"], name="scenario_name", parser=simple_ident), + CommandField(allowed_names=["Parameter"], name="parameter", mandatory=True, parser=simple_ident), + CommandField(allowed_names=["Value"], name="parameter_value", mandatory=True, + parser=expression_with_parameters_or_list_simple_ident), # list_simple_ident + CommandField(allowed_names=["Description"], name="description", parser=unquoted_string) + ], + "scalar_indicator_benchmarks": [ CommandField(allowed_names=["BenchmarkGroup"], name="benchmark_group", default_value=benchmark_groups[0], allowed_values=benchmark_groups, parser=simple_ident), @@ -340,14 +364,19 @@ CommandField(allowed_names=["Description"], name="description", parser=unquoted_string) ], + # Modified to consider Cristina's annotations "scalar_indicators": [ - CommandField(allowed_names=["Indicator"], name="indicator_name", mandatory=True, parser=simple_ident), + CommandField(allowed_names=["IndicatorsGroup"], name="indicators_group", parser=simple_ident), # IndicatorType (Indicators) + CommandField(allowed_names=["Indicator"], name="indicator_name", mandatory=True, parser=simple_ident), # IndicatorName (Indicators) CommandField(allowed_names=["Local"], name="local", mandatory=True, allowed_values=yes_no, parser=simple_ident), 
-        CommandField(allowed_names=["Formula", "Expression"], name="formula", mandatory=True, parser=indicator_expression),
+        CommandField(allowed_names=["Formula", "Expression"], name="formula", mandatory=True, parser=indicator_expression),  # A call to LCIAMethod function, with parameter IndicatorMethod (Indicators)
+        CommandField(allowed_names=["Unit"], name="unit", mandatory=True, parser=unit_name),  # IndicatorUnit (Indicators)
         # TODO Disabled: apply the formula to ALL processors (and ignore those where it cannot be evaluated)
         # CommandField(allowed_names=["Processors"], name="processors_selector", parser=processors_selector_expression)
         CommandField(allowed_names=["Benchmarks", "Benchmark"], name="benchmarks", parser=list_simple_ident),
-        CommandField(allowed_names=["Description"], name="description", parser=unquoted_string)
+        CommandField(allowed_names=["UnitLabel"], name="unit_label", mandatory=False, parser=unquoted_string),
+        CommandField(allowed_names=["Description"], name="description", parser=unquoted_string),
+        CommandField(allowed_names=["Reference", "Source"], name="source", mandatory=False, parser=reference),  # SAME (Indicators)
     ],
 
     "matrix_indicators": [
@@ -360,25 +389,15 @@
         CommandField(allowed_names=["Description"], name="description", parser=unquoted_string)
     ],
 
-    "problem_statement": [
-        CommandField(allowed_names=["Scenario"], name="scenario_name", parser=simple_ident),
-        CommandField(allowed_names=["Parameter"], name="parameter", mandatory=True, parser=simple_ident),
-        CommandField(allowed_names=["Value"], name="parameter_value", mandatory=True, parser=expression_with_parameters_or_list_simple_ident),  # list_simple_ident
-        CommandField(allowed_names=["Description"], name="description", parser=unquoted_string)
-    ],
-
-    # Used only for help elaboration
-    "datasetqry": [
-        CommandField(allowed_names=["InputDataset"], name="inputdataset", parser=external_ds_name),
-        CommandField(allowed_names=["AvailableAtDateTime"], name="availableatdatetime", parser=unquoted_string),
-        CommandField(allowed_names=["StartTime"], name="starttime", parser=time_expression),
-        CommandField(allowed_names=["EndTime"], name="endtime", parser=time_expression),
-        CommandField(allowed_names=["ResultDimensions"], name="resultdimensions", parser=simple_ident),
-        CommandField(allowed_names=["ResultMeasures"], name="resultmeasures", parser=simple_ident),
-        CommandField(allowed_names=["ResultMeasuresAggregation"], name="resultmeasuresaggregation",
-                     default_value=aggregators_list[0], allowed_values=aggregators_list, parser=simple_ident),
-        CommandField(allowed_names=["ResultMeasureName"], name="resultmeasurename", parser=simple_ident),
-        CommandField(allowed_names=["OutputDataset"], name="outputdataset", parser=simple_ident),
+    # NEW command, implementation of Cristina's suggestions
+    "lcia_methods": [
+        CommandField(allowed_names=["LCIAMethod"], name="lcia_method", mandatory=True, parser=simple_ident),  # IndicatorMethod (Indicators), SAME (LCIAmethod)
+        CommandField(allowed_names=["LCIAIndicator"], name="lcia_indicator", mandatory=True, parser=simple_ident),  # IndicatorName (Indicators), SAME (LCIAmethod)
+        CommandField(allowed_names=["LCIAHorizon"], name="lcia_horizon", mandatory=True, parser=simple_ident),  # IndicatorTemporal (Indicators)
+        CommandField(allowed_names=["Interface"], name="interface", mandatory=True, parser=simple_ident),  # SAME (LCIAmethod)
+        CommandField(allowed_names=["InterfaceUnit"], name="interface_unit", mandatory=True, parser=unit_name),  # Not present, but needed to warrant independence from specification of InterfaceTypes
+        CommandField(allowed_names=["LCIACoefficient"], name="lcia_coefficient", mandatory=True,  # SAME (LCIAmethod)
+                     parser=signed_float)
     ],
 }
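Editor's note: for orientation, a hypothetical LCIAMethods row as it would arrive at LCIAMethodsCommand._process_row after parsing with the column specification above; all values are invented:

    # Hypothetical parsed row of the new "lcia_methods" command (values invented).
    # The keys are the CommandField names declared above.
    fields = {
        "lcia_method": "ClimateChangeMethod",   # LCIAMethod
        "lcia_indicator": "GWP100",             # LCIAIndicator (must exist as an Indicator)
        "lcia_horizon": "100 years",            # LCIAHorizon
        "interface": "CH4Emission",             # Interface (must exist as an InterfaceType)
        "interface_unit": "kg",                 # InterfaceUnit
        "lcia_coefficient": 28.0,               # LCIACoefficient (signed float)
    }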
diff --git a/nexinfosys/command_generators/__init__.py b/nexinfosys/command_generators/__init__.py
index 009143d..cf531ef 100644
--- a/nexinfosys/command_generators/__init__.py
+++ b/nexinfosys/command_generators/__init__.py
@@ -92,5 +92,16 @@ def check(self, attribute, value):
      "kwargs": None,
      "special_kwargs": {"ProcessorsMap": "processors_map", "ProcessorsDOM": "processors_dom",
                         "DataFrameGroup": "df_group", "IndicatorsDataFrameGroup": "df_indicators_group"}},
+    {"name": "nancount",
+     "full_name": "nexinfosys.command_generators.parser_ast_evaluators.aggregator_nan_count",
+     "kwargs": None,
+     "special_kwargs": {"ProcessorsMap": "processors_map", "ProcessorsDOM": "processors_dom",
+                        "DataFrameGroup": "df_group", "IndicatorsDataFrameGroup": "df_indicators_group"}},
+    {"name": "lciamethod",
+     "full_name": "nexinfosys.command_generators.parser_ast_evaluators.lcia_method",
+     "kwargs": None,
+     "special_kwargs": {"ProcessorsMap": "processors_map", "ProcessorsDOM": "processors_dom",
+                        "DataFrameGroup": "df_group",
+                        "IndicatorsDataFrameGroup": "df_indicators_group"}},
 ]
 })
diff --git a/nexinfosys/command_generators/parser_ast_evaluators.py b/nexinfosys/command_generators/parser_ast_evaluators.py
index 0efac08..92106a9 100644
--- a/nexinfosys/command_generators/parser_ast_evaluators.py
+++ b/nexinfosys/command_generators/parser_ast_evaluators.py
@@ -176,7 +176,7 @@ def obtain_subset_of_processors(processors_selector: str, serialized_model: lxml
 def aggregator_generic(funct, field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
     """
-    SUM "field" for all processors meeting the XQuery and scope
+    AGGREGATE "field" for all processors meeting the XQuery and scope, applying aggregator "funct"
     :param field:
     :param xquery:
     :param processors_dom:
@@ -210,7 +210,31 @@
 def aggregator_sum(field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
-    return aggregator_generic(np.sum, field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
+    return aggregator_generic(np.nansum, field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
+
+
+def aggregator_avg(field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
+    return aggregator_generic(np.nanmean, field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
+
+
+def aggregator_max(field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
+    return aggregator_generic(np.nanmax, field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
+
+
+def aggregator_min(field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
+    return aggregator_generic(np.nanmin, field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
+
+
+def aggregator_count(field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
+    return aggregator_generic(lambda v: len(v), field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
+
+
+def aggregator_nan_count(field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
+    return aggregator_generic(lambda v: sum(np.isnan(v)), field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
+
+
+def lcia_method(field: str, xquery: str=None, scope: str='Total', processors_dom=None, processors_map=None, df_group=None, df_indicators_group=None):
+    return aggregator_generic(lambda v: len(v), field, xquery, scope, processors_dom, processors_map, df_group, df_indicators_group)
 
 
 # Comparison operators
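Editor's note: a small, self-contained illustration of how the NaN-aware NumPy reductions used by these aggregators behave when some processors lack an observation for the aggregated field (NumPy's NaN-aware mean is np.nanmean):

    # Standalone illustration (not part of the diff).
    import numpy as np

    values = np.array([3.0, np.nan, 5.0])          # e.g. one processor lacks a value for the field

    print(np.sum(values))                          # nan  -> one missing value poisons the plain sum
    print(np.nansum(values))                       # 8.0  -> missing values are ignored
    print(np.nanmean(values))                      # 4.0  -> NaN-aware mean
    print(np.nanmax(values), np.nanmin(values))    # 5.0 3.0
    print(int(np.sum(np.isnan(values))))           # 1    -> what the "nancount" aggregator reports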
diff --git a/nexinfosys/command_generators/parser_spreadsheet_utils.py b/nexinfosys/command_generators/parser_spreadsheet_utils.py
index 1579f54..798ef5c 100644
--- a/nexinfosys/command_generators/parser_spreadsheet_utils.py
+++ b/nexinfosys/command_generators/parser_spreadsheet_utils.py
@@ -227,7 +227,10 @@ def copy_worksheet(self):
         if self._copy_style:
             self.target.sheet_format = copy(self.source.sheet_format)
+            # if hasattr(self.source, "_merged_cells"):
             self.target._merged_cells = copy(self.source._merged_cells)
+            # else:
+            #     self.target._merged_cells = None
             self.target.sheet_properties = copy(self.source.sheet_properties)
 
     def _copy_cells(self):
diff --git a/nexinfosys/embedded_nis.py b/nexinfosys/embedded_nis.py
index b9ade7c..96d2b8b 100644
--- a/nexinfosys/embedded_nis.py
+++ b/nexinfosys/embedded_nis.py
@@ -29,6 +29,7 @@ def __init__(self):
         self._dataframe_names = []  # type: List[str]
         self._dataframes = []  # type: List[pd.DataFrame]
         self._issues = None
+        self._state = None
         initialize_configuration()
         # Disable optimizations
         nexinfosys.set_global_configuration_variable("ENABLE_CYTHON_OPTIMIZATIONS", False)
@@ -250,7 +251,7 @@ def submit(self) -> List:
             # STORE the issues
             self._issues = issues
-
+            self._state = self._isession.state
             return self._issues
         else:
             raise Exception("Call 'open_session' before submitting")
@@ -279,6 +280,8 @@ def solve(self) -> List:
                                  description=f"UNCONTROLLED CONDITION: {exc_info}. Please, contact the development team.",
                                  location=None)]
 
+            self._state = self._isession.state
+
             return self._issues
         else:
             raise Exception("Call 'open_session' before submitting")
@@ -287,6 +290,9 @@ def submit_and_solve(self):
         self.submit()
         return self.solve()
 
+    def get_state(self):
+        return self._state
+
     # --------------- AFTER SUBMISSION ---------------
 
     def query_parameters(self):
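Editor's note: with get_state() exposed, a caller can retrieve the session State after a run. A rough usage sketch; the exported class name and the workbook-loading step are assumptions, not shown in this diff:

    # Hedged usage sketch (class name and loading step assumed).
    from nexinfosys.embedded_nis import NIS  # assumed export name

    nis = NIS()
    nis.open_session()  # signature not shown here; may take arguments
    # ... load a case-study workbook into the open session (API not shown in this diff) ...
    issues = nis.submit_and_solve()   # submit() then solve(); both now capture the State
    state = nis.get_state()           # State of the interactive session
    lcia = state.get("_lcia_methods") if state else None  # e.g. the PartialRetrievalDictionary stored by LCIAMethodsCommand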
diff --git a/nexinfosys/models/musiasem_concepts.py b/nexinfosys/models/musiasem_concepts.py
index 78f9dd3..7d20f16 100644
--- a/nexinfosys/models/musiasem_concepts.py
+++ b/nexinfosys/models/musiasem_concepts.py
@@ -1388,9 +1388,9 @@ def clone(self, state: Union[PartialRetrievalDictionary, State], objects_already
         # Local indicators
         for li in self._local_indicators:
-            formula = li._formula  # TODO Adapt formula of the new indicator to the new Factors
-            li_ = Indicator(li._name, formula, li, li._benchmark, li._indicator_category)
-            self._local_indicators.append(li_)
+            # formula = li._formula  # TODO Adapt formula of the new indicator to the new Factors
+            # li_ = Indicator(li._name, formula, li, li._benchmark, li._indicator_category)
+            self._local_indicators.append(li)
 
         # Clone child Processors (look for part-of relations)
         children_processors: Set[Processor] = set()
@@ -2813,7 +2813,8 @@ class Indicator(Nameable, Identifiable, Encodable):
     * Attached to CaseStudy
     """
     def __init__(self, name: str, formula: str, from_indicator: Optional["Indicator"], processors_selector: str,
-                 benchmarks: List[Benchmark], indicator_category: IndicatorCategories, description=None):
+                 benchmarks: List[Benchmark], indicator_category: IndicatorCategories, description=None,
+                 indicators_group=None, unit=None, unit_label=None, source=None):
         Identifiable.__init__(self)
         Nameable.__init__(self, name)
         self._formula = formula
@@ -2822,6 +2823,10 @@
         self._benchmarks = benchmarks
         self._indicator_category = indicator_category
         self._description = description
+        self._indicators_group = indicators_group
+        self._unit = unit
+        self._unit_label = unit_label
+        self._source = source
 
     def encode(self):
         d = Encodable.parents_encode(self, __class__)
@@ -2832,7 +2837,11 @@
             'processors_selector': self._processors_selector,
             'benchmarks': self._benchmarks,
             'indicator_category': getattr(self._indicator_category, "name", None),
-            'description': self._description
+            'description': self._description,
+            'indicators_group': self._indicators_group,
+            'unit': self._unit,
+            'unit_label': self._unit_label,
+            'source': self._source
         })
         return d
@@ -2921,6 +2930,10 @@
     def interfaces_selector(self):
         return self._interfaces_selector
 
     def indicators_selector(self):
         return self._indicators_selector
 
+    @property
+    def attributes_selector(self):
+        return self._attributes_selector
+
     @property
     def description(self):
         return self._description
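Editor's note: the extended Indicator constructor now carries presentation metadata alongside the formula. A minimal construction sketch using only the signature shown above; all values are invented, the import path is assumed, and the Formula syntax is hypothetical:

    # Illustrative construction of the extended Indicator (not part of the diff).
    from nexinfosys.models.musiasem_concepts import Indicator, IndicatorCategories  # import path assumed

    ind = Indicator(name="GWP100",
                    formula='LCIAMethod("ClimateChangeMethod")',  # hypothetical expression
                    from_indicator=None,
                    processors_selector=None,
                    benchmarks=[],
                    indicator_category=IndicatorCategories.factors_expression,  # "Local" = Yes
                    description="Global warming potential, 100-year horizon",
                    indicators_group="Emissions",
                    unit="kg CO2 eq",
                    unit_label="kg CO2-eq",
                    source=None)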
diff --git a/nexinfosys/solving/flow_graph_solver.py b/nexinfosys/solving/flow_graph_solver.py
index 4ffc8f5..0bc0662 100644
--- a/nexinfosys/solving/flow_graph_solver.py
+++ b/nexinfosys/solving/flow_graph_solver.py
@@ -25,6 +25,7 @@
 * Observers (different versions). Take average always
 
 """
+import traceback
 from collections import defaultdict
 from copy import deepcopy
 from enum import Enum
@@ -1174,6 +1175,9 @@ def init_processor_full_names(registry: PartialRetrievalDictionary):
             processor.full_hierarchy_name = processor.full_hierarchy_names(registry)[0]
 
 
+# ##########################################
+# ## MAIN ENTRY POINT ######################
+# ##########################################
 def flow_graph_solver(global_parameters: List[Parameter], problem_statement: ProblemStatement,
                       global_state: State, dynamic_scenario: bool) -> List[Issue]:
     """
@@ -1361,6 +1365,7 @@
         return issues
 
     except SolvingException as e:
+        traceback.print_exc()  # Print the exception traceback (to stderr)
         return [Issue(IType.ERROR, e.args[0])]
 
@@ -1632,6 +1637,7 @@ def export_solver_data(datasets, data, dynamic_scenario, glb_idx, global_paramet
     ds_stakeholders = prepare_benchmarks_to_stakeholders(benchmarks)  # Find all benchmarks. For each benchmark, create a row per stakeholder -> return the dataframe
 
     # Prepare Matrices
+    # TODO df_attributes
     matrices = prepare_matrix_indicators(matrix_indicators, glb_idx, dom_tree, p_map, df, df_local_indicators, dynamic_scenario)
     #
@@ -2141,6 +2147,16 @@ def prepare_matrix_indicator(indicator: MatrixIndicator) -> pd.DataFrame:
         i_names = get_adapted_case_dataframe_filter(indicator_results, "Indicator", inds)
         indicators_df = indicators_df.query('Indicator in [' + ', '.join(['"' + _ + '"' for _ in i_names]) + ']')
 
+    # Filter Attributes
+    if indicator.attributes_selector:
+        attribs = set([_.strip() for _ in indicator.attributes_selector.split(",")])
+        if not case_sensitive:
+            attribs = set([_.lower() for _ in attribs])
+
+        # Attributes
+        i_names = get_adapted_case_dataframe_filter(interface_results, "Interface", attribs)
+        attributes_df = interfaces_df.query('Interface in [' + ', '.join(['"' + _ + '"' for _ in i_names]) + ']')
+
     # Pivot Table: Dimensions (rows) are (Scenario, Period, Processor[, Scope])
     # Dimensions (columns) are (Interface, Orientation -of Interface-)
     # Measures (cells) are (Value)
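Editor's note: the attribute filter above follows the existing pattern of assembling a DataFrame.query() string from quoted names. As an illustration only (not part of the change), the same selection expressed with a boolean mask, which avoids string assembly and quoting issues:

    # Standalone illustration of the name-filtering pattern used above (not part of the diff).
    import pandas as pd

    df = pd.DataFrame({"Interface": ["CO2Emission", "CH4Emission", "WaterUse"],
                       "Value": [1.0, 2.0, 3.0]})
    names = {"CO2Emission", "CH4Emission"}   # e.g. entries of an attributes/interfaces selector

    # Pattern used in the solver: build a query string with quoted names
    by_query = df.query('Interface in [' + ', '.join('"' + n + '"' for n in names) + ']')

    # Equivalent boolean-mask formulation
    by_isin = df[df["Interface"].isin(names)]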