diff --git a/scikeras/_saving_utils.py b/scikeras/_saving_utils.py
index e2170a8b..7b2b5869 100644
--- a/scikeras/_saving_utils.py
+++ b/scikeras/_saving_utils.py
@@ -26,8 +26,8 @@ def pack_keras_model(
     tp = type(model)
     out = BytesIO()
     if tp not in keras.saving.object_registration.GLOBAL_CUSTOM_OBJECTS:
-        module = '.'.join(tp.__qualname__.split('.')[:-1])
-        name = tp.__qualname__.split('.')[-1]
+        module = ".".join(tp.__qualname__.split(".")[:-1])
+        name = tp.__qualname__.split(".")[-1]
         keras.saving.register_keras_serializable(module, name)(tp)
     save_model(model, out)
     model_bytes = np.asarray(memoryview(out.getvalue()))
diff --git a/scikeras/_utils.py b/scikeras/_utils.py
index 0258bdbf..4d054cbc 100644
--- a/scikeras/_utils.py
+++ b/scikeras/_utils.py
@@ -1,5 +1,4 @@
 import inspect
-from types import FunctionType
 from typing import Any, Callable, Dict, Iterable, Mapping, Sequence, Type, Union

 from keras import losses as losses_mod
diff --git a/scikeras/wrappers.py b/scikeras/wrappers.py
index a12b6a98..3156476a 100644
--- a/scikeras/wrappers.py
+++ b/scikeras/wrappers.py
@@ -6,9 +6,10 @@
 from collections import defaultdict
 from typing import Any, Callable, Dict, Iterable, List, Mapping, Set, Tuple, Type, Union

-import numpy as np
-import tensorflow as tf
 import keras
+import numpy as np
+from keras import losses as losses_module
+from keras.models import Model
 from scipy.sparse import isspmatrix, lil_matrix
 from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
 from sklearn.exceptions import NotFittedError
@@ -18,8 +19,6 @@
 from sklearn.utils.class_weight import compute_sample_weight
 from sklearn.utils.multiclass import type_of_target
 from sklearn.utils.validation import _check_sample_weight, check_array, check_X_y
-from keras import losses as losses_module
-from keras.models import Model

 from scikeras._utils import (
     accepts_kwargs,
@@ -381,7 +380,9 @@ def _get_compile_kwargs(self):
                 strict=False,
             ),
         )
-        if compile_kwargs["metrics"] is not None and not isinstance(compile_kwargs['metrics'], (dict, list)):
+        if compile_kwargs["metrics"] is not None and not isinstance(
+            compile_kwargs["metrics"], (dict, list)
+        ):
             # Keras expects a list or dict of metrics, not a single metric
             compile_kwargs["metrics"] = [compile_kwargs["metrics"]]
         return compile_kwargs
@@ -537,7 +538,7 @@ def _fit_keras_model(
         self.history_ = defaultdict(list)

         for key, val in hist.history.items():
-            if not (key == 'loss' or key[:4] == 'val_'):
+            if not (key == "loss" or key[:4] == "val_"):
                 try:
                     key = metric_name(key)
                 except ValueError:
diff --git a/tests/multi_output_models.py b/tests/multi_output_models.py
index 33a135df..1b213a07 100644
--- a/tests/multi_output_models.py
+++ b/tests/multi_output_models.py
@@ -1,8 +1,8 @@
 from typing import List

 import numpy as np
-from sklearn.utils.multiclass import type_of_target
 from keras.backend import floatx as tf_floatx
+from sklearn.utils.multiclass import type_of_target

 from scikeras.utils.transformers import ClassifierLabelEncoder
 from scikeras.wrappers import KerasClassifier
diff --git a/tests/test_api.py b/tests/test_api.py
index 6405753f..a98c7760 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -3,8 +3,15 @@
 from functools import partial
 from typing import Any, Dict

+import keras
 import numpy as np
 import pytest
+from keras import backend as K
+from keras import losses as losses_module
+from keras import metrics as metrics_module
+from keras.layers import Conv2D, Dense, Flatten, Input
+from keras.models import Model, Sequential
+from keras.utils import to_categorical
 from sklearn.calibration import CalibratedClassifierCV
 from sklearn.datasets import load_diabetes, load_digits, load_iris
 from sklearn.ensemble import (
@@ -17,13 +24,6 @@
 from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
 from sklearn.pipeline import Pipeline
 from sklearn.preprocessing import StandardScaler
-import keras
-from keras import backend as K
-from keras import losses as losses_module
-from keras import metrics as metrics_module
-from keras.layers import Conv2D, Dense, Flatten, Input
-from keras.models import Model, Sequential
-from keras.utils import to_categorical

 from scikeras.wrappers import KerasClassifier, KerasRegressor
diff --git a/tests/test_basewrapper.py b/tests/test_basewrapper.py
index 1d00c94f..a25bff76 100644
--- a/tests/test_basewrapper.py
+++ b/tests/test_basewrapper.py
@@ -1,10 +1,10 @@
 """Test that BaseWrapper for uses other than KerasClassifier and KerasRegressor.
 """
+import keras
 import numpy as np
+from keras import layers
 from sklearn.base import TransformerMixin
 from sklearn.metrics import mean_squared_error
-import keras
-from keras import layers

 from scikeras.wrappers import BaseWrapper
diff --git a/tests/test_callbacks.py b/tests/test_callbacks.py
index 6a3ac1d2..000dc604 100644
--- a/tests/test_callbacks.py
+++ b/tests/test_callbacks.py
@@ -1,8 +1,8 @@
 from collections import defaultdict
 from typing import Any, DefaultDict, Dict

-import pytest
 import keras
+import pytest
 from keras.callbacks import Callback

 from scikeras.wrappers import KerasClassifier
diff --git a/tests/test_compile_kwargs.py b/tests/test_compile_kwargs.py
index f07fd5d2..f0b803a6 100644
--- a/tests/test_compile_kwargs.py
+++ b/tests/test_compile_kwargs.py
@@ -1,15 +1,18 @@
+from __future__ import annotations
+
 import numpy as np
 import pytest
-from sklearn.datasets import make_classification
-from keras.backend.common.variables import KerasVariable
 from keras import losses as losses_module
 from keras import metrics as metrics_module
 from keras import optimizers as optimizers_module
+from keras.backend.common.variables import KerasVariable
 from keras.layers import Dense, Input
 from keras.models import Model
+from sklearn.datasets import make_classification

 from scikeras.wrappers import KerasClassifier
 from tests.multi_output_models import MultiOutputClassifier
+from tests.testing_utils import get_metric_names


 def get_model(num_hidden=10, meta=None, compile_kwargs=None):
@@ -242,9 +245,18 @@ def test_loss_routed_params_dict(loss, n_outputs_):
     assert est.model_.loss["out1"].from_logits is False


-@pytest.mark.parametrize("metrics", ["binary_accuracy", metrics_module.BinaryAccuracy])
+@pytest.mark.parametrize(
+    "metric",
+    [
+        "binary_accuracy",
+        metrics_module.BinaryAccuracy,
+        metrics_module.BinaryAccuracy(name="custom_name"),
+    ],
+)
 @pytest.mark.parametrize("n_outputs_", (1, 2))
-def test_metrics_single_metric_per_output(metrics, n_outputs_):
+def test_metrics_single_metric_per_output(
+    metric: str | metrics_module.Metric | type[metrics_module.Metric], n_outputs_: int
+):
     """Test a single metric per output using vanilla Keras sytnax and without any routed paramters.

     """
@@ -252,14 +264,14 @@
     X, y = make_classification()
     y = np.column_stack([y for _ in range(n_outputs_)]).squeeze()

-    # loss functions for each output and joined show up as metrics
-    metric_idx = 1 + (n_outputs_ if n_outputs_ > 1 else 0)
-    prefix = "out1_" if n_outputs_ > 1 else ""
+    metric_value = (
+        metric if isinstance(metric, (metrics_module.Metric, str)) else metric()
+    )

-    if isinstance(metrics, str):
-        expected_name = metrics
+    if isinstance(metric_value, str):
+        expected_name = metric
     else:
-        expected_name = metrics().name
+        expected_name = metric_value.name

     if n_outputs_ == 1:
         # List of metrics, not supported for multiple outputs where each output is required to get
@@ -267,56 +279,41 @@
         est = MultiOutputClassifier(
             model=get_model,
             loss="binary_crossentropy",
-            metrics=[
-                metrics if not isinstance(metrics, metrics_module.Metric) else metrics()
-            ],
+            metrics=[metric_value],
         )
         est.fit(X, y)
-        assert est.model_.metrics[metric_idx].name == prefix + expected_name
-
-    # List of lists of metrics
-    est = MultiOutputClassifier(
-        model=get_model,
-        loss="binary_crossentropy",
-        metrics=[
-            [metrics if not isinstance(metrics, metrics_module.Metric) else metrics()]
-            for _ in range(n_outputs_)
-        ],
-    )
-    est.fit(X, y)
-    assert prefix + expected_name == est.model_.metrics[metric_idx].name
+        assert get_metric_names(est) == [expected_name]
+    else:
+        # List of lists of metrics, only supported if we have multiple outputs
+        est = MultiOutputClassifier(
+            model=get_model,
+            loss="binary_crossentropy",
+            metrics=[[metric_value]] * n_outputs_,
+        )
+        est.fit(X, y)
+        assert get_metric_names(est) == [expected_name] * n_outputs_

     # Dict of metrics
     est = MultiOutputClassifier(
         model=get_model,
         loss="binary_crossentropy",
-        metrics={
-            f"out{i+1}": metrics
-            if not isinstance(metrics, metrics_module.Metric)
-            else metrics()
-            for i in range(n_outputs_)
-        },
+        metrics={f"out{i+1}": metric_value for i in range(n_outputs_)},
     )
     est.fit(X, y)
-    assert prefix + expected_name == est.model_.metrics[metric_idx].name
+    assert get_metric_names(est) == [expected_name] * n_outputs_

     # Dict of lists
     est = MultiOutputClassifier(
         model=get_model,
         loss="binary_crossentropy",
-        metrics={
-            f"out{i+1}": metrics
-            if not isinstance(metrics, metrics_module.Metric)
-            else metrics()
-            for i in range(n_outputs_)
-        },
+        metrics={f"out{i+1}": [metric_value] for i in range(n_outputs_)},
     )
     est.fit(X, y)
-    assert prefix + expected_name == est.model_.metrics[metric_idx].name
+    assert get_metric_names(est) == [expected_name] * n_outputs_


 @pytest.mark.parametrize("n_outputs_", (1, 2))
-def test_metrics_two_metric_per_output(n_outputs_):
+def test_metrics_two_metric_per_output(n_outputs_: int):
     """Metrics without the ("name", metric, "output") syntax should ignore
     all routed and custom options.

@@ -328,101 +325,59 @@
     metric_class = metrics_module.BinaryAccuracy

-    # loss functions for each output and joined show up as metrics
-    metric_idx = 1 + (n_outputs_ if n_outputs_ > 1 else 0)
-
-    # List of lists of metrics
-    if n_outputs_ == 1:
-        metrics_ = [metric_class(name="1"), metric_class(name="2")]
-    else:
-        metrics_ = [
-            [metric_class(name="1"), metric_class(name="2")] for _ in range(n_outputs_)
-        ]
+    metrics_value = [metric_class(name="1"), metric_class(name="2")]

     est = MultiOutputClassifier(
         model=get_model,
         loss="binary_crossentropy",
-        metrics=metrics_,
+        metrics=metrics_value if n_outputs_ == 1 else [metrics_value] * n_outputs_,
     )
     est.fit(X, y)
-    if n_outputs_ == 1:
-        assert est.model_.metrics[metric_idx].name == "1"
-    else:
-        # For multi-output models, Keras pre-appends the output name
-        assert est.model_.metrics[metric_idx].name == "out1_1"
-
-    # List of lists of metrics
-    if n_outputs_ == 1:
-        metrics_ = {"out1": [metric_class(name="1"), metric_class(name="2")]}
-    else:
-        metrics_ = {
-            f"out{i+1}": [metric_class(name="1"), metric_class(name="2")]
-            for i in range(n_outputs_)
-        }
+    assert get_metric_names(est) == ["1", "2"] * n_outputs_

     # Dict of metrics
     est = MultiOutputClassifier(
         model=get_model,
         loss="binary_crossentropy",
-        metrics=metrics_,
+        metrics={f"out{i+1}": metrics_value for i in range(n_outputs_)},
     )
     est.fit(X, y)
-    if n_outputs_ == 1:
-        assert est.model_.metrics[metric_idx].name == "1"
-    else:
-        # For multi-output models, Keras pre-appends the output name
-        assert est.model_.metrics[metric_idx].name == "out1_1"
+    assert get_metric_names(est) == ["1", "2"] * n_outputs_


 @pytest.mark.parametrize("n_outputs_", (1, 2))
-def test_metrics_routed_params_iterable(n_outputs_):
-    """Tests compiling metrics with routed parameters
-    when they are passed as an iterable.
-    """
+def test_metrics_routed_params_iterable(n_outputs_: int):
+    """Tests compiling metrics with routed parameters when they are passed as an iterable."""

     metrics = metrics_module.BinaryAccuracy

     X, y = make_classification()
     y = np.column_stack([y for _ in range(n_outputs_)]).squeeze()

-    # loss functions for each output and joined show up as metrics
-    metric_idx = 1 + (n_outputs_ if n_outputs_ > 1 else 0)
-
     est = MultiOutputClassifier(
         model=get_model,
         loss="binary_crossentropy",
-        metrics=[metrics],
+        metrics=[metrics] * n_outputs_,
         metrics__0__name="custom_name",
     )
     est.fit(X, y)
-    compiled_metrics = est.model_.metrics
-    if n_outputs_ == 1:
-        assert compiled_metrics[metric_idx].name == "custom_name"
-    else:
-        assert compiled_metrics[metric_idx].name == "out1_custom_name"
+    expected = (
+        ["custom_name", "binary_accuracy"] if n_outputs_ == 2 else ["custom_name"]
+    )
+    assert get_metric_names(est) == expected

-    if n_outputs_ == 1:
-        metrics_ = [
-            metrics,
-        ]
-    else:
-        metrics_ = [metrics for _ in range(n_outputs_)]
     est = MultiOutputClassifier(
         model=get_model,
         loss="binary_crossentropy",
-        metrics=metrics_,
+        metrics=[metrics] * n_outputs_,
         metrics__name="name_all_metrics",  # ends up in index 1 only
         metrics__0__name="custom_name",  # ends up in index 0 only
     )
     est.fit(X, y)
-    compiled_metrics = est.model_.metrics
-    if n_outputs_ == 1:
-        assert compiled_metrics[metric_idx].name == "custom_name"
-    else:
-        assert compiled_metrics[metric_idx].name == "out1_custom_name"
-        assert compiled_metrics[metric_idx + 1].name == "out1_name_all_metrics"
-        assert compiled_metrics[metric_idx + 2].name == "out2_custom_name"
-        assert compiled_metrics[metric_idx + 3].name == "out2_name_all_metrics"
+    expected = (
+        ["custom_name", "name_all_metrics"] if n_outputs_ == 2 else ["custom_name"]
+    )
+    assert get_metric_names(est) == expected, get_metric_names(est)


 def test_metrics_routed_params_dict():
@@ -436,17 +391,15 @@
     X, y = make_classification()
     y = np.column_stack([y for _ in range(n_outputs_)]).squeeze()

-    # loss functions for each output and joined show up as metrics
-    metric_idx = 1 + n_outputs_
-
     est = MultiOutputClassifier(
         model=get_model,
         loss="binary_crossentropy",
-        metrics={"out1": metrics},
-        metrics__out1__name="custom_name",
+        metrics={"out1": metrics, "out2": metrics},
+        metrics__out1__name="custom_name1",
+        metrics__out2__name="custom_name2",
     )
     est.fit(X, y)
-    assert est.model_.metrics[metric_idx].name == "out1_custom_name"
+    assert get_metric_names(est) == ["custom_name1", "custom_name2"]

     if n_outputs_ == 1:
         metrics_ = ({"out1": metrics},)
@@ -460,8 +413,7 @@
         metrics__out1__name="custom_name",  # ends up in out1 only
     )
     est.fit(X, y)
-    assert est.model_.metrics[metric_idx].name == "out1_custom_name"
-    assert est.model_.metrics[metric_idx + 1].name == "out2_name_all_metrics"
+    assert get_metric_names(est) == ["custom_name", "name_all_metrics"]


 def test_metrics_invalid_string():
diff --git a/tests/test_errors.py b/tests/test_errors.py
index 218f30f1..918296b9 100644
--- a/tests/test_errors.py
+++ b/tests/test_errors.py
@@ -2,9 +2,9 @@
 import numpy as np
 import pytest
-from sklearn.exceptions import NotFittedError
 from keras.layers import Dense, Input
 from keras.models import Model
+from sklearn.exceptions import NotFittedError

 from scikeras.wrappers import BaseWrapper, KerasClassifier, KerasRegressor
@@ -151,7 +151,9 @@ def get_model(compile, meta, compile_kwargs):
         return model

     est = KerasRegressor(model=get_model, loss=loss, compile=compile)
-    with pytest.raises(ValueError, match=r".*(?:provide a loss)|(?:Provide a `loss`).*"):
+    with pytest.raises(
+        ValueError, match=r".*(?:provide a loss)|(?:Provide a `loss`).*"
+    ):
         est.fit([[0], [1]], [0, 1])
diff --git a/tests/test_input_outputs.py b/tests/test_input_outputs.py
index 6087d9f1..18fd02db 100644
--- a/tests/test_input_outputs.py
+++ b/tests/test_input_outputs.py
@@ -3,9 +3,11 @@
 from typing import Any, Callable, Dict
 from unittest.mock import patch

+import keras
 import numpy as np
 import pytest
-import tensorflow as tf
+from keras.layers import Concatenate, Dense, Input
+from keras.models import Model
 from sklearn.base import BaseEstimator
 from sklearn.metrics import accuracy_score, r2_score
 from sklearn.model_selection import train_test_split
@@ -14,9 +16,6 @@
 )
 from sklearn.neural_network import MLPClassifier, MLPRegressor
 from sklearn.preprocessing import FunctionTransformer, OneHotEncoder
-import keras
-from keras.layers import Concatenate, Dense, Input
-from keras.models import Model

 from scikeras.wrappers import BaseWrapper, KerasClassifier, KerasRegressor
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index c8aa411b..ad341f39 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -1,13 +1,14 @@
 import os
 from unittest import mock

+import keras.backend
 import numpy as np
 import pytest
+from keras import Sequential
+from keras import layers as layers_mod
 from sklearn.base import clone
 from sklearn.datasets import make_classification
 from sklearn.preprocessing import FunctionTransformer
-from keras import Sequential
-from keras import layers as layers_mod

 from scikeras.wrappers import KerasClassifier, KerasRegressor
@@ -117,44 +118,49 @@ def test_sample_weights_fit():
     """Checks that the `sample_weight` parameter when passed to
     `fit` has the intended effect.
     """
-    # build estimator
-    estimator = KerasClassifier(
-        model=dynamic_classifier,
-        model__hidden_layer_sizes=(100,),
-        epochs=10,
-        random_state=0,
-    )
-    estimator1 = clone(estimator)
-    estimator2 = clone(estimator)
+    with keras.backend.set_floatx("float64"):
+        # build estimator
+        estimator = KerasClassifier(
+            model=dynamic_classifier,
+            model__hidden_layer_sizes=(100,),
+            epochs=10,
+            random_state=0,
+        )
+        estimator1 = clone(estimator)
+        estimator2 = clone(estimator)

-    # we create 20 points
-    X = np.array([1] * 10000).reshape(-1, 1)
-    y = [1] * 5000 + [-1] * 5000
+        # we create 20 points
+        X = np.array([1] * 10000).reshape(-1, 1)
+        y = [1] * 5000 + [-1] * 5000

-    # heavily weight towards y=1 points
-    sw_first_class = [0.8] * 5000 + [0.2] * 5000
-    # train estimator 1 with weights
-    estimator1.fit(X, y, sample_weight=sw_first_class)
-    # train estimator 2 without weights
-    estimator2.fit(X, y)
-    # estimator1 should tilt towards y=1
-    # estimator2 should predict about equally
-    average_diff_pred_prob_1 = np.average(np.diff(estimator1.predict_proba(X), axis=1))
-    average_diff_pred_prob_2 = np.average(np.diff(estimator2.predict_proba(X), axis=1))
-    assert average_diff_pred_prob_2 < average_diff_pred_prob_1
-
-    # equal weighting
-    sw_equal = [0.5] * 5000 + [0.5] * 5000
-    # train estimator 1 with weights
-    estimator1.fit(X, y, sample_weight=sw_equal)
-    # train estimator 2 without weights
-    estimator2.fit(X, y)
-    # both estimators should have about the same predictions
-    np.testing.assert_allclose(
-        actual=estimator1.predict_proba(X),
-        desired=estimator2.predict_proba(X),
-        rtol=1e-4,
-    )
+        # heavily weight towards y=1 points
+        sw_first_class = [0.8] * 5000 + [0.2] * 5000
+        # train estimator 1 with weights
+        estimator1.fit(X, y, sample_weight=sw_first_class)
+        # train estimator 2 without weights
+        estimator2.fit(X, y)
+        # estimator1 should tilt towards y=1
+        # estimator2 should predict about equally
+        average_diff_pred_prob_1 = np.average(
+            np.diff(estimator1.predict_proba(X), axis=1)
+        )
+        average_diff_pred_prob_2 = np.average(
+            np.diff(estimator2.predict_proba(X), axis=1)
+        )
+        assert average_diff_pred_prob_2 < average_diff_pred_prob_1
+
+        # equal weighting
+        sw_equal = [0.5] * 5000 + [0.5] * 5000
+        # train estimator 1 with weights
+        estimator1.fit(X, y, sample_weight=sw_equal)
+        # train estimator 2 without weights
+        estimator2.fit(X, y)
+        # both estimators should have about the same predictions
+        np.testing.assert_allclose(
+            actual=estimator1.predict_proba(X),
+            desired=estimator2.predict_proba(X),
+            rtol=1e-4,
+        )


 def test_sample_weights_score():
@@ -271,7 +277,9 @@ def test_kwargs(wrapper, builder):
     kwarg_epochs = (
         2  # epochs is a special case for fit since SciKeras also uses it internally
     )
-    extra_kwargs = {"verbose": True}  # chosen because it is not a SciKeras hardcoded param
+    extra_kwargs = {
+        "verbose": True
+    }  # chosen because it is not a SciKeras hardcoded param
     est = wrapper(
         model=builder,
         model__hidden_layer_sizes=(100,),
@@ -313,7 +321,7 @@ def test_kwargs(wrapper, builder):
     # check that params were restored and extra_kwargs were not stored
     for param_name in ("batch_size", "fit__batch_size", "predict__batch_size"):
         assert getattr(est, param_name) == original_batch_size
-    assert est.verbose == False
+    assert est.verbose is False


 @pytest.mark.parametrize("kwargs", ({"epochs": 1}, {"initial_epoch": 1}))
diff --git a/tests/test_scikit_learn_checks.py b/tests/test_scikit_learn_checks.py
index b320b4ea..f685cb48 100644
--- a/tests/test_scikit_learn_checks.py
+++ b/tests/test_scikit_learn_checks.py
@@ -4,10 +4,10 @@
 from typing import Any, Dict

 import pytest
-from sklearn.datasets import load_iris
-from sklearn.utils.estimator_checks import check_no_attributes_set_in_init
 from keras import Model, Sequential, layers
 from keras.backend import floatx, set_floatx
+from sklearn.datasets import load_iris
+from sklearn.utils.estimator_checks import check_no_attributes_set_in_init

 from scikeras.wrappers import KerasClassifier, KerasRegressor
@@ -95,7 +95,7 @@ def test_fully_compliant_estimators_high_precision(estimator, check):
         pytest.skip(
             "This test is run as part of test_fully_compliant_estimators_low_precision."
         )
-    with use_floatx("float64"):
+    with set_floatx("float64"):
         check(estimator)
diff --git a/tests/test_serialization.py b/tests/test_serialization.py
index e8a9bfe9..5cde664a 100644
--- a/tests/test_serialization.py
+++ b/tests/test_serialization.py
@@ -1,16 +1,16 @@
 import pickle
 from typing import Any, Dict, Type

+import keras
+import keras.metrics
+import keras.saving
 import numpy as np
 import pytest
 import tensorflow as tf
-from sklearn.base import clone
-from sklearn.datasets import fetch_california_housing, make_regression
-import keras
-import keras.saving
-import keras.metrics
 from keras.layers import Dense, Input
 from keras.models import Model
+from sklearn.base import clone
+from sklearn.datasets import fetch_california_housing, make_regression

 from scikeras.wrappers import KerasRegressor
@@ -244,11 +244,11 @@ def test_pickle_optimizer(opt_cls: Type[keras.optimizers.Optimizer]):
     opt.build([var1])

-    grad1 = var1 ** 2 / 2.0
+    grad1 = var1**2 / 2.0

     opt.apply([grad1])

-    grad2 = var1 ** 2 / 1.0
+    grad2 = var1**2 / 1.0

     opt.apply([grad2])

     val_no_pickle = var1.numpy()
@@ -258,13 +258,13 @@
     opt.build([var1])

-    grad1 = var1 ** 2 / 2.0
+    grad1 = var1**2 / 2.0

     opt.apply([grad1])

     opt = pickle.loads(pickle.dumps(opt))

-    grad2 = var1 ** 2 / 1.0
+    grad2 = var1**2 / 1.0

     opt.apply([grad2])

     val_pickle = var1.numpy()
diff --git a/tests/testing_utils.py b/tests/testing_utils.py
index 69f884b3..0c81e605 100644
--- a/tests/testing_utils.py
+++ b/tests/testing_utils.py
@@ -7,6 +7,8 @@
     parametrize_with_checks as _parametrize_with_checks,
 )

+from scikeras.wrappers import BaseWrapper
+

 def basic_checks(estimator, loader):
     """Run basic checks (fit, score, pickle) on estimator."""
@@ -60,3 +62,9 @@ def parametrize_with_checks(estimators):
     ids = partial(_get_check_estimator_ids, estimator_ids=estimator_ids)

     return pytest.mark.parametrize("estimator, check", checks_generator, ids=ids)
+
+
+def get_metric_names(estimator: BaseWrapper) -> list[str]:
+    """Get the names of the metrics used by the estimator."""
+    # metrics[1] is a CompileMetrics which contains the user defined metrics
+    return [metric.name for metric in estimator.model_.metrics[1].metrics]
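Note on the assertion style this patch adopts: instead of indexing est.model_.metrics with a computed metric_idx and output-name prefixes, the tests now call the get_metric_names helper added to tests/testing_utils.py, which reads the user-defined metrics out of model_.metrics[1] (a CompileMetrics container). The following is a minimal usage sketch only; the build_model function and the random data in it are illustrative assumptions, not part of this diff.

# Minimal sketch (illustrative, not part of the diff): exercising the same
# metric-name lookup that tests/testing_utils.get_metric_names performs.
import numpy as np
from keras.layers import Dense, Input
from keras.models import Model
from scikeras.wrappers import KerasClassifier


def build_model(meta):
    # Hypothetical single-output binary classifier builder; SciKeras passes
    # `meta` (including "n_features_in_") to builders that accept it.
    inp = Input(shape=(meta["n_features_in_"],))
    out = Dense(1, activation="sigmoid")(inp)
    return Model(inp, out)


est = KerasClassifier(
    model=build_model,
    loss="binary_crossentropy",
    metrics=["binary_accuracy"],
    epochs=1,
    verbose=0,
)
est.fit(np.random.rand(64, 4), np.random.randint(0, 2, size=64))

# Per the helper's comment, model_.metrics[1] is the CompileMetrics wrapper
# holding the user-defined metrics; earlier entries track the loss.
print([m.name for m in est.model_.metrics[1].metrics])  # expected: ['binary_accuracy']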