We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
From GitHub Actions logs:
=================================== FAILURES =================================== _______________________ test_tensorflow_functional[32-2] _______________________ batch_size = 32, shuffle_config = 2 data = {'X': array([[-1.00724718, -0.92444024, -1.1659146 ], [-1.0525093 , -1.15150253, -1.13007914], [-1.09831...4693, 1.15885651, 0.84745434]]), 'error_indices': [47, 48, 49, 50, 51, 52], 'num_classes': 2, 'num_features': 3, ...} hidden_units = 64 @pytest.mark.slow @pytest.mark.skipif("not python_version_ok()", reason="need at least python 3.7") @pytest.mark.parametrize("batch_size,shuffle_config", [(1, 0), (32, 0), (32, 1), (32, 2)]) def test_tensorflow_functional(batch_size, shuffle_config, data=DATA, hidden_units=64): dataset_tf = tf.data.Dataset.from_tensor_slices((data["X"], data["y"])) if shuffle_config == 0: # proper shuffling for SGD dataset_shuffled = dataset_tf.shuffle(buffer_size=len(data["X"])) elif shuffle_config == 1: # shuffling for datasets that don't fit in memory dataset_shuffled = dataset_tf.shuffle(buffer_size=60) else: dataset_shuffled = dataset_tf # no shuffling dataset_og_order = dataset_tf.batch(batch_size) dataset_tf = dataset_shuffled.batch(batch_size) def make_model(num_features, num_classes): inputs = tf.keras.Input(shape=(num_features,)) x = tf.keras.layers.Dense(hidden_units, activation="relu")(inputs) outputs = tf.keras.layers.Dense(num_classes)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs, name="test_model") return model model = KerasWrapperModel( make_model, model_kwargs={"num_features": data["num_features"], "num_classes": data["num_classes"]}, ) model.summary() # Test base model works: model.fit( X=dataset_tf, y=data["y"], epochs=2, ) preds_base = model.predict_proba(dataset_tf) # Test CleanLearning performs well: cl = CleanLearning(model) cl.fit(dataset_tf, data["y"], clf_kwargs={"epochs": 10}, clf_final_kwargs={"epochs": 15}) preds = cl.predict(dataset_og_order) err = np.sum(preds != data["y_og"]) / len(data["y_og"]) issue_indices = list(cl.label_issues_df[cl.label_issues_df["is_label_issue"]].index.values) > assert len(set(issue_indices) & set(data["error_indices"])) != 0 E assert 0 != 0 E + where 0 = len((set() & {47, 48, 49, 50, 51, 52})) E + where set() = set([]) E + and {47, 48, 49, 50, 51, 52} = set([47, 48, 49, 50, 51, 52])
Capping pytest~=7.4 seems to resolve the issue. Done in #975. This version cap should be reverted ASAP.
The text was updated successfully, but these errors were encountered:
No branches or pull requests
Stack trace
From GitHub Actions logs:
Steps to reproduce
Additional information
Capping pytest~=7.4 seems to resolve the issue. Done in #975.
This version cap should be reverted ASAP.
The text was updated successfully, but these errors were encountered: