
chore: fix typos in the tests directory #36813


Merged
merged 28 commits into main from develop on Mar 21, 2025
Commits (28)
4565f3e
chore: fix typos in the tests
threewebcode Mar 19, 2025
7387bd0
chore: fix typos in the tests
threewebcode Mar 19, 2025
28e468f
chore: fix typos in the tests
threewebcode Mar 19, 2025
544e215
chore: fix typos in the tests
threewebcode Mar 19, 2025
4746b93
chore: fix typos in the tests
threewebcode Mar 19, 2025
53e56fa
chore: fix typos in the tests
threewebcode Mar 19, 2025
920373c
chore: fix typos in the tests
threewebcode Mar 19, 2025
2cd0da3
chore: fix typos in the tests
threewebcode Mar 19, 2025
f355e8c
chore: fix typos in the tests
threewebcode Mar 19, 2025
28adcea
chore: fix typos in the tests
threewebcode Mar 19, 2025
ad3ece9
chore: fix typos in the tests
threewebcode Mar 19, 2025
60af5ff
chore: fix typos in the tests
threewebcode Mar 19, 2025
0ce13a8
chore: fix typos in the tests
threewebcode Mar 19, 2025
2d21ed4
fix: format codes
threewebcode Mar 19, 2025
284bff2
chore: fix copy mismatch issue
threewebcode Mar 19, 2025
6290f0a
fix: format codes
threewebcode Mar 19, 2025
0ec4592
chore: fix copy mismatch issue
threewebcode Mar 19, 2025
1071ea2
chore: fix copy mismatch issue
threewebcode Mar 19, 2025
c88f102
chore: fix copy mismatch issue
threewebcode Mar 19, 2025
e21ad33
Merge branch 'main' into develop
threewebcode Mar 19, 2025
730620a
chore: restore previous words
threewebcode Mar 19, 2025
081dcc1
Merge branch 'main' into develop
threewebcode Mar 19, 2025
edf47a9
Merge branch 'main' into develop
threewebcode Mar 19, 2025
7363a13
Merge branch 'main' into develop
threewebcode Mar 19, 2025
4e987a1
Merge branch 'main' into develop
threewebcode Mar 19, 2025
5a809f7
Merge branch 'main' into develop
threewebcode Mar 20, 2025
ab9108b
Merge branch 'main' into develop
threewebcode Mar 20, 2025
8674ff0
chore: revert unexpected changes
threewebcode Mar 20, 2025
2 changes: 1 addition & 1 deletion tests/deepspeed/test_deepspeed.py
@@ -628,7 +628,7 @@ def model_init():
with CaptureStd() as cs:
trainer.hyperparameter_search(direction="maximize", n_trials=n_trials)
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
self.assertIn(f"Trial {n_trials-1} finished with value", cs.err, "expected hyperparameter_search output")
self.assertIn(f"Trial {n_trials - 1} finished with value", cs.err, "expected hyperparameter_search output")
self.assertIn("Best is trial", cs.err, "expected hyperparameter_search output")

# --- These tests need to run on both zero stages --- #
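The whitespace-only change above is cosmetic: spacing inside an f-string replacement field does not change the rendered text, so the assertion matches the same hyperparameter_search output either way. A quick standalone check (plain Python, not part of the test suite):

```python
# Spacing inside the {...} replacement field is formatting only; both spellings render identically.
n_trials = 4
assert f"Trial {n_trials-1} finished with value" == f"Trial {n_trials - 1} finished with value"
print(f"Trial {n_trials - 1} finished with value")  # -> "Trial 3 finished with value"
```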
4 changes: 2 additions & 2 deletions tests/models/bart/test_modeling_tf_bart.py
@@ -87,7 +87,7 @@ def prepare_config_and_inputs_for_common(self):
clip_value_min=self.eos_token_id + 1,
clip_value_max=self.vocab_size + 1,
)
# Explicity add "end of sequence" to the inputs
# Explicitly add "end of sequence" to the inputs
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)

@@ -225,7 +225,7 @@ def test_decoder_model_past_large_inputs(self):
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

# TODO (Joao): fix me
@unittest.skip("Onnx compliancy broke with TF 2.10")
@unittest.skip("Onnx compliance broke with TF 2.10")
def test_onnx_compliancy(self):
pass

2 changes: 1 addition & 1 deletion tests/models/bert/test_modeling_tf_bert.py
@@ -735,7 +735,7 @@ def test_custom_load_tf_weights(self):
self.assertTrue(layer.split("_")[0] in ["dropout", "classifier"])

# TODO (Joao): fix me
@unittest.skip("Onnx compliancy broke with TF 2.10")
@unittest.skip("Onnx compliance broke with TF 2.10")
def test_onnx_compliancy(self):
pass

12 changes: 6 additions & 6 deletions tests/models/blip/test_modeling_blip.py
@@ -474,7 +474,7 @@ def test_retain_grad_hidden_states_attentions(self):
def test_model_get_set_embeddings(self):
pass

# override as the `logit_scale` parameter initilization is different for Blip
# override as the `logit_scale` parameter initialization is different for Blip
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -483,7 +483,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
@@ -988,7 +988,7 @@ def test_training_gradient_checkpointing_use_reentrant(self):
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass

# override as the `logit_scale` parameter initilization is different for Blip
# override as the `logit_scale` parameter initialization is different for Blip
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -997,7 +997,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
@@ -1206,7 +1206,7 @@ def test_training_gradient_checkpointing(self):
loss = model(**inputs).loss
loss.backward()

# override as the `logit_scale` parameter initilization is different for Blip
# override as the `logit_scale` parameter initialization is different for Blip
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -1215,7 +1215,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
6 changes: 3 additions & 3 deletions tests/models/blip_2/test_modeling_blip_2.py
@@ -521,7 +521,7 @@ def test_save_load_fast_init_to_base(self):
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention".
This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention".
In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after the model
is loaded, because we manually replicate requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info
@@ -970,7 +970,7 @@ def test_cpu_offload(self):
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention".
This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention".
In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after the model
is loaded, because we manually replicate requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info
@@ -1647,7 +1647,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
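As a rough illustration of the dispatch behaviour the two docstrings above describe (a hedged sketch, not the test itself; the checkpoint name is only an example):

```python
# Sketch: the attention implementation requested at load time is recorded on the
# config; for composite models like BLIP-2 it is replicated onto each sub-config,
# which is what the test verifies via layer names and the config contents.
from transformers import Blip2ForConditionalGeneration

model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", attn_implementation="sdpa"
)
print(model.config._attn_implementation)  # "sdpa", or a per-sub-config mapping for composite models
```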
2 changes: 1 addition & 1 deletion tests/models/bloom/test_tokenization_bloom.py
@@ -135,7 +135,7 @@ def test_encodings_from_xnli_dataset(self):
@require_jinja
def test_tokenization_for_chat(self):
tokenizer = self.get_rust_tokenizer()
tokenizer.chat_template = "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
tokenizer.chat_template = "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}"
test_chats = [
[{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
[
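The chat-template change above is purely cosmetic: Python concatenates adjacent string literals at parse time, so the single literal is identical to the former three-part form. A quick check:

```python
# Adjacent string literals are joined by the parser; both forms define the
# exact same Jinja template string.
split_form = "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
joined_form = "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}"
assert split_form == joined_form
```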
2 changes: 1 addition & 1 deletion tests/models/canine/test_tokenization_canine.py
@@ -64,7 +64,7 @@ def test_prepare_batch_integration(self):
@require_torch
def test_encoding_keys(self):
tokenizer = self.canine_tokenizer
src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
batch = tokenizer(src_text, padding=True, return_tensors="pt")
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids", batch)
4 changes: 2 additions & 2 deletions tests/models/chinese_clip/test_modeling_chinese_clip.py
@@ -596,7 +596,7 @@ def test_retain_grad_hidden_states_attentions(self):
def test_model_get_set_embeddings(self):
pass

# override as the `logit_scale` parameter initilization is different for CHINESE_CLIP
# override as the `logit_scale` parameter initialization is different for CHINESE_CLIP
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -608,7 +608,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
4 changes: 2 additions & 2 deletions tests/models/clap/test_modeling_clap.py
@@ -543,7 +543,7 @@ def test_retain_grad_hidden_states_attentions(self):
def test_model_get_set_embeddings(self):
pass

# override as the `logit_scale` parameter initilization is different for CLAP
# override as the `logit_scale` parameter initialization is different for CLAP
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -552,7 +552,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
4 changes: 2 additions & 2 deletions tests/models/clip/test_modeling_clip.py
@@ -761,7 +761,7 @@ def test_retain_grad_hidden_states_attentions(self):
def test_model_get_set_embeddings(self):
pass

# override as the `logit_scale` parameter initilization is different for CLIP
# override as the `logit_scale` parameter initialization is different for CLIP
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -770,7 +770,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
2 changes: 1 addition & 1 deletion tests/models/clipseg/test_modeling_clipseg.py
@@ -519,7 +519,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if "logit_scale" in name:
self.assertAlmostEqual(
param.data.item(),
4 changes: 2 additions & 2 deletions tests/models/clvp/test_modeling_clvp.py
@@ -500,7 +500,7 @@ def test_inputs_embeds(self):
def test_model_get_set_embeddings(self):
pass

# override as the `logit_scale` parameter initilization is different for Clvp
# override as the `logit_scale` parameter initialization is different for Clvp
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -509,7 +509,7 @@ def test_initialization(self):
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initilized as per the original implementation
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
expected_value = np.log(1 / 0.07)
returned_value = param.data.item()
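For reference, the expected value that these `logit_scale` checks compare against is the CLIP-style temperature initialization visible in the Clvp hunk above:

```python
import numpy as np

# CLIP-style models initialize logit_scale to log(1 / 0.07); the tests assert the
# parameter is (almost) equal to this value right after initialization.
expected_logit_scale = np.log(1 / 0.07)
print(round(expected_logit_scale, 4))  # 2.6593
```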
12 changes: 6 additions & 6 deletions tests/models/conditional_detr/test_modeling_conditional_detr.py
@@ -465,13 +465,13 @@ def test_different_timm_backbone(self):
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "ConditionalDetrForSegmentation":
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)

self.assertTrue(outputs)
@@ -501,13 +501,13 @@ def test_hf_backbone(self):
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "ConditionalDetrForSegmentation":
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)

self.assertTrue(outputs)
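A minimal sketch of what "out_indices was propagated to backbone" means (assumes timm is installed; resnet50 is only an example backbone, not necessarily the one the test instantiates):

```python
import timm

# out_indices selects which backbone stages are exposed as feature maps; the
# assertions above count the intermediate channel sizes that reach the DETR-style
# conv encoder (three stages in this test).
backbone = timm.create_model("resnet50", features_only=True, out_indices=(1, 2, 3))
print(backbone.feature_info.channels())  # e.g. [256, 512, 1024]
```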
2 changes: 1 addition & 1 deletion tests/models/cpmant/test_modeling_cpmant.py
@@ -161,7 +161,7 @@ def test_inputs_embeds(self):
def test_retain_grad_hidden_states_attentions(self):
unittest.skip(
"CPMAnt doesn't support retain grad in hidden_states or attentions, because prompt management will peel off the output.hidden_states from graph.\
So is attentions. We strongly recommand you use loss to tune model."
So is attentions. We strongly recommend you use loss to tune model."
)(self.test_retain_grad_hidden_states_attentions)

def test_cpmant_model(self):
8 changes: 4 additions & 4 deletions tests/models/deformable_detr/test_modeling_deformable_detr.py
@@ -542,10 +542,10 @@ def test_different_timm_backbone(self):
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 4)
else:
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 4)

self.assertTrue(outputs)
@@ -574,10 +574,10 @@ def test_hf_backbone(self):
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 4)
else:
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 4)

self.assertTrue(outputs)
Original file line number Diff line number Diff line change
@@ -214,7 +214,7 @@ def _validate_backbone_init():
model.to(torch_device)
model.eval()

# Confirm out_indices propogated to backbone
# Confirm out_indices propagated to backbone
self.assertEqual(len(model.backbone.out_indices), 2)

config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
2 changes: 1 addition & 1 deletion tests/models/depth_pro/test_modeling_depth_pro.py
@@ -323,7 +323,7 @@ def test_initialization(self):
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)

# this started when switched from normal initialization to kaiming_normal intialization
# this started when switched from normal initialization to kaiming_normal initialization
# maybe because the magnitude of offset values from ViT-encoders increases when followed by many convolution layers
def test_batching_equivalence(self, atol=1e-4, rtol=1e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
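A generic PyTorch sketch of the kaiming_normal initialization the comment above refers to (not the DepthPro code): fan-out Kaiming init typically produces larger-magnitude weights than a plain `normal_(std=0.02)`, which is consistent with the looser `atol`/`rtol` used in the batching-equivalence test.

```python
import torch.nn as nn

# Kaiming (He) normal init scales the std with the layer's fan-out, so weight
# magnitudes grow relative to a fixed small-std normal initialization.
conv = nn.Conv2d(3, 64, kernel_size=3)
nn.init.kaiming_normal_(conv.weight, mode="fan_out", nonlinearity="relu")
print(conv.weight.std().item())  # ~0.06 for this layer, vs 0.02 for normal_(std=0.02)
```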
12 changes: 6 additions & 6 deletions tests/models/detr/test_modeling_detr.py
@@ -465,13 +465,13 @@ def test_different_timm_backbone(self):
self.model_tester.num_labels + 1,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "DetrForSegmentation":
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)

self.assertTrue(outputs)
@@ -500,13 +500,13 @@ def test_hf_backbone(self):
self.model_tester.num_labels + 1,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "DetrForSegmentation":
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propogated to backbone
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)

self.assertTrue(outputs)