bump up pytorch bin to 0714 #12407

Merged 1 commit on Jul 15, 2025.
Changes from all commits:
2 changes: 1 addition & 1 deletion .ci/docker/ci_commit_pins/pytorch.txt
```diff
@@ -1 +1 @@
-7cda4017ddda554752e89069ae205be5e8388f59
+90f1e7bed15ca5e48c61c5b6dc5ad4810524f82f
```
4 changes: 2 additions & 2 deletions backends/xnnpack/test/ops/test_slice_copy.py
```diff
@@ -67,7 +67,7 @@ def forward(self, x):
 
         inputs = (torch.randn(1, 1, 3, 3),)
         # Note that two of the slices are optimized away as they are identity.
-        self._test_slice_copy(ConvSlice(), inputs, 4, 2)
+        self._test_slice_copy(ConvSlice(), inputs, 2, 2)
 
     def test_fp32_slice_copy_default_start(self):
         """
@@ -95,7 +95,7 @@ def forward(self, x):
         (
             Tester(module, inputs)
             .export()
-            .check_count({"torch.ops.aten.slice.Tensor": 3})
+            .check_count({"torch.ops.aten.slice.Tensor": 1})
             .to_edge_transform_and_lower()
             .check_not(["torch.ops.higher_order.executorch_call_delegate"])
         )
```
6 changes: 0 additions & 6 deletions devtools/inspector/tests/inspector_utils_test.py
```diff
@@ -610,9 +610,6 @@ def test_compare_intermediate_outputs_sequence_and_non_sequence(self):
         with self.assertRaises(ValueError):
             compare_intermediate_outputs(a, b, L1Comparator())
 
-    @unittest.skip(
-        "TODO: enable the test after required feature has been built in pytorch core nightly version"
-    )
     def test_equip_debug_handle_to_export_program_success(self):
         """Test that propagate_back_debug_handle returns True and properly equips debug handles."""
         # Create a test model
@@ -679,9 +676,6 @@ def test_equip_debug_handle_to_export_program_failure(self):
         # Check that it returns False due to mismatch
         self.assertFalse(result)
 
-    @unittest.skip(
-        "TODO: enable the test after required feature has been built in pytorch core nightly version"
-    )
     def test_equip_debug_handle_to_export_program_op_to_be_removed_in_to_edge(self):
         """Test that propagate_back_debug_handle returns True and properly equips debug handles when an op is removed in to_edge"""
 
```
2 changes: 1 addition & 1 deletion install_requirements.py
```diff
@@ -71,7 +71,7 @@ def python_is_compatible():
 #
 # NOTE: If you're changing, make the corresponding change in .ci/docker/ci_commit_pins/pytorch.txt
 # by picking the hash from the same date in https://hud.pytorch.org/hud/pytorch/pytorch/nightly/
-NIGHTLY_VERSION = "dev20250706"
+NIGHTLY_VERSION = "dev20250714"
 
 
 def install_requirements(use_pytorch_nightly):
```
13 changes: 3 additions & 10 deletions runtime/core/portable_type/c10/c10/util/BFloat16.h
```diff
@@ -4,6 +4,7 @@
 // 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa.
 
 #include <c10/macros/Macros.h>
+#include <c10/util/bit_cast.h>
 #include <cmath>
 #include <cstdint>
 #include <cstring>
@@ -67,13 +68,7 @@ inline C10_HOST_DEVICE uint16_t round_to_nearest_even(float src) {
 #endif
     return UINT16_C(0x7FC0);
   } else {
-    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
-    union {
-      uint32_t U32; // NOLINT(facebook-hte-BadMemberName)
-      float F32; // NOLINT(facebook-hte-BadMemberName)
-    };
-
-    F32 = src;
+    const uint32_t U32 = c10::bit_cast<uint32_t>(src);
     uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF);
     return static_cast<uint16_t>((U32 + rounding_bias) >> 16);
   }
```
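For context on this change: writing one union member and reading another, as the old code did, is formally undefined behavior in C++ (even though most compilers tolerate it), while `bit_cast` reinterprets the bits in a well-defined way. Below is a minimal sketch of the same round-to-nearest-even conversion, using `std::bit_cast` (C++20) as a stand-in for `c10::bit_cast`; the function name is hypothetical and the NaN handling shown above is omitted:

```cpp
#include <bit>
#include <cstdint>

// Truncate a float (1-8-23) to bfloat16 bits (1-8-7), rounding to nearest
// even rather than plainly truncating the low 16 mantissa bits.
inline uint16_t float_to_bfloat16_bits(float src) {
  const uint32_t u32 = std::bit_cast<uint32_t>(src);
  // Add 0x7FFF plus the lowest mantissa bit that survives truncation, so
  // exact ties round toward the value whose kept bits end in 0.
  const uint32_t rounding_bias = ((u32 >> 16) & 1) + UINT32_C(0x7FFF);
  return static_cast<uint16_t>((u32 + rounding_bias) >> 16);
}
```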
Expand Down Expand Up @@ -111,9 +106,7 @@ struct alignas(2) BFloat16 {
#endif
};

C10_API inline std::ostream& operator<<(
std::ostream& out,
const BFloat16& value) {
inline std::ostream& operator<<(std::ostream& out, const BFloat16& value) {
out << (float)value;
return out;
}
Expand Down
2 changes: 1 addition & 1 deletion runtime/core/portable_type/c10/c10/util/Half.h
```diff
@@ -414,7 +414,7 @@ struct alignas(2) Half {
 #endif
 };
 
-C10_API inline std::ostream& operator<<(std::ostream& out, const Half& value) {
+inline std::ostream& operator<<(std::ostream& out, const Half& value) {
   out << (float)value;
   return out;
 }
```
4 changes: 3 additions & 1 deletion runtime/core/portable_type/c10/c10/util/bit_cast.h
```diff
@@ -3,6 +3,8 @@
 #include <cstring>
 #include <type_traits>
 
+#include <c10/macros/Macros.h>
+
 #if __has_include(<bit>) && (defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L)
 #include <bit>
 #define C10_HAVE_STD_BIT_CAST 1
@@ -23,7 +25,7 @@ using std::bit_cast;
 // See https://en.cppreference.com/w/cpp/numeric/bit_cast for more
 // information as well as the source of our implementations.
 template <class To, class From>
-std::enable_if_t<
+C10_HOST_DEVICE std::enable_if_t<
     sizeof(To) == sizeof(From) && std::is_trivially_copyable_v<From> &&
     std::is_trivially_copyable_v<To>,
     To>
```
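The added `C10_HOST_DEVICE` (which expands to `__host__ __device__` when building with CUDA) matters because `round_to_nearest_even` in `BFloat16.h` is itself a host/device function and now calls this fallback wherever `std::bit_cast` is unavailable. A simplified sketch of what the pre-C++20 fallback does, assuming the header's additional `static_assert`s are elided:

```cpp
#include <cstring>
#include <type_traits>

// memcpy between trivially copyable types of equal size is the classic
// well-defined substitute for union or reinterpret_cast type punning.
template <class To, class From>
std::enable_if_t<
    sizeof(To) == sizeof(From) && std::is_trivially_copyable_v<From> &&
        std::is_trivially_copyable_v<To>,
    To>
bit_cast(const From& src) noexcept {
  To dst;  // this sketch additionally assumes To is default-constructible
  std::memcpy(&dst, &src, sizeof(To));
  return dst;
}
```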