Skip to content

Commit

Permalink
Fix: Always read accessor data as little-endian
Browse files Browse the repository at this point in the history
The glTF spec requires buffer data to be little-endian, which the accessor tools did not respect until now. This change also fixes some strict aliasing violations.
  • Loading branch information
spnda committed Jan 29, 2024
1 parent 0272e59 commit 65e49c2
Show file tree
Hide file tree
Showing 3 changed files with 91 additions and 31 deletions.
101 changes: 70 additions & 31 deletions include/fastgltf/tools.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,45 @@ concept Element = std::is_arithmetic_v<typename ElementTraits<ElementType>::comp

namespace internal {

template <typename DestType, typename SourceType>
/**
* This function deserializes some N bytes in little endian order (as required by the glTF spec)
* into the given arithmetic type T in a standard-conforming fashion.
*
* This uses bit-shifts and ORs to correctly convert the bytes to avoid violating the strict aliasing
* rule as if we would just use T*.
*/
template <typename T>
T deserializeComponent(const std::byte* bytes, std::size_t index) {
	static_assert(std::is_integral_v<T> && !std::is_same_v<T, bool>, "Component deserialization is only supported on basic arithmetic types.");
	// Accumulate in the unsigned counterpart of T: left-shifting a byte value
	// into the sign bit of a signed type is undefined behaviour in C++17
	// (e.g. int32_t with a high byte >= 0x80), whereas unsigned shifts are
	// fully defined. The final cast back to a signed T wraps modulo 2^N on
	// every relevant platform (and is well-defined as of C++20).
	using Unsigned = std::make_unsigned_t<T>;
	Unsigned ret = 0;
	// NOTE: sizeof counts bytes, not octets; on exotic platforms where a byte
	// is not 8 bits this arithmetic would be wrong (assumed unsupported here).
	for (std::size_t i = 0; i < sizeof(T); ++i) {
		ret |= static_cast<Unsigned>(std::to_integer<std::uint8_t>(bytes[i + index * sizeof(T)])) << (i * 8);
	}
	return static_cast<T>(ret);
}

/**
 * Specialization for float: deserializes the four bytes as a little-endian
 * std::uint32_t and reinterprets the bit pattern as an IEEE 754 binary32
 * value via bit_cast, avoiding any strict-aliasing violation.
 */
template<>
float deserializeComponent<float>(const std::byte* bytes, std::size_t index) {
	// Guard against non-IEEE 754 float representations at compile time.
	static_assert(std::numeric_limits<float>::is_iec559 &&
	              std::numeric_limits<float>::radix == 2 &&
	              std::numeric_limits<float>::digits == 24 &&
	              std::numeric_limits<float>::max_exponent == 128,
	              "Float deserialization is only supported on IEEE 754 platforms");
	return bit_cast<float>(deserializeComponent<std::uint32_t>(bytes, index));
}

/**
 * Specialization for double: deserializes the eight bytes as a little-endian
 * std::uint64_t and reinterprets the bit pattern as an IEEE 754 binary64
 * value via bit_cast, avoiding any strict-aliasing violation.
 */
template<>
double deserializeComponent<double>(const std::byte* bytes, std::size_t index) {
	// Guard against non-IEEE 754 double representations at compile time.
	static_assert(std::numeric_limits<double>::is_iec559 &&
	              std::numeric_limits<double>::radix == 2 &&
	              std::numeric_limits<double>::digits == 53 &&
	              std::numeric_limits<double>::max_exponent == 1024,
	              "Double deserialization is only supported on IEEE 754 platforms");
	return bit_cast<double>(deserializeComponent<std::uint64_t>(bytes, index));
}

template <typename DestType, typename SourceType>
constexpr DestType convertComponent(const SourceType& source, bool normalized) {
if (normalized) {
if constexpr (std::is_floating_point_v<SourceType> && std::is_integral_v<DestType>) {
Expand All @@ -150,7 +188,7 @@ constexpr DestType convertComponent(const SourceType& source, bool normalized) {

template <typename SourceType, typename DestType, std::size_t Index>
constexpr DestType convertComponent(const std::byte* bytes, bool normalized) {
return convertComponent<DestType>(reinterpret_cast<const SourceType*>(bytes)[Index], normalized);
return convertComponent<DestType>(deserializeComponent<SourceType>(bytes, Index), normalized);
}

template <typename ElementType, typename SourceType, std::size_t... I>
Expand Down Expand Up @@ -199,9 +237,8 @@ ElementType getAccessorElementAt(ComponentType componentType, const std::byte* b

// Performs a binary search for the index into the sparse index list whose value matches the desired index
template <typename ElementType>
bool findSparseIndex(const std::byte* bytes, std::size_t indexCount, std::size_t desiredIndex,
bool findSparseIndex(const std::byte* indices, std::size_t indexCount, std::size_t desiredIndex,
std::size_t& resultIndex) {
auto* elements = reinterpret_cast<const ElementType*>(bytes);
auto count = indexCount;

resultIndex = 0;
Expand All @@ -210,15 +247,15 @@ bool findSparseIndex(const std::byte* bytes, std::size_t indexCount, std::size_t
auto step = count / 2;
auto index = resultIndex + step;

if (elements[index] < static_cast<ElementType>(desiredIndex)) {
if (deserializeComponent<ElementType>(indices, index) < static_cast<ElementType>(desiredIndex)) {
resultIndex = index + 1;
count -= step + 1;
} else {
count = step;
}
}

return resultIndex < indexCount && elements[resultIndex] == static_cast<ElementType>(desiredIndex);
return resultIndex < indexCount && deserializeComponent<ElementType>(indices, resultIndex) == static_cast<ElementType>(desiredIndex);
}

// Finds the index of the nearest sparse index to the desired index
Expand Down Expand Up @@ -249,13 +286,13 @@ inline bool findSparseIndex(ComponentType componentType, const std::byte* bytes,
} // namespace internal

struct DefaultBufferDataAdapter {
const std::byte* operator()(const Buffer& buffer) const {
auto operator()(const Buffer& buffer) const {
return std::visit(visitor {
[](auto&) -> const std::byte* {
return nullptr;
return {};
},
[&](const sources::Vector& vec) {
return reinterpret_cast<const std::byte*>(vec.bytes.data());
return reinterpret_cast<const std::byte*>(vec.bytes.data());
},
[&](const sources::ByteView& bv) {
return bv.bytes.data();
Expand Down Expand Up @@ -373,8 +410,8 @@ class IterableAccessor {
const auto& view = asset.bufferViews[*accessor.bufferViewIndex];
stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);

bufferBytes = adapter(asset.buffers[view.bufferIndex]);
bufferBytes += view.byteOffset + accessor.byteOffset;
bufferBytes = adapter(asset.buffers[view.bufferIndex])
+ view.byteOffset + accessor.byteOffset;

if (accessor.sparse.has_value()) {
const auto& indicesView = asset.bufferViews[accessor.sparse->indicesBufferView];
Expand Down Expand Up @@ -433,7 +470,7 @@ ElementType getAccessorElement(const Asset& asset, const Accessor& accessor, siz
if (internal::findSparseIndex(accessor.sparse->indexComponentType, indicesBytes, accessor.sparse->count,
index, sparseIndex)) {
return internal::getAccessorElementAt<ElementType>(accessor.componentType,
valuesBytes + valueStride * sparseIndex,
&valuesBytes[valueStride * sparseIndex],
accessor.normalized);
}
}
Expand All @@ -450,12 +487,13 @@ ElementType getAccessorElement(const Asset& asset, const Accessor& accessor, siz
}

const auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
auto stride = view.byteStride.value_or(getElementByteSize(accessor.type, accessor.componentType));

auto* bytes = adapter(asset.buffers[view.bufferIndex]);
bytes += view.byteOffset + accessor.byteOffset;
auto* bytes = adapter(asset.buffers[view.bufferIndex])
+ view.byteOffset + accessor.byteOffset;

return internal::getAccessorElementAt<ElementType>(accessor.componentType, bytes + index * stride, accessor.normalized);
return internal::getAccessorElementAt<ElementType>(
accessor.componentType, &bytes[index * stride], accessor.normalized);
}

template<typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
Expand Down Expand Up @@ -504,10 +542,10 @@ void iterateAccessor(const Asset& asset, const Accessor& accessor, Functor&& fun
// property or extensions MAY override zeros with actual values.
if (accessor.bufferViewIndex) {
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
srcBytes = adapter(asset.buffers[view.bufferIndex]) + view.byteOffset + accessor.byteOffset;
srcStride = view.byteStride ? *view.byteStride
: getElementByteSize(accessor.type, accessor.componentType);
}
srcBytes = adapter(asset.buffers[view.bufferIndex])
+ view.byteOffset + accessor.byteOffset;
srcStride = view.byteStride.value_or(getElementByteSize(accessor.type, accessor.componentType));
}

auto nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(
accessor.sparse->indexComponentType, indicesBytes);
Expand All @@ -516,18 +554,18 @@ void iterateAccessor(const Asset& asset, const Accessor& accessor, Functor&& fun
for (std::size_t i = 0; i < accessor.count; ++i) {
if (i == nextSparseIndex) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType,
valuesBytes + valueStride * sparseIndexCount,
&valuesBytes[valueStride * sparseIndexCount],
accessor.normalized));

++sparseIndexCount;

if (sparseIndexCount < accessor.sparse->count) {
nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(
accessor.sparse->indexComponentType, indicesBytes + indexStride * sparseIndexCount);
accessor.sparse->indexComponentType, &indicesBytes[indexStride * sparseIndexCount]);
}
} else if (accessor.bufferViewIndex) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType,
srcBytes + srcStride * i,
&srcBytes[srcStride * i],
accessor.normalized));
} else {
func(ElementType{});
Expand All @@ -547,13 +585,14 @@ void iterateAccessor(const Asset& asset, const Accessor& accessor, Functor&& fun
}
else {
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
auto stride = view.byteStride.value_or(getElementByteSize(accessor.type, accessor.componentType));

auto* bytes = adapter(asset.buffers[view.bufferIndex]);
bytes += view.byteOffset + accessor.byteOffset;
auto* bytes = adapter(asset.buffers[view.bufferIndex])
+ view.byteOffset + accessor.byteOffset;

for (std::size_t i = 0; i < accessor.count; ++i) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType, bytes + i * stride, accessor.normalized));
func(internal::getAccessorElementAt<ElementType>(
accessor.componentType, &bytes[i * stride], accessor.normalized));
}
}
}
Expand Down Expand Up @@ -632,20 +671,20 @@ void copyFromAccessor(const Asset& asset, const Accessor& accessor, void* dest,

auto* srcBytes = adapter(asset.buffers[view.bufferIndex]) + view.byteOffset + accessor.byteOffset;

// We have to perform normalization if the accessor is marked as containing normalized data, which is why
// we can't just memcpy then.
// If the data is normalized or the component/accessor type is different, we have to convert each element and can't memcpy.
if (std::is_trivially_copyable_v<ElementType> && !accessor.normalized && accessor.componentType == Traits::enum_component_type) {
if (srcStride == elemSize && srcStride == TargetStride) {
std::memcpy(dest, srcBytes, elemSize * accessor.count);
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
std::memcpy(dstBytes + TargetStride * i, srcBytes + srcStride * i, elemSize);
std::memcpy(dstBytes + TargetStride * i, &srcBytes[srcStride * i], elemSize);
}
}
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
auto* pDest = reinterpret_cast<ElementType*>(dstBytes + TargetStride * i);
*pDest = internal::getAccessorElementAt<ElementType>(accessor.componentType, srcBytes + srcStride * i);
*pDest = internal::getAccessorElementAt<ElementType>(
accessor.componentType, &srcBytes[srcStride * i]);
}
}
}
Expand Down
12 changes: 12 additions & 0 deletions include/fastgltf/types.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -1229,6 +1229,10 @@ namespace fastgltf {
template <typename Iterator>
explicit constexpr span(Iterator first, size_type count) : _ptr(first), _size(count) {}

#if FASTGLTF_CPP_20
constexpr span(std::span<T> data) : _ptr(data.data()), _size(data.size()) {}
#endif

constexpr span(const span& other) noexcept = default;
constexpr span& operator=(const span& other) noexcept = default;

Expand Down Expand Up @@ -1256,6 +1260,14 @@ namespace fastgltf {
return span(_ptr, count);
}

/** Returns a span covering the last `count` elements of this span.
 *  Mirrors std::span::last; behavior is undefined if count > size()
 *  (no bounds check is performed, matching the standard's contract). */
[[nodiscard]] constexpr span<T, Extent> last(size_type count) const {
	return span(&data()[size() - count], count);
}

/** Returns a span over `count` elements starting at `offset`; when count is
 *  dynamic_extent (the default) the subspan extends to the end of this span.
 *  Mirrors std::span::subspan; behavior is undefined if offset/count exceed
 *  size() (no bounds check is performed, matching the standard's contract). */
[[nodiscard]] constexpr span<T, Extent> subspan(size_type offset, size_type count = dynamic_extent) const {
	return span(&data()[offset], count == dynamic_extent ? size() - offset : count);
}

#if FASTGLTF_CPP_20
operator std::span<T>() const {
return std::span(data(), size());
Expand Down
9 changes: 9 additions & 0 deletions tests/accessor_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,15 @@ TEST_CASE("Test data type conversion", "[gltf-tools]") {
}
}

TEST_CASE("Test little-endian correctness", "[gltf-tools]") {
    // Check that deserializeComponent treats its input as little-endian
    // irrespective of host byte order, as the glTF spec requires.
    // On a little-endian host this is a tautology, but it guards the
    // byte-swapping path against regressions on big-endian systems.
    std::array<std::byte, 4> leBytes {{ std::byte(0x0A), std::byte(0x0B), std::byte(0x0C), std::byte(0x0D) }};
    const auto value = fastgltf::internal::deserializeComponent<std::uint32_t>(leBytes.data(), 0);
    REQUIRE(value == 0x0D0C0B0A);
}

TEST_CASE("Test accessor", "[gltf-tools]") {
auto lightsLamp = sampleModels / "2.0" / "LightsPunctualLamp" / "glTF";
fastgltf::GltfDataBuffer jsonData;
Expand Down

0 comments on commit 65e49c2

Please sign in to comment.