Support Location Providers #1452

Merged · 25 commits · Jan 10, 2025 (changes shown are from 23 of the 25 commits)

Commits
adfbd3c
Skeletal implementation
Dec 20, 2024
ea2b456
First attempt at hashing locations
Dec 20, 2024
ce5f0d5
Relocate to table submodule; code and comment improvements
Dec 20, 2024
d3e0c0f
Add unit tests
Dec 20, 2024
00917e9
Remove entropy check
Dec 20, 2024
c4e6be9
Merge branch 'main' into location-providers
smaheshwar-pltr Dec 20, 2024
bc2eab8
Nit: Prefer `self.table_properties`
Dec 20, 2024
9999cbb
Remove special character testing
Dec 21, 2024
23ef8f5
Add integration tests for writes
Dec 23, 2024
e47e18f
Move all `LocationProviders`-related code into locations.py
Jan 9, 2025
45391de
Nit: tiny for loop refactor
Jan 9, 2025
065bcbf
Fix typo
Jan 9, 2025
e5214d4
Object storage as default location provider
Jan 9, 2025
568af55
Update tests/integration/test_writes/test_partitioned_writes.py
smaheshwar-pltr Jan 9, 2025
e77af29
Test entropy in test_object_storage_injects_entropy
Jan 9, 2025
651aaea
Refactor integration tests to use properties and omit when default once
Jan 9, 2025
5bfa24b
Use a different table property for custom location provision
Jan 9, 2025
8cd46fa
write.location-provider.py-impl -> write.py-location-provider.impl
Jan 9, 2025
3dbb8d0
Merge branch 'main' into location-providers
Jan 10, 2025
e992c24
Make lint
Jan 10, 2025
f1e4a31
Move location provider loading into `write_file` for back-compat
Jan 10, 2025
46dd7ab
Make object storage no longer the default
Jan 10, 2025
490d08c
Merge branch 'main' into location-providers
Jan 10, 2025
3555932
Add test case for partitioned paths disabled but with no partition sp…
Jan 10, 2025
55d6c4f
Moved constants within ObjectStoreLocationProvider
Jan 10, 2025
7 changes: 6 additions & 1 deletion pyiceberg/io/pyarrow.py
@@ -136,6 +136,7 @@
visit,
visit_with_partner,
)
from pyiceberg.table.locations import load_location_provider
from pyiceberg.table.metadata import TableMetadata
from pyiceberg.table.name_mapping import NameMapping, apply_name_mapping
from pyiceberg.transforms import TruncateTransform
@@ -2305,6 +2306,7 @@ def write_file(io: FileIO, table_metadata: TableMetadata, tasks: Iterator[WriteT
property_name=TableProperties.PARQUET_ROW_GROUP_LIMIT,
default=TableProperties.PARQUET_ROW_GROUP_LIMIT_DEFAULT,
)
location_provider = load_location_provider(table_location=table_metadata.location, table_properties=table_metadata.properties)

def write_parquet(task: WriteTask) -> DataFile:
table_schema = table_metadata.schema()
@@ -2327,7 +2329,10 @@ def write_parquet(task: WriteTask) -> DataFile:
for batch in task.record_batches
]
arrow_table = pa.Table.from_batches(batches)
file_path = f"{table_metadata.location}/data/{task.generate_data_file_path('parquet')}"
file_path = location_provider.new_data_location(
data_file_name=task.generate_data_file_filename("parquet"),
partition_key=task.partition_key,
)
fo = io.new_output(file_path)
with fo.create(overwrite=True) as fos:
with pq.ParquetWriter(fos, schema=arrow_table.schema, **parquet_writer_kwargs) as writer:
15 changes: 8 additions & 7 deletions pyiceberg/table/__init__.py
@@ -187,6 +187,14 @@ class TableProperties:
WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit"
WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0

WRITE_PY_LOCATION_PROVIDER_IMPL = "write.py-location-provider.impl"

OBJECT_STORE_ENABLED = "write.object-storage.enabled"
OBJECT_STORE_ENABLED_DEFAULT = False

WRITE_OBJECT_STORE_PARTITIONED_PATHS = "write.object-storage.partitioned-paths"
WRITE_OBJECT_STORE_PARTITIONED_PATHS_DEFAULT = True

DELETE_MODE = "write.delete.mode"
DELETE_MODE_COPY_ON_WRITE = "copy-on-write"
DELETE_MODE_MERGE_ON_READ = "merge-on-read"
@@ -1613,13 +1621,6 @@ def generate_data_file_filename(self, extension: str) -> str:
# https://github.com/apache/iceberg/blob/a582968975dd30ff4917fbbe999f1be903efac02/core/src/main/java/org/apache/iceberg/io/OutputFileFactory.java#L92-L101
return f"00000-{self.task_id}-{self.write_uuid}.{extension}"

def generate_data_file_path(self, extension: str) -> str:
if self.partition_key:
file_path = f"{self.partition_key.to_path()}/{self.generate_data_file_filename(extension)}"
return file_path
else:
return self.generate_data_file_filename(extension)


@dataclass(frozen=True)
class AddFileTask:
143 changes: 143 additions & 0 deletions pyiceberg/table/locations.py
@@ -0,0 +1,143 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import importlib
import logging
from abc import ABC, abstractmethod
from typing import Optional

import mmh3

from pyiceberg.partitioning import PartitionKey
from pyiceberg.table import TableProperties
from pyiceberg.typedef import Properties
from pyiceberg.utils.properties import property_as_bool

logger = logging.getLogger(__name__)


class LocationProvider(ABC):
"""A base class for location providers, that provide data file locations for write tasks."""

table_location: str
table_properties: Properties

def __init__(self, table_location: str, table_properties: Properties):
self.table_location = table_location
self.table_properties = table_properties

@abstractmethod
def new_data_location(self, data_file_name: str, partition_key: Optional[PartitionKey] = None) -> str:
"""Return a fully-qualified data file location for the given filename.

Args:
data_file_name (str): The name of the data file.
partition_key (Optional[PartitionKey]): The data file's partition key. If None, the data is not partitioned.

Returns:
str: A fully-qualified location URI for the data file.
"""


class SimpleLocationProvider(LocationProvider):
def __init__(self, table_location: str, table_properties: Properties):
super().__init__(table_location, table_properties)

def new_data_location(self, data_file_name: str, partition_key: Optional[PartitionKey] = None) -> str:
prefix = f"{self.table_location}/data"
return f"{prefix}/{partition_key.to_path()}/{data_file_name}" if partition_key else f"{prefix}/{data_file_name}"


HASH_BINARY_STRING_BITS = 20
ENTROPY_DIR_LENGTH = 4
ENTROPY_DIR_DEPTH = 3
Contributor:
nit: move these into ObjectStoreLocationProvider

Contributor Author:
Makes sense, especially given the file has now grown. It's pretty unreadable to prefix all the constants here with ObjectStoreLocationProvider though - I'll think about this.

Contributor:
We had issues dealing with constants in the file itself: https://github.com/apache/iceberg-python/pull/1217/files#diff-942c2f54eac4f30f1a1e2fa18b719e17cc1cb03ad32908a402c4ba3abe9eca63L37-L38

If it's only used in ObjectStoreLocationProvider, I think it's better to be in the class. But also, this is a nit comment :P

Contributor Author:
I fully agree that it should be within the class - will find a way to do it readably 👍
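For illustration, a minimal sketch of how the nit could be addressed, with the constants living on the class that follows (a suggestion only, not part of this diff; the module-level mmh3 import from the top of the file is assumed):

class ObjectStoreLocationProvider(LocationProvider):
    HASH_BINARY_STRING_BITS = 20
    ENTROPY_DIR_LENGTH = 4
    ENTROPY_DIR_DEPTH = 3

    @staticmethod
    def _compute_hash(data_file_name: str) -> str:
        # Same bit manipulation as in the actual diff below, but reading the class-level constants.
        bits = ObjectStoreLocationProvider.HASH_BINARY_STRING_BITS
        hash_code = mmh3.hash(data_file_name) & ((1 << bits) - 1) | (1 << bits)
        return ObjectStoreLocationProvider._dirs_from_hash(bin(hash_code)[-bits:])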

class ObjectStoreLocationProvider(LocationProvider):
_include_partition_paths: bool

def __init__(self, table_location: str, table_properties: Properties):
super().__init__(table_location, table_properties)
self._include_partition_paths = property_as_bool(
self.table_properties,
TableProperties.WRITE_OBJECT_STORE_PARTITIONED_PATHS,
TableProperties.WRITE_OBJECT_STORE_PARTITIONED_PATHS_DEFAULT,
)

def new_data_location(self, data_file_name: str, partition_key: Optional[PartitionKey] = None) -> str:
Contributor Author (@smaheshwar-pltr), Dec 20, 2024:
Tried to make this as consistent with its Java counterpart so file locations are consistent too. This means hashing on both the partition key and the data file name below, and using the same hash function.

Seemed reasonable to port over the object storage stuff in this PR, given that the original issue #861 mentions this.

Contributor:
Since Iceberg is mainly focused on object stores, I'm leaning towards making the ObjectStorageLocationProvider the default. Java is a great source of inspiration, but it also holds a lot of historical decisions that are not easy to change, so we should reconsider this at PyIceberg.

Contributor Author:
Thanks for this great suggestion and context! I agree:

  • I made this the default. The MANIFEST_MERGE_ENABLED_DEFAULT property already differs from Java and the docs, which reassures me. I did still add a short comment beside OBJECT_STORE_ENABLED_DEFAULT to indicate that it differs.
  • I renamed DefaultLocationProvider to SimpleLocationProvider because it's no longer the default.

Contributor Author:
^ cc @kevinjqliu, how does this sound to you? I realise the concerns you raised about things silently working differently between Java and PyIceberg seem to contradict the above a little (but I think it's fine).

Contributor Author (@smaheshwar-pltr), Jan 9, 2025:
Also, I've not yet changed WRITE_OBJECT_STORE_PARTITIONED_PATHS_DEFAULT to False (Java/docs have true) even though that's more aligned with object storage - from the docs:

> We have also added a new table property write.object-storage.partitioned-paths that if set to false(default=true), this will omit the partition values from the file path. Iceberg does not need these values in the file path and setting this value to false can further reduce the key size.

I'm very open to being swayed / discussing this. After reading through apache/iceberg#11112, it seems there was a strong case for still supporting partition values in paths, though I haven't been able to flesh it out fully. Perhaps it's backwards compatibility, for folks that inspect storage to see how their files are actually laid out; it does group them together nicely.

I'd be happy to change the default if there's a reason for it. The readability of file paths will arguably decrease with these hashes anyway, so the above might be a non-issue.
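To make the trade-off concrete, here is roughly what the two layouts look like for a partitioned write (bucket, table, partition column, and hash digits are made up for illustration; the placeholder stands in for the usual 00000-<task>-<uuid> file name):

# write.object-storage.partitioned-paths = true (default): partition values are kept, after the injected entropy directories
s3://bucket/db/table/data/0101/0110/1010/01011010/part_col=1/00000-0-<uuid>.parquet

# write.object-storage.partitioned-paths = false: partition values are omitted and the hash is joined to the file name with "-"
s3://bucket/db/table/data/0110/1010/0101/10100101-00000-0-<uuid>.parquet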

Contributor:
While I'm in favor of making ObjectStorageLocationProvider the default for pyiceberg, I'd prefer to do so in a follow-up PR. I like having this PR solely to implement the concept of LocationProvider and the ObjectStorageProvider.

Contributor Author:
> While I'm in favor of making ObjectStorageLocationProvider the default for pyiceberg, I'd prefer to do so in a follow-up PR. I like having this PR solely to implement the concept of LocationProvider and the ObjectStorageProvider.

Makes sense! We can have the discussion regarding defaults there. I'd like to keep the SimpleLocationProvider naming change from Default here, though, and discuss which provider should be the default in the next PR.

Contributor:
SGTM! 🚀

if self._include_partition_paths and partition_key:
return self.new_data_location(f"{partition_key.to_path()}/{data_file_name}")

prefix = f"{self.table_location}/data"
hashed_path = self._compute_hash(data_file_name)

return (
f"{prefix}/{hashed_path}/{data_file_name}"
if self._include_partition_paths
else f"{prefix}/{hashed_path}-{data_file_name}"
Contributor Author (@smaheshwar-pltr), Dec 20, 2024:
Interesting that disabling include_partition_paths affects paths of non-partitioned data files. I've matched Java behaviour here, but it does feel odd.

Contributor:
This is an interesting case - do we have a test to show this behavior explicitly? I think it'll be valuable to refer to it at a later time.
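A rough sketch of the behaviour in question, checked directly against the provider (the location and property strings are illustrative only):

provider = ObjectStoreLocationProvider(
    table_location="s3://bucket/db/table",
    table_properties={"write.object-storage.partitioned-paths": "false"},
)
location = provider.new_data_location("file.parquet")
# Even with no partition key, disabling partitioned paths joins the entropy hash
# to the file name with "-" instead of placing the file under a final "/" directory.
assert location.startswith("s3://bucket/db/table/data/")
assert location.endswith("-file.parquet")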

)

@staticmethod
def _compute_hash(data_file_name: str) -> str:
# Bitwise AND to combat sign-extension; bitwise OR to preserve leading zeroes that `bin` would otherwise strip.
hash_code = mmh3.hash(data_file_name) & ((1 << HASH_BINARY_STRING_BITS) - 1) | (1 << HASH_BINARY_STRING_BITS)
return ObjectStoreLocationProvider._dirs_from_hash(bin(hash_code)[-HASH_BINARY_STRING_BITS:])

@staticmethod
def _dirs_from_hash(file_hash: str) -> str:
"""Divides hash into directories for optimized orphan removal operation using ENTROPY_DIR_DEPTH and ENTROPY_DIR_LENGTH."""
hash_with_dirs = []
for i in range(0, ENTROPY_DIR_DEPTH * ENTROPY_DIR_LENGTH, ENTROPY_DIR_LENGTH):
hash_with_dirs.append(file_hash[i : i + ENTROPY_DIR_LENGTH])

if len(file_hash) > ENTROPY_DIR_DEPTH * ENTROPY_DIR_LENGTH:
hash_with_dirs.append(file_hash[ENTROPY_DIR_DEPTH * ENTROPY_DIR_LENGTH :])

return "/".join(hash_with_dirs)


def _import_location_provider(
location_provider_impl: str, table_location: str, table_properties: Properties
) -> Optional[LocationProvider]:
try:
path_parts = location_provider_impl.split(".")
if len(path_parts) < 2:
raise ValueError(
f"{TableProperties.WRITE_PY_LOCATION_PROVIDER_IMPL} should be full path (module.CustomLocationProvider), got: {location_provider_impl}"
)
module_name, class_name = ".".join(path_parts[:-1]), path_parts[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_(table_location, table_properties)
except ModuleNotFoundError:
logger.warning("Could not initialize LocationProvider: %s", location_provider_impl)
return None


def load_location_provider(table_location: str, table_properties: Properties) -> LocationProvider:
table_location = table_location.rstrip("/")

if location_provider_impl := table_properties.get(TableProperties.WRITE_PY_LOCATION_PROVIDER_IMPL):
if location_provider := _import_location_provider(location_provider_impl, table_location, table_properties):
logger.info("Loaded LocationProvider: %s", location_provider_impl)
return location_provider
else:
raise ValueError(f"Could not initialize LocationProvider: {location_provider_impl}")

if property_as_bool(table_properties, TableProperties.OBJECT_STORE_ENABLED, TableProperties.OBJECT_STORE_ENABLED_DEFAULT):
return ObjectStoreLocationProvider(table_location, table_properties)
else:
return SimpleLocationProvider(table_location, table_properties)
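To round out the new write.py-location-provider.impl property, a hedged sketch of a custom provider and how it might be wired up (module, class, and prefix names below are hypothetical):

# mypkg/locations.py - a hypothetical module on the Python path
from typing import Optional

from pyiceberg.partitioning import PartitionKey
from pyiceberg.table.locations import LocationProvider


class FlatLocationProvider(LocationProvider):
    """Write every data file under a single flat prefix, ignoring partition paths."""

    def new_data_location(self, data_file_name: str, partition_key: Optional[PartitionKey] = None) -> str:
        return f"{self.table_location}/data/flat/{data_file_name}"

Setting the table property "write.py-location-provider.impl" to "mypkg.locations.FlatLocationProvider" would then make load_location_provider import the module, resolve the class, and construct it with the table location and properties, as shown above.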
39 changes: 39 additions & 0 deletions tests/integration/test_writes/test_partitioned_writes.py
@@ -28,6 +28,7 @@
from pyiceberg.exceptions import NoSuchTableError
from pyiceberg.partitioning import PartitionField, PartitionSpec
from pyiceberg.schema import Schema
from pyiceberg.table import TableProperties
from pyiceberg.transforms import (
BucketTransform,
DayTransform,
@@ -280,6 +281,44 @@ def test_query_filter_v1_v2_append_null(
assert df.where(f"{col} is null").count() == 2, f"Expected 2 null rows for {col}"


@pytest.mark.integration
@pytest.mark.parametrize(
"part_col", ["int", "bool", "string", "string_long", "long", "float", "double", "date", "timestamp", "timestamptz", "binary"]
)
@pytest.mark.parametrize("format_version", [1, 2])
def test_object_storage_location_provider_excludes_partition_path(
session_catalog: Catalog, spark: SparkSession, arrow_table_with_null: pa.Table, part_col: str, format_version: int
) -> None:
nested_field = TABLE_SCHEMA.find_field(part_col)
partition_spec = PartitionSpec(
PartitionField(source_id=nested_field.field_id, field_id=1001, transform=IdentityTransform(), name=part_col)
)

tbl = _create_table(
session_catalog=session_catalog,
identifier=f"default.arrow_table_v{format_version}_with_null_partitioned_on_col_{part_col}",
# write.object-storage.partitioned-paths defaults to True
properties={"format-version": str(format_version), TableProperties.OBJECT_STORE_ENABLED: True},
data=[arrow_table_with_null],
partition_spec=partition_spec,
)

original_paths = tbl.inspect.data_files().to_pydict()["file_path"]
assert len(original_paths) == 3

# Update props to exclude partitioned paths and append data
with tbl.transaction() as tx:
tx.set_properties({TableProperties.WRITE_OBJECT_STORE_PARTITIONED_PATHS: False})
tbl.append(arrow_table_with_null)

added_paths = set(tbl.inspect.data_files().to_pydict()["file_path"]) - set(original_paths)
assert len(added_paths) == 3

# All paths before the props update should contain the partition, while all paths after should not
assert all(f"{part_col}=" in path for path in original_paths)
assert all(f"{part_col}=" not in path for path in added_paths)


@pytest.mark.integration
@pytest.mark.parametrize(
"spec",
27 changes: 27 additions & 0 deletions tests/integration/test_writes/test_writes.py
@@ -285,6 +285,33 @@ def test_data_files(spark: SparkSession, session_catalog: Catalog, arrow_table_w
assert [row.deleted_data_files_count for row in rows] == [0, 1, 0, 0, 0]


@pytest.mark.integration
@pytest.mark.parametrize("format_version", [1, 2])
def test_object_storage_data_files(
spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int
) -> None:
tbl = _create_table(
session_catalog=session_catalog,
identifier="default.object_stored",
properties={"format-version": format_version, TableProperties.OBJECT_STORE_ENABLED: True},
data=[arrow_table_with_null],
)
tbl.append(arrow_table_with_null)

paths = tbl.inspect.data_files().to_pydict()["file_path"]
assert len(paths) == 2

for location in paths:
assert location.startswith("s3://warehouse/default/object_stored/data/")
parts = location.split("/")
assert len(parts) == 11

# Entropy binary directories should have been injected
for dir_name in parts[6:10]:
assert dir_name
assert all(c in "01" for c in dir_name)


@pytest.mark.integration
def test_python_writes_with_spark_snapshot_reads(
spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table