Commit a2174dc

Use string paths with open_mfdataset (#1613)

1 parent 4c29fdf commit a2174dc

2 files changed: +4 -5 lines changed

tests/geospatial/test_cloud_optimize.py

Lines changed: 1 addition & 1 deletion

@@ -19,4 +19,4 @@ def test_cloud_optimize(
     with setup_benchmark(
         **scale_kwargs[scale], **cluster_kwargs
     ) as benchmark:  # noqa: F841
-        benchmark(cloud_optimize, scale, s3fs=s3, storage_url=s3_url)
+        benchmark(cloud_optimize, scale, fs=s3, storage_url=s3_url)

tests/geospatial/workloads/cloud_optimize.py

Lines changed: 3 additions & 4 deletions

@@ -5,7 +5,7 @@


 def cloud_optimize(
-    scale: Literal["small", "medium", "large"], s3fs: S3FileSystem, storage_url: str
+    scale: Literal["small", "medium", "large"], fs: S3FileSystem, storage_url: str
 ):
     models = [
         "ACCESS-CM2",
@@ -59,12 +59,11 @@ def cloud_optimize(

     # Get netCDF data files -- see https://registry.opendata.aws/nex-gddp-cmip6
     # for dataset details.
-    file_list = []
+    files = []
     for model in models:
         for variable in variables:
             data_dir = f"s3://nex-gddp-cmip6/NEX-GDDP-CMIP6/{model}/historical/r1i1p1f1/{variable}/*.nc"
-            file_list += [f"s3://{path}" for path in s3fs.glob(data_dir)]
-    files = [s3fs.open(f) for f in file_list]
+            files += [f"s3://{path}" for path in fs.glob(data_dir)]
     print(f"Processing {len(files)} NetCDF files")

     # Load input NetCDF data files
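
The diff ends just before the open_mfdataset call itself, so the consuming code is not shown in this commit. As a minimal sketch of the pattern named in the commit title -- passing the collected s3:// string paths straight to xarray rather than pre-opened s3fs file objects -- assuming xarray, h5netcdf, and s3fs are installed (the engine and keyword arguments below are illustrative, not taken from this commit):

import xarray as xr

# Hypothetical example path matching the glob pattern in the diff above.
files = [
    "s3://nex-gddp-cmip6/NEX-GDDP-CMIP6/ACCESS-CM2/historical/r1i1p1f1/tas/tas_day_ACCESS-CM2_historical_r1i1p1f1_gn_1950.nc",
]

# With s3fs installed and an engine that can read remote files (h5netcdf
# here, as an assumption), xarray opens the remote objects itself, so the
# workload no longer needs to call fs.open() on every path up front.
ds = xr.open_mfdataset(files, engine="h5netcdf", parallel=True)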
