Skip to content

Commit

Permalink
Fix fetch error handling on S3 mirror lookup: catch ValueError instead of IOError
Browse files Browse the repository at this point in the history
  • Loading branch information
satchelbaldwin committed Feb 9, 2024
1 parent d3af49d commit c855f16
Showing 1 changed file with 3 additions and 1 deletion.
4 changes: 3 additions & 1 deletion api/dataset/remote.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def open_dataset(paths: AccessURLs, job_id=None) -> xarray.Dataset:
# function handles stripping out url part, so any mirror will have the same result
ds = open_remote_dataset_s3(paths[0]["opendap"])
return ds
except IOError as e:
except ValueError as e:
print(f"file not found in s3 mirroring: {e}")

for mirror in paths:
Expand Down Expand Up @@ -99,6 +99,7 @@ def open_remote_dataset_s3(urls: List[str]) -> xarray.Dataset:


def download_file_http(url: str, dir: str, auth: Tuple[str, str] | None = None):
print(f"downloading file {url}", flush=True)
rs = requests.get(url, stream=True)
if rs.status_code == 401:
rs = requests.get(url, stream=True, auth=auth)
Expand All @@ -118,6 +119,7 @@ def open_remote_dataset_http(
with ThreadPoolExecutor() as executor:
executor.map(lambda url: download_file_http(url, temp_directory, auth), urls)
files = [os.path.join(temp_directory, f) for f in os.listdir(temp_directory)]
print(f"files: {files}", flush=True)
ds = xarray.open_mfdataset(
files,
parallel=True,
Expand Down

0 comments on commit c855f16

Please sign in to comment.