-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathingest_csv.py
More file actions
71 lines (51 loc) · 1.79 KB
/
ingest_csv.py
File metadata and controls
71 lines (51 loc) · 1.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import os
from pyDataverse.utils import read_csv_as_dicts
from pyDataverse.models import Dataverse
from pyDataverse.models import Dataset
from pyDataverse.models import Datafile
from pyDataverse.api import NativeApi
# Read the dataset metadata CSV and wrap each row in a pyDataverse
# Dataset model object, collecting them into ds_lst for upload below.
pma_datasets_csv = "pma_datasets_toupload.csv"
ds_data = read_csv_as_dicts(pma_datasets_csv)


def _row_to_dataset(row):
    """Build a Dataset model populated from one CSV metadata row."""
    dataset = Dataset()
    dataset.set(row)
    return dataset


ds_lst = [_row_to_dataset(row) for row in ds_data]
# Read the datafile metadata CSV and wrap each row in a pyDataverse
# Datafile model object, collecting them into df_lst for upload below.
pma_datafiles_csv = "pma_files_toupload.csv"
df_data = read_csv_as_dicts(pma_datafiles_csv)


def _row_to_datafile(row):
    """Build a Datafile model from one CSV row, forcing it unrestricted."""
    # Every file is uploaded without access restrictions.
    row["restrict"] = False
    datafile = Datafile()
    datafile.set(row)
    return datafile


df_lst = [_row_to_datafile(row) for row in df_data]
# upload via API
# Instantiate the Native API client. The original line was left with the
# token redacted in place (`NativeApi("...", #API-KEY)`), which commented
# out the closing parenthesis and made the file a syntax error. Reading the
# token from the environment fixes the syntax error and keeps the secret
# out of the source file.
api = NativeApi("https://archive.data.jhu.edu", os.environ["DATAVERSE_API_KEY"])

# create and publish dataverses first
# # # upload datasets
# change the dv_alias to upload to different dataverses
dv_alias = "pma"

# Map each CSV-level dataset id ("org.dataset_id") to the persistent
# identifier Dataverse assigns on creation, so datafiles can be attached
# to the right dataset in the next step.
dataset_id_2_pid = {}
for ds in ds_lst:
    # for some reason we need to manually change the multiple value for
    # productionPlace - but not sure how to do this yet. A problem for
    # future Lubov
    # NOTE(review): the DOI column apparently may contain stray spaces;
    # strip them before using it as the pid — confirm against the CSV.
    resp = api.create_dataset(dv_alias, ds.json(), pid=ds.get()["org.doi"].replace(" ", ""))
    print(resp.json())
    dataset_id_2_pid[ds.get()["org.dataset_id"]] = resp.json()["data"]["persistentId"]
# Attach each datafile to the dataset it belongs to, using the persistent
# identifier recorded when the dataset was created above.
for datafile in df_lst:
    meta = datafile.get()
    parent_pid = dataset_id_2_pid[meta["org.dataset_id"]]
    print(parent_pid)
    local_path = os.path.join(os.getcwd(), meta["folder"], meta["org.filename"])
    print(local_path)
    # Record the target pid and resolved path on the model before upload.
    datafile.set({"pid": parent_pid, "filename": local_path})
    resp = api.upload_datafile(parent_pid, local_path, datafile.json())
    print(resp.json())
# # publish datasets
# for dataset_id, pid in dataset_id_2_pid.items():
# resp = api.publish_dataset(pid, "major")
# resp.json()