Commit 51bbde9: first working commit of fits file creation
1 parent a826db3

7 files changed: +261 -130 lines

padre_meddea/calibration/calibration.py

Lines changed: 77 additions & 55 deletions
@@ -18,6 +18,11 @@
 
 from padre_meddea.util.util import create_science_filename, calc_time
 from padre_meddea.io.file_tools import read_raw_file
+from padre_meddea.io.fits_tools import (
+    add_process_info_to_header,
+    get_primary_header,
+    get_std_comment,
+)
 
 __all__ = [
     "process_file",
@@ -51,40 +56,28 @@ def process_file(filename: Path, overwrite=False) -> list:
     parsed_data = read_raw_file(file_path)
     if parsed_data["photons"] is not None:  # we have event list data
         event_list, pkt_list = parsed_data["photons"]
+        primary_hdr = get_primary_header()
+        primary_hdr = add_process_info_to_header(primary_hdr)
+        primary_hdr["LEVEL"] = (0, get_std_comment("LEVEL"))
+        primary_hdr["DATATYPE"] = ("event_list", get_std_comment("DATATYPE"))
+        primary_hdr["ORIGAPID"] = (
+            padre_meddea.APID["photon"],
+            get_std_comment("ORIGAPID"),
+        )
+        primary_hdr["ORIGFILE"] = (file_path.name, get_std_comment("ORIGFILE"))
 
-        primary_hdr = fits.Header()
-
-        # fill in metadata
-        primary_hdr["DATE"] = (Time.now().fits, "FITS file creation date in UTC")
-        for this_keyword, this_str in zip(
-            ["DATE-BEG", "DATE-END", "DATE-AVG"],
-            [
-                "Acquisition start time",
-                "Acquisition end time",
-                "Average time of acquisition",
-            ],
-        ):
+        for this_keyword in ["DATE-BEG", "DATE-END", "DATE-AVG"]:
             primary_hdr[this_keyword] = (
                 event_list.meta.get(this_keyword, ""),
-                this_str,
+                get_std_comment(this_keyword),
             )
 
-        primary_hdr["LEVEL"] = (0, "Data level of fits file")
-
-        # add processing information
-        primary_hdr = add_process_info_to_header(primary_hdr)
-
-        # custom keywords
-        primary_hdr["DATATYPE"] = ("event_list", "Description of the data")
-        primary_hdr["ORIGAPID"] = (padre_meddea.APID["photon"], "APID(s) of the originating data")
-        primary_hdr["ORIGFILE"] = (file_path.name, "Originating file(s)")
-
-        empty_primary = fits.PrimaryHDU(header=primary_hdr)
+        empty_primary_hdu = fits.PrimaryHDU(header=primary_hdr)
         pkt_hdu = fits.BinTableHDU(pkt_list, name="PKT")
         pkt_hdu.add_checksum()
         hit_hdu = fits.BinTableHDU(event_list, name="SCI")
         hit_hdu.add_checksum()
-        hdul = fits.HDUList([empty_primary, hit_hdu, pkt_hdu])
+        hdul = fits.HDUList([empty_primary_hdu, hit_hdu, pkt_hdu])
 
         path = create_science_filename(
             time=primary_hdr["DATE-BEG"],
@@ -102,43 +95,77 @@ def process_file(filename: Path, overwrite=False) -> list:
 
         # Write the file, with the overwrite option controlled by the environment variable
         hdul.writeto(path, overwrite=overwrite)
-
         # Store the output file path in a list
        output_files.append(path)
     if parsed_data["housekeeping"] is not None:
         hk_data = parsed_data["housekeeping"]
         # send data to AWS Timestream for Grafana dashboard
         record_timeseries(hk_data, "housekeeping")
         hk_table = Table(hk_data)
-        primary_hdr = fits.Header()
-        # fill in metadata
-        primary_hdr["DATE"] = (Time.now().fits, "FITS file creation date in UTC")
-        primary_hdr["LEVEL"] = (0, "Data level of fits file")
-        primary_hdr["DATATYPE"] = ("housekeeping", "Description of the data")
-        primary_hdr["ORIGAPID"] = (padre_meddea.APID["housekeeping"], "APID(s) of the originating data")
-        primary_hdr["ORIGFILE"] = (file_path.name, "Originating file(s)")
-        date_beg = calc_time(hk_data['timestamp'][0])
-        primary_hdr["DATEREF"] = (date_beg.fits, "Reference date")
-
-        # add processing information
-        primary_hdr = add_process_info_to_header(primary_hdr)
 
-        # add common fits keywords
-        fits_meta = read_fits_keyword_file(
-            padre_meddea._data_directory / "fits_keywords_primaryhdu.csv"
+        primary_hdr = get_primary_header()
+        primary_hdr = add_process_info_to_header(primary_hdr)
+        primary_hdr["LEVEL"] = (0, get_std_comment("LEVEL"))
+        primary_hdr["DATATYPE"] = ("housekeeping", get_std_comment("DATATYPE"))
+        primary_hdr["ORIGAPID"] = (
+            padre_meddea.APID["housekeeping"],
+            get_std_comment("ORIGAPID"),
         )
-        for row in fits_meta:
-            primary_hdr[row["keyword"]] = (row["value"], row["comment"])
-        hk_table['seqcount'] = hk_table["CCSDS_SEQUENCE_COUNT"]
-        colnames_to_remove = ["CCSDS_VERSION_NUMBER", "CCSDS_PACKET_TYPE", "CCSDS_SECONDARY_FLAG", "CCSDS_SEQUENCE_FLAG", "CCSDS_APID", "CCSDS_SEQUENCE_COUNT", "CCSDS_PACKET_LENGTH", "CHECKSUM", "time"]
+        primary_hdr["ORIGFILE"] = (file_path.name, get_std_comment("ORIGFILE"))
+
+        date_beg = calc_time(hk_data["timestamp"][0])
+        primary_hdr["DATEREF"] = (date_beg.fits, get_std_comment("DATEREF"))
+
+        hk_table["seqcount"] = hk_table["CCSDS_SEQUENCE_COUNT"]
+        colnames_to_remove = [
+            "CCSDS_VERSION_NUMBER",
+            "CCSDS_PACKET_TYPE",
+            "CCSDS_SECONDARY_FLAG",
+            "CCSDS_SEQUENCE_FLAG",
+            "CCSDS_APID",
+            "CCSDS_SEQUENCE_COUNT",
+            "CCSDS_PACKET_LENGTH",
+            "CHECKSUM",
+            "time",
+        ]
         for this_col in colnames_to_remove:
             if this_col in hk_table.colnames:
                 hk_table.remove_column(this_col)
 
-        empty_primary = fits.PrimaryHDU(header=primary_hdr)
-        hk_hdu = fits.BinTableHDU(hk_table, name="HK")
+        empty_primary_hdu = fits.PrimaryHDU(header=primary_hdr)
+        hk_hdu = fits.BinTableHDU(data=hk_table, name="HK")
         hk_hdu.add_checksum()
-        hdul = fits.HDUList([empty_primary, hk_hdu])
+
+        # add command response data if it exists
+        if parsed_data["cmd_resp"] is not None:
+            data_ts = parsed_data["cmd_resp"]
+            this_header = fits.Header()
+            this_header["DATEREF"] = (
+                data_ts.time[0].fits,
+                get_std_comment("DATEREF"),
+            )
+            record_timeseries(data_ts, "housekeeping")
+            data_table = Table(data_ts)
+            colnames_to_remove = [
+                "CCSDS_VERSION_NUMBER",
+                "CCSDS_PACKET_TYPE",
+                "CCSDS_SECONDARY_FLAG",
+                "CCSDS_SEQUENCE_FLAG",
+                "CCSDS_APID",
+                "CCSDS_SEQUENCE_COUNT",
+                "CCSDS_PACKET_LENGTH",
+                "CHECKSUM",
+                "time",
+            ]
+            for this_col in colnames_to_remove:
+                if this_col in data_table.colnames:
+                    data_table.remove_column(this_col)
+            cmd_hdu = fits.BinTableHDU(data=data_table, name="READ")
+            cmd_hdu.add_checksum()
+        else:  # if None, still add an empty binary table
+            this_header = fits.Header()
+            cmd_hdu = fits.BinTableHDU(data=None, header=this_header, name="READ")
+        hdul = fits.HDUList([empty_primary_hdu, hk_hdu, cmd_hdu])
 
         path = create_science_filename(
             time=date_beg,
@@ -149,13 +176,8 @@ def process_file(filename: Path, overwrite=False) -> list:
         )
         hdul.writeto(path, overwrite=overwrite)
         output_files.append(path)
-
-
-
-
-    # calibrated_file = calibrate_file(data_filename)
-    # data_plot_files = plot_file(data_filename)
-    # calib_plot_files = plot_file(calibrated_file)
+    if parsed_data["spectra"] is not None:
+        spec_data = parsed_data["spectra"]
 
     # add other tasks below
     return output_files
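
Note on the refactor above: get_primary_header, add_process_info_to_header, and get_std_comment are imported from padre_meddea.io.fits_tools, whose implementation is not shown in this diff. Below is a minimal sketch of how such helpers could be backed by the two CSV data files added in this commit; the CSV file names and module-level table variables are illustrative, not the actual fits_tools code.

# Sketch only: one plausible backing for the helpers imported from
# padre_meddea.io.fits_tools. File names below are placeholders.
import re

from astropy.io import ascii, fits

# the keyword-to-comment table and the primary-header defaults added in this commit
COMMENT_TABLE = ascii.read("fits_keyword_comments.csv", format="csv")     # hypothetical name
DEFAULTS_TABLE = ascii.read("fits_keywords_primaryhdu.csv", format="csv")  # name taken from the removed code


def get_std_comment(keyword: str) -> str:
    """Return the standard comment for a keyword. Rows whose keyword column is a
    regular expression (e.g. PRSTEP(?P<count>[1-9])) match whole numbered families."""
    for row in COMMENT_TABLE:
        match = re.fullmatch(row["keyword"], keyword)
        if match:
            comment = row["comment"]
            if "count" in match.groupdict():
                # substitute the captured digit into placeholders like PRSTEP<count>
                comment = comment.replace("<count>", match["count"])
            return comment
    return ""


def get_primary_header() -> fits.Header:
    """Start a primary header from the mission-wide default keyword values."""
    header = fits.Header()
    for row in DEFAULTS_TABLE:
        header[row["keyword"]] = (row["value"], get_std_comment(row["keyword"]))
    return header

Driving every header comment through one lookup table is what lets the event-list and housekeeping branches above share the same metadata wording instead of repeating string literals.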
Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
+keyword,comment
+AUTHOR,Who designed the observation
+CREATOR,Name of software pipeline that produced the FITS file
+DETECTOR,Name of the detector
+INFO_URL,a human-readable web page describing the data
+OBSRVTRY,Name of the observatory
+TIMESYS,Time scale of the time-related keywords
+TELESCOP,Telescope/Sensor name
+INSTRUME,Instrument name
+MISSION,Mission name
+ORIGIN,File originator
+DATE-BEG,Acquisition start time
+DATE-END,Acquisition end time
+DATE-AVG,Average time of acquisition
+LEVEL,Data level of fits file
+DATE,File creation date in UTC
+DATATYPE,Description of the data
+ORIGAPID,APID(s) of the originating data
+ORIGFILE,Originating file(s)
+DATEREF,Reference date
+PRSTEP(?P<count>[1-9]),Processing step type
+PRPROC(?P<count>[1-9]),Name of procedure performing PRSTEP<count>
+PRPVER(?P<count>[1-9]),Version of procedure PRPROC<count>
+PRLIB(?P<count>[1-9])A,Software library containing PRPROC<count>
+PRVER(?P<count>[1-9])A,Version of PRLIB<count>A
+PRHSH(?P<count>[1-9])A,GIT commit hash for PRLIB<count>A
+PRBRA(?P<count>[1-9])A,GIT/SVN repository branch of PRLIB<count>A
+PRVER(?P<count>[1-9])B,Date of last commit of PRLIB<count>B
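
The last eight rows above are regular-expression patterns rather than literal keywords: a single row documents a whole numbered family such as PRSTEP1 through PRSTEP9, with the <count> token in the comment standing for the captured digit. A quick hypothetical illustration of the expansion:

import re

# one pattern row from the CSV above covers PRHSH1A ... PRHSH9A
row_keyword = r"PRHSH(?P<count>[1-9])A"
row_comment = "GIT commit hash for PRLIB<count>A"

match = re.fullmatch(row_keyword, "PRHSH2A")  # a concrete header keyword
print(row_comment.replace("<count>", match["count"]))
# -> GIT commit hash for PRLIB2A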
Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+keyword,value
+AUTHOR,Steven D. Christe
+CREATOR,padre_meddea
+DETECTOR,meddea
+INFO_URL,https://padre-meddea.readthedocs.io/en/latest/user-guide/data.html
+OBSRVTRY,PADRE
+TIMESYS,UTC
+TELESCOP,PADRE/MeDDEA
+INSTRUME,MeDDEA
+MISSION,PADRE
+ORIGIN,NASA GSFC
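
These defaults would seed every primary header the pipeline writes, paired with comments drawn from the keyword table above. A hypothetical usage, assuming get_primary_header loads this file:

from padre_meddea.io.fits_tools import get_primary_header  # added in this commit

hdr = get_primary_header()
print(hdr["MISSION"])            # PADRE
print(hdr["TELESCOP"])           # PADRE/MeDDEA
print(hdr.comments["TELESCOP"])  # Telescope/Sensor name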
