from padre_meddea.util.util import create_science_filename, calc_time
from padre_meddea.io.file_tools import read_raw_file
+from padre_meddea.io.fits_tools import (
+    add_process_info_to_header,
+    get_primary_header,
+    get_std_comment,
+)

__all__ = [
    "process_file",
@@ -51,40 +56,28 @@ def process_file(filename: Path, overwrite=False) -> list:
    parsed_data = read_raw_file(file_path)
    if parsed_data["photons"] is not None:  # we have event list data
        event_list, pkt_list = parsed_data["photons"]
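+        # fill in the primary header: processing info plus LEVEL/DATATYPE/ORIGAPID/ORIGFILE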
+        primary_hdr = get_primary_header()
+        primary_hdr = add_process_info_to_header(primary_hdr)
+        primary_hdr["LEVEL"] = (0, get_std_comment("LEVEL"))
+        primary_hdr["DATATYPE"] = ("event_list", get_std_comment("DATATYPE"))
+        primary_hdr["ORIGAPID"] = (
+            padre_meddea.APID["photon"],
+            get_std_comment("ORIGAPID"),
+        )
+        primary_hdr["ORIGFILE"] = (file_path.name, get_std_comment("ORIGFILE"))

-        primary_hdr = fits.Header()
-
-        # fill in metadata
-        primary_hdr["DATE"] = (Time.now().fits, "FITS file creation date in UTC")
-        for this_keyword, this_str in zip(
-            ["DATE-BEG", "DATE-END", "DATE-AVG"],
-            [
-                "Acquisition start time",
-                "Acquisition end time",
-                "Average time of acquisition",
-            ],
-        ):
+        for this_keyword in ["DATE-BEG", "DATE-END", "DATE-AVG"]:
            primary_hdr[this_keyword] = (
                event_list.meta.get(this_keyword, ""),
-                this_str,
+                get_std_comment(this_keyword),
            )

-        primary_hdr["LEVEL"] = (0, "Data level of fits file")
-
-        # add processing information
-        primary_hdr = add_process_info_to_header(primary_hdr)
-
-        # custom keywords
-        primary_hdr["DATATYPE"] = ("event_list", "Description of the data")
-        primary_hdr["ORIGAPID"] = (padre_meddea.APID["photon"], "APID(s) of the originating data")
-        primary_hdr["ORIGFILE"] = (file_path.name, "Originating file(s)")
-
-        empty_primary = fits.PrimaryHDU(header=primary_hdr)
+        empty_primary_hdu = fits.PrimaryHDU(header=primary_hdr)
        pkt_hdu = fits.BinTableHDU(pkt_list, name="PKT")
        pkt_hdu.add_checksum()
        hit_hdu = fits.BinTableHDU(event_list, name="SCI")
        hit_hdu.add_checksum()
-        hdul = fits.HDUList([empty_primary, hit_hdu, pkt_hdu])
+        hdul = fits.HDUList([empty_primary_hdu, hit_hdu, pkt_hdu])

        path = create_science_filename(
            time=primary_hdr["DATE-BEG"],
@@ -102,43 +95,77 @@ def process_file(filename: Path, overwrite=False) -> list:

        # Write the file, with the overwrite option controlled by the environment variable
        hdul.writeto(path, overwrite=overwrite)
-
        # Store the output file path in a list
        output_files.append(path)
    if parsed_data["housekeeping"] is not None:
        hk_data = parsed_data["housekeeping"]
        # send data to AWS Timestream for Grafana dashboard
        record_timeseries(hk_data, "housekeeping")
        hk_table = Table(hk_data)
-        primary_hdr = fits.Header()
-        # fill in metadata
-        primary_hdr["DATE"] = (Time.now().fits, "FITS file creation date in UTC")
-        primary_hdr["LEVEL"] = (0, "Data level of fits file")
-        primary_hdr["DATATYPE"] = ("housekeeping", "Description of the data")
-        primary_hdr["ORIGAPID"] = (padre_meddea.APID["housekeeping"], "APID(s) of the originating data")
-        primary_hdr["ORIGFILE"] = (file_path.name, "Originating file(s)")
-        date_beg = calc_time(hk_data['timestamp'][0])
-        primary_hdr["DATEREF"] = (date_beg.fits, "Reference date")
-
-        # add processing information
-        primary_hdr = add_process_info_to_header(primary_hdr)

-        # add common fits keywords
-        fits_meta = read_fits_keyword_file(
-            padre_meddea._data_directory / "fits_keywords_primaryhdu.csv"
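+        # fill in the housekeeping primary header the same way as for event lists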
+        primary_hdr = get_primary_header()
+        primary_hdr = add_process_info_to_header(primary_hdr)
+        primary_hdr["LEVEL"] = (0, get_std_comment("LEVEL"))
+        primary_hdr["DATATYPE"] = ("housekeeping", get_std_comment("DATATYPE"))
+        primary_hdr["ORIGAPID"] = (
+            padre_meddea.APID["housekeeping"],
+            get_std_comment("ORIGAPID"),
        )
-        for row in fits_meta:
-            primary_hdr[row["keyword"]] = (row["value"], row["comment"])
-        hk_table['seqcount'] = hk_table["CCSDS_SEQUENCE_COUNT"]
-        colnames_to_remove = ["CCSDS_VERSION_NUMBER", "CCSDS_PACKET_TYPE", "CCSDS_SECONDARY_FLAG", "CCSDS_SEQUENCE_FLAG", "CCSDS_APID", "CCSDS_SEQUENCE_COUNT", "CCSDS_PACKET_LENGTH", "CHECKSUM", "time"]
+        primary_hdr["ORIGFILE"] = (file_path.name, get_std_comment("ORIGFILE"))
+
+        date_beg = calc_time(hk_data["timestamp"][0])
+        primary_hdr["DATEREF"] = (date_beg.fits, get_std_comment("DATEREF"))
+
+        hk_table["seqcount"] = hk_table["CCSDS_SEQUENCE_COUNT"]
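+        # remove raw packet bookkeeping columns that are not needed in the FITS table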
+        colnames_to_remove = [
+            "CCSDS_VERSION_NUMBER",
+            "CCSDS_PACKET_TYPE",
+            "CCSDS_SECONDARY_FLAG",
+            "CCSDS_SEQUENCE_FLAG",
+            "CCSDS_APID",
+            "CCSDS_SEQUENCE_COUNT",
+            "CCSDS_PACKET_LENGTH",
+            "CHECKSUM",
+            "time",
+        ]
        for this_col in colnames_to_remove:
            if this_col in hk_table.colnames:
                hk_table.remove_column(this_col)

-        empty_primary = fits.PrimaryHDU(header=primary_hdr)
-        hk_hdu = fits.BinTableHDU(hk_table, name="HK")
+        empty_primary_hdu = fits.PrimaryHDU(header=primary_hdr)
+        hk_hdu = fits.BinTableHDU(data=hk_table, name="HK")
        hk_hdu.add_checksum()
-        hdul = fits.HDUList([empty_primary, hk_hdu])
+
+        # add command response data if it exists
+        if parsed_data["cmd_resp"] is not None:
+            data_ts = parsed_data["cmd_resp"]
+            this_header = fits.Header()
+            this_header["DATEREF"] = (
+                data_ts.time[0].fits,
+                get_std_comment("DATEREF"),
+            )
+            record_timeseries(data_ts, "housekeeping")
+            data_table = Table(data_ts)
+            colnames_to_remove = [
+                "CCSDS_VERSION_NUMBER",
+                "CCSDS_PACKET_TYPE",
+                "CCSDS_SECONDARY_FLAG",
+                "CCSDS_SEQUENCE_FLAG",
+                "CCSDS_APID",
+                "CCSDS_SEQUENCE_COUNT",
+                "CCSDS_PACKET_LENGTH",
+                "CHECKSUM",
+                "time",
+            ]
+            for this_col in colnames_to_remove:
+                if this_col in data_table.colnames:
+                    data_table.remove_column(this_col)
+            cmd_hdu = fits.BinTableHDU(data=data_table, name="READ")
+            cmd_hdu.add_checksum()
+        else:  # if no command response data, still append an empty binary table
+            this_header = fits.Header()
+            cmd_hdu = fits.BinTableHDU(data=None, header=this_header, name="READ")
+        hdul = fits.HDUList([empty_primary_hdu, hk_hdu, cmd_hdu])

        path = create_science_filename(
            time=date_beg,
@@ -149,13 +176,8 @@ def process_file(filename: Path, overwrite=False) -> list:
        )
        hdul.writeto(path, overwrite=overwrite)
        output_files.append(path)
-
-
-
-
-    # calibrated_file = calibrate_file(data_filename)
-    # data_plot_files = plot_file(data_filename)
-    # calib_plot_files = plot_file(calibrated_file)
+    if parsed_data["spectra"] is not None:
+        spec_data = parsed_data["spectra"]
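+        # NOTE: spectrum data is parsed here but not yet written to an output file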

    # add other tasks below
    return output_files