From af7058c46d8521137b5f2dc831c4757df8165e16 Mon Sep 17 00:00:00 2001 From: xzackli Date: Tue, 18 Jul 2023 17:23:25 -0400 Subject: [PATCH 01/13] add command-line arguments for mcm script for better division of labor --- project/data_analysis/python/get_mcm_and_bbl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/project/data_analysis/python/get_mcm_and_bbl.py b/project/data_analysis/python/get_mcm_and_bbl.py index de52430b..7a7bfb75 100644 --- a/project/data_analysis/python/get_mcm_and_bbl.py +++ b/project/data_analysis/python/get_mcm_and_bbl.py @@ -30,6 +30,11 @@ log.info(f"number of mcm matrices to compute : {n_mcms}") so_mpi.init(True) subtasks = so_mpi.taskrange(imin=0, imax=n_mcms - 1) + +if len(sys.argv) == 4: + log.info(f"computing only the mcm matrices : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] + for task in subtasks: task = int(task) sv1, ar1, sv2, ar2 = sv1_list[task], ar1_list[task], sv2_list[task], ar2_list[task] From 3720b56265897cb08d9ff878207148fc5fb1f9e5 Mon Sep 17 00:00:00 2001 From: xzackli Date: Tue, 18 Jul 2023 17:48:33 -0400 Subject: [PATCH 02/13] della slurm script --- .../paramfiles/della/8core1hr.slurm | 19 +++ .../data_analysis/paramfiles/della/README.md | 26 +++ .../paramfiles/della/global_dr6_v4.dict | 155 ++++++++++++++++++ 3 files changed, 200 insertions(+) create mode 100644 project/data_analysis/paramfiles/della/8core1hr.slurm create mode 100644 project/data_analysis/paramfiles/della/README.md create mode 100644 project/data_analysis/paramfiles/della/global_dr6_v4.dict diff --git a/project/data_analysis/paramfiles/della/8core1hr.slurm b/project/data_analysis/paramfiles/della/8core1hr.slurm new file mode 100644 index 00000000..f01c0e3a --- /dev/null +++ b/project/data_analysis/paramfiles/della/8core1hr.slurm @@ -0,0 +1,19 @@ +#! /bin/bash +# +#SBATCH --nodes=1 # node count +#SBATCH --ntasks-per-node=1 # total number of tasks per node +#SBATCH --cpus-per-task=8 # cpu-cores per task (>1 if multi-threaded tasks) +#SBATCH --mem-per-cpu=6G # memory per cpu-core (4G per cpu-core is default) +#SBATCH -t 1:00:00 +#SBATCH -p physics +#SBATCH --output=slurmoutput/R-%j.out + +cd /tigress/zequnl/PSpipe/project/data_analysis +module load intel/2021.1 openmpi/intel-2021.1/4.1.0 anaconda3/2023.3 +export MPICC=$(which mpicc) +conda activate ps + +export OMP_NUM_THREADS=8 +export JULIA_NUM_THREADS=8 + +$1 diff --git a/project/data_analysis/paramfiles/della/README.md b/project/data_analysis/paramfiles/della/README.md new file mode 100644 index 00000000..cdba9b9c --- /dev/null +++ b/project/data_analysis/paramfiles/della/README.md @@ -0,0 +1,26 @@ + + +On Della, it's better to submit SLURM jobs via script instead of interactively. 
For +convenience, let's define a commonly used job script, + +```bash +alias 8core1hr="sbatch paramfiles/della/8core1hr.slurm $1" +``` + +We need windows, + +```bash +8core1hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_window_dr6.py paramfiles/della/global_dr6_v4.dict" +``` + +We need to generate some signal spectra for the covariance matrix, + +```bash +8core1hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_best_fit_mflike.py paramfiles/della/global_dr6_v4.dict" +``` + +Next we'll need to compute the mode-coupling matrices, + +```bash +8core1hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_mcm_and_bbl.py paramfiles/della/global_dr6_v4.dict 0 1" +``` diff --git a/project/data_analysis/paramfiles/della/global_dr6_v4.dict b/project/data_analysis/paramfiles/della/global_dr6_v4.dict new file mode 100644 index 00000000..04192f19 --- /dev/null +++ b/project/data_analysis/paramfiles/della/global_dr6_v4.dict @@ -0,0 +1,155 @@ +surveys = ["dr6"] + +arrays_dr6 = ["pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"] + +map_dir = '/scratch/gpfs/ACT/dr6v4/maps/dr6v4_20230316/release/' +data_dir = '/tigress/zequnl/cmb/data/dr6/' + +deconvolve_pixwin = True +pixwin_dr6 = {"pix": 'CAR', "order": 0} +binning_file = data_dir + "binning/BIN_ACTPOL_50_4_SC_large_bin_at_low_ell" +niter = 0 +remove_mean = False +binned_mcm = False +lmax = 8500 +type = 'Dl' +write_splits_spectra = True +cov_T_E_only = False +multistep_path = data_dir +use_toeplitz_mcm = False +use_toeplitz_cov = True +use_beam_covariance = True + +#window parameters + +ps_mask_dr6_pa4_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa4_f220 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa5_f090 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa5_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa6_f090 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa6_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' + +gal_mask_dr6_pa4_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa4_f220 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa5_f090 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa5_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa6_f090 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa6_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" + +apod_pts_source_degree = 0.3 +apod_survey_degree = 2 +skip_from_edges_degree = 1 +cross_link_threshold = 0.97 +n_med_ivar = 3 + +# kspace filter parameters +apply_kspace_filter = True +kspace_tf_path = "analytical" +k_filter_dr6 = {"type":"binary_cross","vk_mask":[-90, 90], "hk_mask":[-50, 50], "weighted":False} + +deconvolve_map_maker_tf_dr6 = False + +mm_tf_dr6_pa4_f150 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa4_f220 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa5_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa5_f150 = data_dir + "transfer_fcns/tf_unity.dat" 
+mm_tf_dr6_pa6_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa6_f150 = data_dir + "transfer_fcns/tf_unity.dat" + +# maps +n_splits_dr6 = 4 +src_free_maps_dr6 = True + +maps_dr6_pa4_f150 = [map_dir + 'cmb_night_pa4_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa4_f220 = [map_dir + 'cmb_night_pa4_f220_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f090 = [map_dir + 'cmb_night_pa5_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f150 = [map_dir + 'cmb_night_pa5_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f090 = [map_dir + 'cmb_night_pa6_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f150 = [map_dir + 'cmb_night_pa6_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] + + +cal_dr6_pa4_f150 = 1.0072 +cal_dr6_pa4_f220 = 1.0340 +cal_dr6_pa5_f090 = 1.0219 +cal_dr6_pa5_f150 = 0.9877 +cal_dr6_pa6_f090 = 1.0182 +cal_dr6_pa6_f150 = 0.9699 + + +pol_eff_dr6_pa4_f150 = 0.9584 +pol_eff_dr6_pa4_f220 = 1.0 +pol_eff_dr6_pa5_f090 = 0.9646 +pol_eff_dr6_pa5_f150 = 0.9488 +pol_eff_dr6_pa6_f090 = 0.9789 +pol_eff_dr6_pa6_f150 = 0.9656 + + +passband_dir = data_dir + "passbands/" +do_bandpass_integration = True +freq_info_dr6_pa4_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa4_f150.dat"} +freq_info_dr6_pa4_f220 = {"freq_tag": 220, "passband": passband_dir + "passband_dr6_pa4_f220.dat"} +freq_info_dr6_pa5_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_dr6_pa5_f090.dat"} +freq_info_dr6_pa5_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa5_f150.dat"} +freq_info_dr6_pa6_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_dr6_pa6_f090.dat"} +freq_info_dr6_pa6_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa6_f150.dat"} + + +beam_dr6_pa4_f150 = data_dir + 'beams/20230130_beams/coadd_pa4_f150_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa4_f220 = data_dir + 'beams/20230130_beams/coadd_pa4_f220_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa5_f090 = data_dir + 'beams/20230130_beams/coadd_pa5_f090_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa5_f150 = data_dir + 'beams/20230130_beams/coadd_pa5_f150_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa6_f090 = data_dir + 'beams/20230130_beams/coadd_pa6_f090_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa6_f150 = data_dir + 'beams/20230130_beams/coadd_pa6_f150_night_beam_tform_jitter_cmb.txt' + +leakage_file_dir = data_dir + 'beams/20230130_beams/' + +leakage_beam_dr6_pa4_f150 = 'gamma_mp_uranus_pa4_f150.txt' +leakage_beam_dr6_pa4_f220 = 'gamma_mp_uranus_pa4_f220.txt' +leakage_beam_dr6_pa5_f090 = 'gamma_mp_uranus_pa5_f090.txt' +leakage_beam_dr6_pa5_f150 = 'gamma_mp_uranus_pa5_f150.txt' +leakage_beam_dr6_pa6_f090 = 'gamma_mp_uranus_pa6_f090.txt' +leakage_beam_dr6_pa6_f150 = 'gamma_mp_uranus_pa6_f150.txt' + + + + +window_T_dr6_pa4_f150 = "windows/window_dr6_pa4_f150.fits" +window_pol_dr6_pa4_f150 = "windows/window_dr6_pa4_f150.fits" + +window_T_dr6_pa4_f220 = "windows/window_dr6_pa4_f220.fits" +window_pol_dr6_pa4_f220 = "windows/window_dr6_pa4_f220.fits" + +window_T_dr6_pa5_f090 = "windows/window_dr6_pa5_f090.fits" +window_pol_dr6_pa5_f090 = "windows/window_dr6_pa5_f090.fits" + +window_T_dr6_pa5_f150 = "windows/window_dr6_pa5_f150.fits" +window_pol_dr6_pa5_f150 = "windows/window_dr6_pa5_f150.fits" + +window_T_dr6_pa6_f090 = "windows/window_dr6_pa6_f090.fits" +window_pol_dr6_pa6_f090 = "windows/window_dr6_pa6_f090.fits" + 
+window_T_dr6_pa6_f150 = "windows/window_dr6_pa6_f150.fits" +window_pol_dr6_pa6_f150 = "windows/window_dr6_pa6_f150.fits" + + +# best fit params (only used for sim generation and covariances computation) +cosmo_params = {"cosmomc_theta":0.0104085, "logA": 3.044, "ombh2": 0.02237, "omch2": 0.1200, "ns": 0.9649, "Alens": 1.0, "tau": 0.0544} +fg_norm = {"nu_0": 150.0, "ell_0": 3000, "T_CMB": 2.725} +fg_components = {'tt': ['tSZ_and_CIB', 'cibp', 'kSZ', 'radio', 'dust'], 'te': ['radio', 'dust'], 'ee': ['radio', 'dust'], 'bb': ['radio', 'dust'], 'tb': ['radio', 'dust'], 'eb': []} +fg_params = {"a_tSZ": 3.30, "a_kSZ": 1.60, "a_p": 6.90, "beta_p": 2.08, "a_c": 4.90, "beta_c": 2.20, "a_s": 3.10, "a_gtt": 8.7, "xi": 0.1, "T_d": 9.60, "a_gte": 0, "a_gtb": 0, "a_gee": 0, "a_gbb": 0, "a_pste": 0, "a_pstb": 0, "a_psee": 0, "a_psbb": 0} + +#sim +iStart = 0 +iStop = 39 +sim_alm_dtype = "complex64" +read_noise_alms_from_disk = False +noise_sim_type = "fdw" +noise_model_parameters = {"downgrade": 4, "mask_est_name": "dr6v3_20220316_baseline_union_mask", "mask_obs_name": "dr6v3_xlink_union_mask_0.001", "union_sources": "regular_20220316", "notes": "20220619"} + +#plot +range_TT = [10, 8000] +range_TE = [-150, 150] +range_ET = [-150, 150] +range_EE = [-20, 50] + +planck_data_dir = data_dir + "planck_data/" From f89edae56caa2080f5c80a333c61d27f223fe135 Mon Sep 17 00:00:00 2001 From: xzackli Date: Sun, 23 Jul 2023 14:00:25 -0400 Subject: [PATCH 03/13] add short queue options --- .../paramfiles/della/8core2hr.slurm | 19 +++++++++++++++++++ .../data_analysis/paramfiles/della/README.md | 11 ++++++++--- .../data_analysis/python/get_window_dr6.py | 5 +++++ 3 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 project/data_analysis/paramfiles/della/8core2hr.slurm diff --git a/project/data_analysis/paramfiles/della/8core2hr.slurm b/project/data_analysis/paramfiles/della/8core2hr.slurm new file mode 100644 index 00000000..a62537b9 --- /dev/null +++ b/project/data_analysis/paramfiles/della/8core2hr.slurm @@ -0,0 +1,19 @@ +#! /bin/bash +# +#SBATCH --nodes=1 # node count +#SBATCH --ntasks-per-node=1 # total number of tasks per node +#SBATCH --cpus-per-task=8 # cpu-cores per task (>1 if multi-threaded tasks) +#SBATCH --mem-per-cpu=6G # memory per cpu-core (4G per cpu-core is default) +#SBATCH -t 2:00:00 # QOS is determined by time +#SBATCH -p physics +#SBATCH --output=slurmoutput/R-%j.out + +cd /tigress/zequnl/PSpipe/project/data_analysis +module load intel/2021.1 openmpi/intel-2021.1/4.1.0 anaconda3/2023.3 +export MPICC=$(which mpicc) +conda activate ps + +export OMP_NUM_THREADS=8 +export JULIA_NUM_THREADS=8 + +$1 diff --git a/project/data_analysis/paramfiles/della/README.md b/project/data_analysis/paramfiles/della/README.md index cdba9b9c..55d1308d 100644 --- a/project/data_analysis/paramfiles/della/README.md +++ b/project/data_analysis/paramfiles/della/README.md @@ -4,13 +4,16 @@ On Della, it's better to submit SLURM jobs via script instead of interactively. 
convenience, let's define a commonly used job script, ```bash -alias 8core1hr="sbatch paramfiles/della/8core1hr.slurm $1" +alias 8core1hr="sbatch paramfiles/della/8core1hr.slurm $1" # test QOS +alias 8core2hr="sbatch paramfiles/della/8core2hr.slurm $1" # short QOS ``` We need windows, ```bash -8core1hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_window_dr6.py paramfiles/della/global_dr6_v4.dict" +for i in {0..4}; do \ + 8core2hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_window_dr6.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" +done ``` We need to generate some signal spectra for the covariance matrix, @@ -22,5 +25,7 @@ We need to generate some signal spectra for the covariance matrix, Next we'll need to compute the mode-coupling matrices, ```bash -8core1hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_mcm_and_bbl.py paramfiles/della/global_dr6_v4.dict 0 1" +for i in {0..15}; do \ + 8core2hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_mcm_and_bbl.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" +done ``` diff --git a/project/data_analysis/python/get_window_dr6.py b/project/data_analysis/python/get_window_dr6.py index 4e984b72..2e468ea1 100644 --- a/project/data_analysis/python/get_window_dr6.py +++ b/project/data_analysis/python/get_window_dr6.py @@ -66,6 +66,11 @@ def create_crosslink_mask(xlink_map, cross_link_threshold): so_mpi.init(True) subtasks = so_mpi.taskrange(imin=0, imax=n_wins - 1) + +if len(sys.argv) == 4: + log.info(f"computing only the windows : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] + for task in subtasks: task = int(task) sv, ar = sv_list[task], ar_list[task] From 5a33a24adae5df46fd6475e334f88adec127a8c4 Mon Sep 17 00:00:00 2001 From: xzackli Date: Wed, 26 Jul 2023 23:26:41 -0400 Subject: [PATCH 04/13] add slurm features to alms and gaussian covmat --- .../paramfiles/della/10core2hr.slurm | 19 +++++++++++++++++++ project/data_analysis/python/get_alms.py | 5 +++++ .../python/get_covariance_blocks.py | 5 +++++ 3 files changed, 29 insertions(+) create mode 100644 project/data_analysis/paramfiles/della/10core2hr.slurm diff --git a/project/data_analysis/paramfiles/della/10core2hr.slurm b/project/data_analysis/paramfiles/della/10core2hr.slurm new file mode 100644 index 00000000..0e3206dc --- /dev/null +++ b/project/data_analysis/paramfiles/della/10core2hr.slurm @@ -0,0 +1,19 @@ +#! 
/bin/bash +# +#SBATCH --nodes=1 # node count +#SBATCH --ntasks-per-node=1 # total number of tasks per node +#SBATCH --cpus-per-task=10 # cpu-cores per task (>1 if multi-threaded tasks) +#SBATCH --mem-per-cpu=8G # memory per cpu-core (4G per cpu-core is default) +#SBATCH -t 2:00:00 # QOS is determined by time +#SBATCH -p physics +#SBATCH --output=slurmoutput/R-%j.out + +cd /tigress/zequnl/PSpipe/project/data_analysis +module load intel/2021.1 openmpi/intel-2021.1/4.1.0 anaconda3/2023.3 +export MPICC=$(which mpicc) +conda activate ps + +export OMP_NUM_THREADS=10 +export JULIA_NUM_THREADS=10 + +$1 diff --git a/project/data_analysis/python/get_alms.py b/project/data_analysis/python/get_alms.py index 08353a9e..0c8a8582 100644 --- a/project/data_analysis/python/get_alms.py +++ b/project/data_analysis/python/get_alms.py @@ -33,6 +33,11 @@ so_mpi.init(True) subtasks = so_mpi.taskrange(imin=0, imax=n_ar-1) +if len(sys.argv) == 4: + log.info(f"computing only the alms : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] +log.info(subtasks) + for task in subtasks: task = int(task) sv, ar = sv_list[task], ar_list[task] diff --git a/project/data_analysis/python/get_covariance_blocks.py b/project/data_analysis/python/get_covariance_blocks.py index ab03df09..1611a1f4 100644 --- a/project/data_analysis/python/get_covariance_blocks.py +++ b/project/data_analysis/python/get_covariance_blocks.py @@ -79,7 +79,12 @@ log.info(f"number of covariance matrices to compute : {ncovs}") so_mpi.init(True) subtasks = so_mpi.taskrange(imin=0, imax=ncovs - 1) + +if len(sys.argv) == 4: + log.info(f"computing only the covariance matrices : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] log.info(subtasks) + for task in subtasks: task = int(task) na, nb, nc, nd = na_list[task], nb_list[task], nc_list[task], nd_list[task] From c1bcbc2305e1c3b6c20c180e3b3168d249b4e42a Mon Sep 17 00:00:00 2001 From: xzackli Date: Wed, 26 Jul 2023 23:30:19 -0400 Subject: [PATCH 05/13] add more della configs --- .../data_analysis/paramfiles/della/README.md | 30 +++++++++++++++++-- .../paramfiles/della/global_dr6_v4.dict | 12 ++++---- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/project/data_analysis/paramfiles/della/README.md b/project/data_analysis/paramfiles/della/README.md index 55d1308d..3f319d19 100644 --- a/project/data_analysis/paramfiles/della/README.md +++ b/project/data_analysis/paramfiles/della/README.md @@ -5,14 +5,14 @@ convenience, let's define a commonly used job script, ```bash alias 8core1hr="sbatch paramfiles/della/8core1hr.slurm $1" # test QOS -alias 8core2hr="sbatch paramfiles/della/8core2hr.slurm $1" # short QOS +alias 10core2hr="sbatch paramfiles/della/10core2hr.slurm $1" # short QOS ``` We need windows, ```bash for i in {0..4}; do \ - 8core2hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_window_dr6.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" + 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_window_dr6.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" done ``` @@ -26,6 +26,30 @@ Next we'll need to compute the mode-coupling matrices, ```bash for i in {0..15}; do \ - 8core2hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_mcm_and_bbl.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" + 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_mcm_and_bbl.py paramfiles/della/global_dr6_v4.dict 
$i $((i+1))" +done +``` + +Next, we compute the alms of the maps. + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_alms.py paramfiles/della/global_dr6_v4.dict" +``` + +Now spectra, + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_spectra_from_alms.py paramfiles/della/global_dr6_v4.dict" +``` + +Now we compute the analytic covariances blocks assuming an isotropic noise model. + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_sq_windows_alms.py paramfiles/della/global_dr6_v4.dict" +``` + +```bash +for i in {0..0}; do \ + 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_covariance_blocks.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" done ``` diff --git a/project/data_analysis/paramfiles/della/global_dr6_v4.dict b/project/data_analysis/paramfiles/della/global_dr6_v4.dict index 04192f19..fed279e1 100644 --- a/project/data_analysis/paramfiles/della/global_dr6_v4.dict +++ b/project/data_analysis/paramfiles/della/global_dr6_v4.dict @@ -60,12 +60,12 @@ mm_tf_dr6_pa6_f150 = data_dir + "transfer_fcns/tf_unity.dat" n_splits_dr6 = 4 src_free_maps_dr6 = True -maps_dr6_pa4_f150 = [map_dir + 'cmb_night_pa4_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] -maps_dr6_pa4_f220 = [map_dir + 'cmb_night_pa4_f220_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] -maps_dr6_pa5_f090 = [map_dir + 'cmb_night_pa5_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] -maps_dr6_pa5_f150 = [map_dir + 'cmb_night_pa5_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] -maps_dr6_pa6_f090 = [map_dir + 'cmb_night_pa6_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] -maps_dr6_pa6_f150 = [map_dir + 'cmb_night_pa6_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa4_f150 = [data_dir + 'src_free_maps/cmb_night_pa4_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa4_f220 = [data_dir + 'src_free_maps/cmb_night_pa4_f220_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f090 = [data_dir + 'src_free_maps/cmb_night_pa5_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f150 = [data_dir + 'src_free_maps/cmb_night_pa5_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f090 = [data_dir + 'src_free_maps/cmb_night_pa6_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f150 = [data_dir + 'src_free_maps/cmb_night_pa6_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] cal_dr6_pa4_f150 = 1.0072 From cc06bba48bc224556203a82fb36224ecaec07cf9 Mon Sep 17 00:00:00 2001 From: xzackli Date: Fri, 4 Aug 2023 14:56:10 -0400 Subject: [PATCH 06/13] covmat script --- .../data_analysis/paramfiles/della/README.md | 9 +- .../paramfiles/della/global_dr6_v4.dict | 6 + .../python/get_split_covariance_aniso.py | 169 ++++++++++++++++++ 3 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 project/data_analysis/python/get_split_covariance_aniso.py diff --git a/project/data_analysis/paramfiles/della/README.md b/project/data_analysis/paramfiles/della/README.md index 3f319d19..0fd707d3 100644 --- a/project/data_analysis/paramfiles/della/README.md +++ b/project/data_analysis/paramfiles/della/README.md @@ -42,6 +42,10 @@ Now spectra, 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_spectra_from_alms.py 
paramfiles/della/global_dr6_v4.dict" ``` +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/split_nulls/get_per_split_noise.py paramfiles/della/global_dr6_v4.dict" +``` + Now we compute the analytic covariances blocks assuming an isotropic noise model. ```bash @@ -49,7 +53,10 @@ Now we compute the analytic covariances blocks assuming an isotropic noise model ``` ```bash -for i in {0..0}; do \ +for i in {0..119}; do \ 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_covariance_blocks.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" done ``` + + +srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores --pty ipython diff --git a/project/data_analysis/paramfiles/della/global_dr6_v4.dict b/project/data_analysis/paramfiles/della/global_dr6_v4.dict index fed279e1..7ee4b869 100644 --- a/project/data_analysis/paramfiles/della/global_dr6_v4.dict +++ b/project/data_analysis/paramfiles/della/global_dr6_v4.dict @@ -67,6 +67,12 @@ maps_dr6_pa5_f150 = [data_dir + 'src_free_maps/cmb_night_pa5_f150_3pass_4way_set maps_dr6_pa6_f090 = [data_dir + 'src_free_maps/cmb_night_pa6_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] maps_dr6_pa6_f150 = [data_dir + 'src_free_maps/cmb_night_pa6_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +ivar_dr6_pa4_f150 = [map_dir + 'cmb_night_pa4_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa4_f220 = [map_dir + 'cmb_night_pa4_f220_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa5_f090 = [map_dir + 'cmb_night_pa5_f090_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa5_f150 = [map_dir + 'cmb_night_pa5_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa6_f090 = [map_dir + 'cmb_night_pa6_f090_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa6_f150 = [map_dir + 'cmb_night_pa6_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] cal_dr6_pa4_f150 = 1.0072 cal_dr6_pa4_f220 = 1.0340 diff --git a/project/data_analysis/python/get_split_covariance_aniso.py b/project/data_analysis/python/get_split_covariance_aniso.py new file mode 100644 index 00000000..0a49a45d --- /dev/null +++ b/project/data_analysis/python/get_split_covariance_aniso.py @@ -0,0 +1,169 @@ +""" +This script compute the analytical covariance matrix elements +between split power spectra +""" +import sys +import numpy as np +from pspipe_utils import best_fits, log +from pspy import pspy_utils, so_cov, so_dict, so_map, so_mcm, so_mpi, so_spectra +from itertools import combinations_with_replacement as cwr +from itertools import combinations +from pspipe_utils import best_fits, pspipe_list + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) + +log = log.get_logger(**d) + +windows_dir = "windows" +mcms_dir = "mcms" +spectra_dir = "spectra" +noise_dir = "split_noise" +cov_dir = "split_covariances" +bestfit_dir = "best_fits" +sq_win_alms_dir = "sq_win_alms" + +pspy_utils.create_directory(cov_dir) +surveys = d["surveys"] +binning_file = d["binning_file"] +lmax = d["lmax"] +niter = d["niter"] +binned_mcm = d["binned_mcm"] +apply_kspace_filter = d["apply_kspace_filter"] +cov_T_E_only = d["cov_T_E_only"] + +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] +spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"] + + +# l_cmb, cmb_dict = best_fits.cmb_dict_from_file(bestfit_dir + "/cmb.dat", lmax, spectra) + +arrays = {sv: d[f"arrays_{sv}"] for sv in surveys} +n_splits = {sv: d[f"n_splits_{sv}"] for sv 
in surveys} +array_list = [f"{sv}_{ar}" for sv in surveys for ar in arrays[sv]] + +spec_name_list = pspipe_list.get_spec_name_list(d) +sv_array_list = [f"{sv}_{ar}" for sv in surveys for ar in arrays[sv]] +l_fg, fg_dict = best_fits.fg_dict_from_files(bestfit_dir + "/fg_{}x{}.dat", sv_array_list, lmax, spectra) + +l_cmb, cmb_dict = best_fits.cmb_dict_from_file(bestfit_dir + "/cmb.dat", lmax, spectra) +l, ps_all = best_fits.get_all_best_fit(spec_name_list, + l_cmb, + cmb_dict, + fg_dict, + spectra) + +noise_dict = {} +f_name_noise = f"{noise_dir}/Dl_" + "{}x{}_{}_noise_model.dat" +for sv in surveys: + for ar in arrays[sv]: + split_list = {sv: [f"{ar}_{i}" for i in range(n_splits[sv])]} + _, nlth = best_fits.noise_dict_from_files(f_name_noise, [sv], split_list, lmax=d["lmax"],spectra=spectra) + noise_dict.update(nlth) + + +bl_dict = {} +for sv in surveys: + for ar in arrays[sv]: + l_beam, bl_dict[sv, ar] = pspy_utils.read_beam_file(d[f"beam_{sv}_{ar}"]) + id_beam = np.where((l_beam >= 2) & (l_beam < lmax)) + bl_dict[sv, ar] = bl_dict[sv, ar][id_beam] + +cov_name = [] +for sv in surveys: + for ar in arrays[sv]: + for id1, xsplit1 in enumerate(combinations(range(n_splits[sv]), 2)): + for id2, xsplit2 in enumerate(combinations(range(n_splits[sv]), 2)): + if id1 > id2: continue + + + for pol in ("T",): + + na = f"{sv}&{ar}&{xsplit1[0]}&{pol}" + nb = f"{sv}&{ar}&{xsplit1[1]}&{pol}" + nc = f"{sv}&{ar}&{xsplit2[0]}&{pol}" + nd = f"{sv}&{ar}&{xsplit2[1]}&{pol}" + + cov_name.append((na, nb, nc, nd)) + +ncovs = len(cov_name) + + +log.info(f"number of covariance matrices to compute : {ncovs}") + +so_mpi.init(True) +subtasks = so_mpi.taskrange(imin=0, imax=ncovs - 1) + +if len(sys.argv) == 4: + log.info(f"computing only the covariance matrices : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] +log.info(subtasks) + +for task in subtasks: + na, nb, nc, nd = cov_name[task] + + + sv_a, ar_a, split_a, pol_a = na.split("&") + sv_b, ar_b, split_b, pol_b = nb.split("&") + sv_c, ar_c, split_c, pol_c = nc.split("&") + sv_d, ar_d, split_d, pol_d = nd.split("&") + + na = f"{sv_a}&{ar_a}" + nb = f"{sv_b}&{ar_b}" + nc = f"{sv_c}&{ar_c}" + nd = f"{sv_d}&{ar_d}" + + na_r, nb_r, nc_r, nd_r = na.replace("&", "_"), nb.replace("&", "_"), nc.replace("&", "_"), nd.replace("&", "_") + log.info(f"[task] cov element ({na_r} x {nb_r}, {nc_r} x {nd_r}) {split_a}{split_b}{split_c}{split_d} {pol_a}{pol_b}{pol_c}{pol_d}") + + splits = [split_a, split_b, split_c, split_d] + survey_id = [pol_a + "a", pol_b + "b", pol_c + "c", pol_d + "d"] + splitname2splitindex = dict(zip(splits, range(len(splits)))) + + survey_combo = sv_a, sv_b, sv_c, sv_d + array_combo = ar_a, ar_b, ar_c, ar_d + + win, var = {}, {} + white_noise = {} + for splitname1, id1, sv1, ar1 in zip(splits, survey_id, survey_combo, array_combo): + # log.info(f"{name1}, {id1}, {sv1}, {ar1}") + split_index = splitname2splitindex[splitname1] + ivar_fname = d[f"ivar_{sv1}_{ar1}"][split_index] + ivar1 = so_map.read_map(ivar_fname).data + var1 = np.reciprocal(ivar1,where=(ivar1!=0)) + maskpol = "T" if id1[0] == "T" else "pol" + win[id1] = so_map.read_map(d[f"window_{maskpol}_{sv1}_{ar1}"]) + white_noise[id1] = so_cov.measure_white_noise_level(var1, win[id1].data) + var[id1] = so_cov.make_weighted_variance_map(var1, win[id1]) + + + print(white_noise) + + + Clth_dict = {} + Rl_dict = {} + + for splitname1, id1, sv1, ar1 in zip(splits, survey_id, survey_combo, array_combo): + s1 = splitname1.replace("set", "") + X1 = id1[0] + ndl = 
noise_dict[sv1, (f"{ar1}_{s1}"), (f"{ar1}_{s1}")][X1 + X1] + ell = np.arange(2, lmax) + dlfac = (ell * (ell + 1) / (2 * np.pi)) + nl = ndl[:(lmax-2)] / dlfac + + Rl_dict[id1] = np.sqrt(nl / white_noise[id1]) + + for name2, id2, sv2, ar2 in zip(splits, survey_id, survey_combo, array_combo): + s2 = name2.replace("set", "") + X2 = id2[0] + # k = (f"{sv1}&{ar1}&{s1}", f"{sv2}&{ar2}&{s2}", X1+X2) + dlth = ps_all[f"{sv1}&{ar1}", f"{sv2}&{ar2}", X1+X2] + Clth_dict[id1 + id2] = dlth / dlfac[:(lmax-2)] + + + couplings = so_cov.generate_aniso_couplings_TTTT(survey_id, splits, win, var, lmax) + + cov_tttt = so_cov.coupled_cov_aniso_same_pol(survey_id, Clth_dict, Rl_dict, couplings) + + np.save(f"{cov_dir}/analytic_aniso_coupled_cov_{na_r}_{split_a}x{nb_r}_{split_b}_{nc_r}_{split_c}x{nd_r}_{split_d}.npy", + cov_tttt) From eca2f60df1361ffabfdaff55a21bf4420ff5be57 Mon Sep 17 00:00:00 2001 From: xzackli Date: Mon, 7 Aug 2023 16:05:23 -0400 Subject: [PATCH 07/13] TTTT aniso slurms --- .../data_analysis/paramfiles/della/README.md | 5 ++++- .../python/get_split_covariance_aniso.py | 19 +++++++++++++++---- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/project/data_analysis/paramfiles/della/README.md b/project/data_analysis/paramfiles/della/README.md index 0fd707d3..0d9c5d86 100644 --- a/project/data_analysis/paramfiles/della/README.md +++ b/project/data_analysis/paramfiles/della/README.md @@ -58,5 +58,8 @@ for i in {0..119}; do \ done ``` +For anisotropic covariances, -srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores --pty ipython +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_split_covariance_aniso.py paramfiles/della/global_dr6_v4.dict 100 101" +``` diff --git a/project/data_analysis/python/get_split_covariance_aniso.py b/project/data_analysis/python/get_split_covariance_aniso.py index 0a49a45d..a09c4f7d 100644 --- a/project/data_analysis/python/get_split_covariance_aniso.py +++ b/project/data_analysis/python/get_split_covariance_aniso.py @@ -77,7 +77,7 @@ if id1 > id2: continue - for pol in ("T",): + for pol in ("T",): # TODO: add back E na = f"{sv}&{ar}&{xsplit1[0]}&{pol}" nb = f"{sv}&{ar}&{xsplit1[1]}&{pol}" @@ -101,8 +101,7 @@ for task in subtasks: na, nb, nc, nd = cov_name[task] - - + sv_a, ar_a, split_a, pol_a = na.split("&") sv_b, ar_b, split_b, pol_b = nb.split("&") sv_c, ar_c, split_c, pol_c = nc.split("&") @@ -114,6 +113,13 @@ nd = f"{sv_d}&{ar_d}" na_r, nb_r, nc_r, nd_r = na.replace("&", "_"), nb.replace("&", "_"), nc.replace("&", "_"), nd.replace("&", "_") + + try: mbb_inv_ab, Bbl_ab = so_mcm.read_coupling(prefix=f"{mcms_dir}/{na_r}x{nb_r}", spin_pairs=spin_pairs) + except: mbb_inv_ab, Bbl_ab = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nb_r}x{na_r}", spin_pairs=spin_pairs) + + try: mbb_inv_cd, Bbl_cd = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nc_r}x{nd_r}", spin_pairs=spin_pairs) + except: mbb_inv_cd, Bbl_cd = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nd_r}x{nc_r}", spin_pairs=spin_pairs) + log.info(f"[task] cov element ({na_r} x {nb_r}, {nc_r} x {nd_r}) {split_a}{split_b}{split_c}{split_d} {pol_a}{pol_b}{pol_c}{pol_d}") splits = [split_a, split_b, split_c, split_d] @@ -137,7 +143,7 @@ var[id1] = so_cov.make_weighted_variance_map(var1, win[id1]) - print(white_noise) + log.info(white_noise) Clth_dict = {} @@ -167,3 +173,8 @@ np.save(f"{cov_dir}/analytic_aniso_coupled_cov_{na_r}_{split_a}x{nb_r}_{split_b}_{nc_r}_{split_c}x{nd_r}_{split_d}.npy", cov_tttt) + + full_analytic_cov = mbb_inv_ab["spin0xspin0"] @ cov_tttt @ 
mbb_inv_cd["spin0xspin0"].T + analytic_cov = so_cov.bin_mat(full_analytic_cov, binning_file, lmax) + np.save(f"{cov_dir}/analytic_aniso_cov_{na_r}_{split_a}x{nb_r}_{split_b}_{nc_r}_{split_c}x{nd_r}_{split_d}.npy", + analytic_cov) From af593a67000269f8f5a9b3fce08c55f27d7ee738 Mon Sep 17 00:00:00 2001 From: Zachary Atkins Date: Wed, 16 Aug 2023 14:31:18 -0400 Subject: [PATCH 08/13] first analytic covariance comparison commit: moving to new PSpipe project (and adding vscode workspace to gitignore of PSpipe) --- .gitignore | 3 + .../ana_cov_comp/paramfiles/10core2hr.slurm | 23 ++ .../paramfiles/1physicsnode.slurm | 20 ++ .../ana_cov_comp/paramfiles/8core1hr.slurm | 23 ++ .../ana_cov_comp/paramfiles/8core2hr.slurm | 23 ++ project/ana_cov_comp/paramfiles/README.md | 64 +++++ .../paramfiles/global_dr6_v4.dict | 170 ++++++++++++ project/ana_cov_comp/python/get_window_dr6.py | 245 ++++++++++++++++++ 8 files changed, 571 insertions(+) create mode 100644 project/ana_cov_comp/paramfiles/10core2hr.slurm create mode 100644 project/ana_cov_comp/paramfiles/1physicsnode.slurm create mode 100644 project/ana_cov_comp/paramfiles/8core1hr.slurm create mode 100644 project/ana_cov_comp/paramfiles/8core2hr.slurm create mode 100644 project/ana_cov_comp/paramfiles/README.md create mode 100644 project/ana_cov_comp/paramfiles/global_dr6_v4.dict create mode 100644 project/ana_cov_comp/python/get_window_dr6.py diff --git a/.gitignore b/.gitignore index 67d71e98..8a3dfaaa 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,6 @@ venv.bak/ # mypy .mypy_cache/ + +# GUI +*.code-workspace \ No newline at end of file diff --git a/project/ana_cov_comp/paramfiles/10core2hr.slurm b/project/ana_cov_comp/paramfiles/10core2hr.slurm new file mode 100644 index 00000000..63f31561 --- /dev/null +++ b/project/ana_cov_comp/paramfiles/10core2hr.slurm @@ -0,0 +1,23 @@ +#! /bin/bash +# +#SBATCH --nodes=1 # node count +#SBATCH --ntasks-per-node=1 # total number of tasks per node +#SBATCH --cpus-per-task=10 # cpu-cores per task (>1 if multi-threaded tasks) +#SBATCH --mem-per-cpu=8G # memory per cpu-core (4G per cpu-core is default) +#SBATCH -t 2:00:00 # QOS is determined by time +#SBATCH -p physics +#SBATCH --output=slurmoutput/R-%j.out + +cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis + +### set environment ### +module purge +module load cmb-della8/220426 + +# activate conda +conda activate pspy-della8 + +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK +export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK + +$1 diff --git a/project/ana_cov_comp/paramfiles/1physicsnode.slurm b/project/ana_cov_comp/paramfiles/1physicsnode.slurm new file mode 100644 index 00000000..27170816 --- /dev/null +++ b/project/ana_cov_comp/paramfiles/1physicsnode.slurm @@ -0,0 +1,20 @@ +#! /bin/bash +# +#SBATCH --nodes=1 # node count +#SBATCH --ntasks-per-node=1 # total number of tasks per node +#SBATCH -p physics +#SBATCH --output=slurmoutput/R-%j.out + +cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis + +### set environment ### +module purge +module load cmb-della8/220426 + +# activate conda +conda activate pspy-della8 + +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK +export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK + +$1 diff --git a/project/ana_cov_comp/paramfiles/8core1hr.slurm b/project/ana_cov_comp/paramfiles/8core1hr.slurm new file mode 100644 index 00000000..8a132827 --- /dev/null +++ b/project/ana_cov_comp/paramfiles/8core1hr.slurm @@ -0,0 +1,23 @@ +#! 
/bin/bash +# +#SBATCH --nodes=1 # node count +#SBATCH --ntasks-per-node=1 # total number of tasks per node +#SBATCH --cpus-per-task=8 # cpu-cores per task (>1 if multi-threaded tasks) +#SBATCH --mem-per-cpu=6G # memory per cpu-core (4G per cpu-core is default) +#SBATCH -t 1:00:00 +#SBATCH -p physics +#SBATCH --output=slurmoutput/R-%j.out + +cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis + +### set environment ### +module purge +module load cmb-della8/220426 + +# activate conda +conda activate pspy-della8 + +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK +export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK + +$1 diff --git a/project/ana_cov_comp/paramfiles/8core2hr.slurm b/project/ana_cov_comp/paramfiles/8core2hr.slurm new file mode 100644 index 00000000..ca5f0c7b --- /dev/null +++ b/project/ana_cov_comp/paramfiles/8core2hr.slurm @@ -0,0 +1,23 @@ +#! /bin/bash +# +#SBATCH --nodes=1 # node count +#SBATCH --ntasks-per-node=1 # total number of tasks per node +#SBATCH --cpus-per-task=8 # cpu-cores per task (>1 if multi-threaded tasks) +#SBATCH --mem-per-cpu=6G # memory per cpu-core (4G per cpu-core is default) +#SBATCH -t 2:00:00 # QOS is determined by time +#SBATCH -p physics +#SBATCH --output=slurmoutput/R-%j.out + +cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis + +### set environment ### +module purge +module load cmb-della8/220426 + +# activate conda +conda activate pspy-della8 + +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK +export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK + +$1 diff --git a/project/ana_cov_comp/paramfiles/README.md b/project/ana_cov_comp/paramfiles/README.md new file mode 100644 index 00000000..7e7b1b09 --- /dev/null +++ b/project/ana_cov_comp/paramfiles/README.md @@ -0,0 +1,64 @@ + + +On Della, it's better to submit SLURM jobs via script instead of interactively. For +convenience, let's define a commonly used job script, + +```bash +alias 1pn="sbatch /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis/paramfiles/della/1physicsnode.slurm $1" +``` + +We need windows. Note, this will generate "PSpipe" style windows. A particular dict file may load other, pre-generated windows. + +```bash +for i in {0..4}; do \ + 1pn "srun --cpus-per-task 1 --mem 48G --time 20:00 python -u python/get_window_dr6.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" +done +``` + +We need to generate some signal spectra for the covariance matrix, + +```bash +8core1hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_best_fit_mflike.py paramfiles/della/global_dr6_v4.dict" +``` + +Next we'll need to compute the mode-coupling matrices, + +```bash +for i in {0..15}; do \ + 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_mcm_and_bbl.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" +done +``` + +Next, we compute the alms of the maps. + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_alms.py paramfiles/della/global_dr6_v4.dict" +``` + +Now spectra, + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_spectra_from_alms.py paramfiles/della/global_dr6_v4.dict" +``` + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/split_nulls/get_per_split_noise.py paramfiles/della/global_dr6_v4.dict" +``` + +Now we compute the analytic covariances blocks assuming an isotropic noise model. 
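+
+Schematically, each task of `get_sq_windows_alms.py` computes the harmonic transform of the
+product of a pair of analysis windows; these alms feed the coupling terms of the analytic
+covariance. A minimal sketch of one such task with `pspy` (the file names and `lmax` here are
+illustrative placeholders, not the script's actual outputs):
+
+```python
+import numpy as np
+from pspy import so_map, sph_tools
+
+# read two analysis windows and take their pixel-wise product
+w1 = so_map.read_map("windows/window_dr6_pa5_f090.fits")
+w2 = so_map.read_map("windows/window_dr6_pa5_f150.fits")
+w1.data *= w2.data
+
+# alm of the product map, later combined into the covariance couplings
+sq_win_alm = sph_tools.map2alm(w1, niter=0, lmax=8500)
+np.save("sq_win_alms/alms_dr6_pa5_f090xdr6_pa5_f150.npy", sq_win_alm)
+```
+
+The pipeline command for all window pairs is: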
+ +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_sq_windows_alms.py paramfiles/della/global_dr6_v4.dict" +``` + +```bash +for i in {0..119}; do \ + 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_covariance_blocks.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" +done +``` + +For anisotropic covariances, + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_split_covariance_aniso.py paramfiles/della/global_dr6_v4.dict 100 101" +``` diff --git a/project/ana_cov_comp/paramfiles/global_dr6_v4.dict b/project/ana_cov_comp/paramfiles/global_dr6_v4.dict new file mode 100644 index 00000000..972ab63a --- /dev/null +++ b/project/ana_cov_comp/paramfiles/global_dr6_v4.dict @@ -0,0 +1,170 @@ +surveys = ["dr6"] + +arrays_dr6 = ["pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"] + +map_dir = '/scratch/gpfs/ACT/dr6v4/maps/dr6v4_20230316/release/' +# data_dir = '/tigress/zequnl/cmb/data/dr6/' +data_dir = '/scratch/gpfs/zatkins/data/simonsobs/PSpipe/project/data_analysis/ana_cov_comp/' +window_dir = data_dir + 'windows' + +deconvolve_pixwin = True +pixwin_dr6 = {"pix": 'CAR', "order": 0} +binning_file = data_dir + "binning/BIN_ACTPOL_50_4_SC_large_bin_at_low_ell" +niter = 0 +remove_mean = False +binned_mcm = False +lmax = 8500 +type = 'Dl' +write_splits_spectra = True +cov_T_E_only = False +multistep_path = data_dir +use_toeplitz_mcm = False +use_toeplitz_cov = True +use_beam_covariance = True + +#window parameters + +ps_mask_dr6_pa4_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa4_f220 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa5_f090 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa5_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa6_f090 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa6_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' + +gal_mask_dr6_pa4_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa4_f220 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa5_f090 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa5_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa6_f090 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa6_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" + +coord_mask_dr6_pa4_f150 = None +coord_mask_dr6_pa4_f220 = None +coord_mask_dr6_pa5_f090 = None +coord_mask_dr6_pa5_f150 = None +coord_mask_dr6_pa6_f090 = None +coord_mask_dr6_pa6_f150 = None + + +apod_pts_source_degree = 0.3 +apod_survey_degree = 2 +edge_skip_rescale = 1 +cross_link_threshold = 0.97 +n_med_ivar = 3 + +# kspace filter parameters +apply_kspace_filter = True +kspace_tf_path = "analytical" +k_filter_dr6 = {"type":"binary_cross","vk_mask":[-90, 90], "hk_mask":[-50, 50], "weighted":False} + +deconvolve_map_maker_tf_dr6 = False + +mm_tf_dr6_pa4_f150 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa4_f220 = data_dir + "transfer_fcns/tf_unity.dat" 
+mm_tf_dr6_pa5_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa5_f150 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa6_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa6_f150 = data_dir + "transfer_fcns/tf_unity.dat" + +# maps +n_splits_dr6 = 4 +src_free_maps_dr6 = True + +maps_dr6_pa4_f150 = [map_dir + 'cmb_night_pa4_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa4_f220 = [map_dir + 'cmb_night_pa4_f220_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f090 = [map_dir + 'cmb_night_pa5_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f150 = [map_dir + 'cmb_night_pa5_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f090 = [map_dir + 'cmb_night_pa6_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f150 = [map_dir + 'cmb_night_pa6_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] + +ivar_dr6_pa4_f150 = [map_dir + 'cmb_night_pa4_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa4_f220 = [map_dir + 'cmb_night_pa4_f220_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa5_f090 = [map_dir + 'cmb_night_pa5_f090_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa5_f150 = [map_dir + 'cmb_night_pa5_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa6_f090 = [map_dir + 'cmb_night_pa6_f090_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa6_f150 = [map_dir + 'cmb_night_pa6_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] + +cal_dr6_pa4_f150 = 1.0072 +cal_dr6_pa4_f220 = 1.0340 +cal_dr6_pa5_f090 = 1.0219 +cal_dr6_pa5_f150 = 0.9877 +cal_dr6_pa6_f090 = 1.0182 +cal_dr6_pa6_f150 = 0.9699 + + +pol_eff_dr6_pa4_f150 = 0.9584 +pol_eff_dr6_pa4_f220 = 1.0 +pol_eff_dr6_pa5_f090 = 0.9646 +pol_eff_dr6_pa5_f150 = 0.9488 +pol_eff_dr6_pa6_f090 = 0.9789 +pol_eff_dr6_pa6_f150 = 0.9656 + + +passband_dir = data_dir + "passbands/" +do_bandpass_integration = True +freq_info_dr6_pa4_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa4_f150.dat"} +freq_info_dr6_pa4_f220 = {"freq_tag": 220, "passband": passband_dir + "passband_dr6_pa4_f220.dat"} +freq_info_dr6_pa5_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_dr6_pa5_f090.dat"} +freq_info_dr6_pa5_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa5_f150.dat"} +freq_info_dr6_pa6_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_dr6_pa6_f090.dat"} +freq_info_dr6_pa6_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa6_f150.dat"} + + +beam_dr6_pa4_f150 = data_dir + 'beams/20230130_beams/coadd_pa4_f150_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa4_f220 = data_dir + 'beams/20230130_beams/coadd_pa4_f220_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa5_f090 = data_dir + 'beams/20230130_beams/coadd_pa5_f090_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa5_f150 = data_dir + 'beams/20230130_beams/coadd_pa5_f150_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa6_f090 = data_dir + 'beams/20230130_beams/coadd_pa6_f090_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa6_f150 = data_dir + 'beams/20230130_beams/coadd_pa6_f150_night_beam_tform_jitter_cmb.txt' + +leakage_file_dir = data_dir + 'beams/20230130_beams/' + +leakage_beam_dr6_pa4_f150 = 'gamma_mp_uranus_pa4_f150.txt' +leakage_beam_dr6_pa4_f220 = 'gamma_mp_uranus_pa4_f220.txt' +leakage_beam_dr6_pa5_f090 = 'gamma_mp_uranus_pa5_f090.txt' +leakage_beam_dr6_pa5_f150 = 'gamma_mp_uranus_pa5_f150.txt' 
+leakage_beam_dr6_pa6_f090 = 'gamma_mp_uranus_pa6_f090.txt'
+leakage_beam_dr6_pa6_f150 = 'gamma_mp_uranus_pa6_f150.txt'
+
+
+window_T_dr6_pa4_f150 = data_dir + "windows/window_dr6_pa4_f150.fits"
+window_pol_dr6_pa4_f150 = data_dir + "windows/window_dr6_pa4_f150.fits"
+
+window_T_dr6_pa4_f220 = data_dir + "windows/window_dr6_pa4_f220.fits"
+window_pol_dr6_pa4_f220 = data_dir + "windows/window_dr6_pa4_f220.fits"
+
+window_T_dr6_pa5_f090 = data_dir + "windows/window_dr6_pa5_f090.fits"
+window_pol_dr6_pa5_f090 = data_dir + "windows/window_dr6_pa5_f090.fits"
+
+window_T_dr6_pa5_f150 = data_dir + "windows/window_dr6_pa5_f150.fits"
+window_pol_dr6_pa5_f150 = data_dir + "windows/window_dr6_pa5_f150.fits"
+
+window_T_dr6_pa6_f090 = data_dir + "windows/window_dr6_pa6_f090.fits"
+window_pol_dr6_pa6_f090 = data_dir + "windows/window_dr6_pa6_f090.fits"
+
+window_T_dr6_pa6_f150 = data_dir + "windows/window_dr6_pa6_f150.fits"
+window_pol_dr6_pa6_f150 = data_dir + "windows/window_dr6_pa6_f150.fits"
+
+
+# best fit params (only used for sim generation and covariances computation)
+cosmo_params = {"cosmomc_theta":0.0104085, "logA": 3.044, "ombh2": 0.02237, "omch2": 0.1200, "ns": 0.9649, "Alens": 1.0, "tau": 0.0544}
+fg_norm = {"nu_0": 150.0, "ell_0": 3000, "T_CMB": 2.725}
+fg_components = {'tt': ['tSZ_and_CIB', 'cibp', 'kSZ', 'radio', 'dust'], 'te': ['radio', 'dust'], 'ee': ['radio', 'dust'], 'bb': ['radio', 'dust'], 'tb': ['radio', 'dust'], 'eb': []}
+fg_params = {"a_tSZ": 3.30, "a_kSZ": 1.60, "a_p": 6.90, "beta_p": 2.08, "a_c": 4.90, "beta_c": 2.20, "a_s": 3.10, "a_gtt": 8.7, "xi": 0.1, "T_d": 9.60, "a_gte": 0, "a_gtb": 0, "a_gee": 0, "a_gbb": 0, "a_pste": 0, "a_pstb": 0, "a_psee": 0, "a_psbb": 0}
+
+#sim
+iStart = 0
+iStop = 39
+sim_alm_dtype = "complex64"
+read_noise_alms_from_disk = False
+noise_sim_type = "fdw"
+noise_model_parameters = {"downgrade": 4, "mask_est_name": "dr6v3_20220316_baseline_union_mask", "mask_obs_name": "dr6v3_xlink_union_mask_0.001", "union_sources": "regular_20220316", "notes": "20220619"}
+
+#plot
+range_TT = [10, 8000]
+range_TE = [-150, 150]
+range_ET = [-150, 150]
+range_EE = [-20, 50]
+
+planck_data_dir = data_dir + "planck_data/"
diff --git a/project/ana_cov_comp/python/get_window_dr6.py b/project/ana_cov_comp/python/get_window_dr6.py
new file mode 100644
index 00000000..f2cf0303
--- /dev/null
+++ b/project/ana_cov_comp/python/get_window_dr6.py
@@ -0,0 +1,245 @@
+"""
+This script creates the window functions used in the PS computation.
+They consist of a point source mask, a galactic mask, a mask based on the amount of cross-linking in the data, and a coordinate mask; note that
+we also produce a window that includes the pixel weighting.
+The different masks are apodized.
+We also produce a kspace mask that will later be used for the kspace filtering operation, in order to remove the edges of the survey and avoid nasty pixels.
+"""
+
+import sys
+
+import numpy as np
+from pspipe_utils import log, pspipe_list
+from pspy import pspy_utils, so_dict, so_map, so_mpi, so_window
+from pixell import enmap
+
+def create_crosslink_mask(xlink_map, cross_link_threshold):
+    """
+    Create a mask to remove pixels with a small amount of x-linking.
+    We compute this using a product from the map maker that assesses the amount
+    of scan directions hitting each pixel in the map.
+    The product has 3 components and we compute sqrt(Q ** 2 + U ** 2) / I by analogy with the polarisation fraction.
+    A high value of this quantity means a low level of x-linking; we mask all pixels above a given threshold.
+    Note that the mask is designed on a downgraded version of the maps, to avoid small-scale structure in the mask.
+
+    Parameters
+    ----------
+    xlink_map: so_map
+        3-component so_map assessing the direction of scans hitting each pixel
+    cross_link_threshold: float
+        a threshold above which regions of the sky are considered not x-linked
+    """
+
+    xlink = so_map.read_map(xlink_map)
+    xlink_lowres = xlink.downgrade(32)
+    with np.errstate(invalid="ignore"):
+        x_mask = (
+            np.sqrt(xlink_lowres.data[1] ** 2 + xlink_lowres.data[2] ** 2) / xlink_lowres.data[0]
+        )
+    x_mask[np.isnan(x_mask)] = 1
+    x_mask[x_mask >= cross_link_threshold] = 1
+    x_mask[x_mask < cross_link_threshold] = 0
+    x_mask = 1 - x_mask
+
+    xlink_lowres.data[0] = x_mask
+    xlink = so_map.car2car(xlink_lowres, xlink)
+    x_mask = xlink.data[0].copy()
+    id = np.where(x_mask > 0.9)
+    x_mask[:] = 0
+    x_mask[id] = 1
+    return x_mask
+
+def apply_coordinate_mask(mask, coord):
+    """
+    Apply a mask based on coordinates.
+
+    Parameters
+    ----------
+    mask: so_map
+        the mask on which the coordinate mask will be applied
+    coord: list of list of float
+        format is [[dec0, ra0], [dec1, ra1]];
+        we create a rectangular mask from these coordinates.
+        In the convention assumed in this code,
+        dec0, ra0 are the coordinates of the top left corner and
+        dec1, ra1 are the coordinates of the bottom right corner
+    """
+
+    dec_ra = np.deg2rad(coord)
+    pix1 = mask.data.sky2pix(dec_ra[0])
+    pix2 = mask.data.sky2pix(dec_ra[1])
+    min_pix = np.min([pix1, pix2], axis=0).astype(int)
+    max_pix = np.max([pix1, pix2], axis=0).astype(int)
+    mask.data[min_pix[0] : max_pix[0], min_pix[1] : max_pix[1]] = 0
+
+    return mask
+
+d = so_dict.so_dict()
+d.read_from_file(sys.argv[1])
+log = log.get_logger(**d)
+
+
+surveys = d["surveys"]
+# the apodisation length of the point source mask in degrees
+apod_pts_source_degree = d["apod_pts_source_degree"]
+# the apodisation length of the final survey mask
+apod_survey_degree = d["apod_survey_degree"]
+# the threshold on the amount of cross-linking required to keep the data
+cross_link_threshold = d["cross_link_threshold"]
+# pixel weights with inverse variance above n_med_ivar * median are capped at that value;
+# this ensures that the window is not totally dominated by a few pixels with too much weight
+n_med_ivar = d["n_med_ivar"]
+
+# we skip the edges of the survey where the noise is very difficult to model; the default is to skip 0.5 degree when
+# constructing the kspace mask and 2 degrees for the final survey mask, and this parameter can be used to rescale the default
+rescale = d["edge_skip_rescale"]
+
+window_dir = d["window_dir"]
+
+pspy_utils.create_directory(window_dir)
+
+patch = None
+if "patch" in d:
+    patch = so_map.read_map(d["patch"])
+
+# here we list the different windows that need to be computed; we will then do an MPI loop over this list
+n_wins, sv_list, ar_list = pspipe_list.get_arrays_list(d)
+
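+# note: get_arrays_list flattens the (survey, array) pairs of the dict into parallel lists,
+# e.g. with the five arrays_dr6 entries above this gives
+#   n_wins  = 5
+#   sv_list = ["dr6", "dr6", "dr6", "dr6", "dr6"]
+#   ar_list = ["pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"]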
+log.info(f"number of windows to compute : {n_wins}")
+
+my_masks = {}
+
+so_mpi.init(True)
+subtasks = so_mpi.taskrange(imin=0, imax=n_wins - 1)
+
+if len(sys.argv) == 4:
+    log.info(f"computing only the windows : {int(sys.argv[2])}:{int(sys.argv[3])}")
+    subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])]
+
+for task in subtasks:
+    task = int(task)
+    sv, ar = sv_list[task], ar_list[task]
+
+    log.info(f"[{task}] create windows for '{sv}' survey and '{ar}' array...")
+
+    gal_mask = so_map.read_map(d[f"gal_mask_{sv}_{ar}"])
+
+    my_masks["baseline"] = gal_mask.copy()
+    my_masks["baseline"].data[:] = 1
+
+    maps = d[f"maps_{sv}_{ar}"]
+
+    ivar_all = gal_mask.copy()
+    ivar_all.data[:] = 0
+
+    # the first step is to iterate over the maps of a given array and mask any pixel with zero
+    # inverse variance in any split; this forms a first survey mask as the union of the split masks
+    # (i.e. the intersection of the split footprints). we also compute the average ivar map
+
+    log.info(f"[{task}] create survey mask from ivar maps")
+
+    for k, map in enumerate(maps):
+        if d[f"src_free_maps_{sv}"] == True:
+            index = map.find("map_srcfree.fits")
+        else:
+            index = map.find("map.fits")
+
+        ivar_map = map[:index] + "ivar.fits"
+        ivar_map = so_map.read_map(ivar_map)
+        my_masks["baseline"].data[ivar_map.data[:] == 0.0] = 0.0
+        ivar_all.data[:] += ivar_map.data[:]
+
+    ivar_all.data[:] /= np.max(ivar_all.data[:])
+
+    # optionally apply an extra coordinate mask
+    if d[f"coord_mask_{sv}_{ar}"] is not None:
+        log.info(f"[{task}] apply coord mask")
+        my_masks["baseline"] = apply_coordinate_mask(my_masks["baseline"], d[f"coord_mask_{sv}_{ar}"])
+
+
+    log.info(f"[{task}] compute distance to the edges and remove {0.5*rescale:.2f} degree from the edges")
+    # compute the distance to the nearest masked (zero) pixel
+    dist = so_window.get_distance(my_masks["baseline"], rmax = 4 * rescale * np.pi / 180)
+    # here we remove pixels near the edges in order to avoid pixels with a very low hit count;
+    # note the hardcoded 0.5 degree value, which can be rescaled with the edge_skip_rescale argument in the dict file.
+    my_masks["baseline"].data[dist.data < 0.5 * rescale] = 0
+
+    # apply the galactic mask
+    log.info(f"[{task}] apply galactic mask")
+    my_masks["baseline"].data *= gal_mask.data
+
+    # with this we can create the kspace mask; it will only be used for applying the kspace filter
+    log.info(f"[{task}] apodize kspace mask with {rescale:.2f} apod and write it to disk")
+    my_masks["kspace"] = my_masks["baseline"].copy()
+
+    # we apodize this kspace mask with a 1 degree apodisation
+
+    my_masks["kspace"] = so_window.create_apodization(my_masks["kspace"], "C1", 1 * rescale, use_rmax=True)
+    my_masks["kspace"].data = my_masks["kspace"].data.astype(np.float32)
+    my_masks["kspace"].write_map(f"{window_dir}/kspace_mask_{sv}_{ar}.fits")
+
+    # compared to the kspace mask, we skip an additional 2 degrees
+    # for the nominal mask to avoid ringing from the filter
+
+    dist = so_window.get_distance(my_masks["baseline"], rmax = 4 * rescale * np.pi / 180)
+    my_masks["baseline"].data[dist.data < 2 * rescale] = 0
+
+    # now we create an xlink mask based on the xlink threshold
+
+    log.info(f"[{task}] create xlink mask")
+
+    my_masks["xlink"] = my_masks["baseline"].copy()
+    for k, map in enumerate(maps):
+        if d[f"src_free_maps_{sv}"] == True:
+            index = map.find("map_srcfree.fits")
+        else:
+            index = map.find("map.fits")
+
+        xlink_map = map[:index] + "xlink.fits"
+        x_mask = create_crosslink_mask(xlink_map, cross_link_threshold)
+        my_masks["xlink"].data *= x_mask
+
+
+    for mask_type in ["baseline", "xlink"]:
+
+        # optionally apply a patch mask
+
+        if patch is not None:
+            log.info(f"[{task}] apply patch mask")
+
+            my_masks[mask_type].data *= patch.data
+
+
+        log.info(f"[{task}] apodize mask with {apod_survey_degree:.2f} apod")
+
+        # apodisation of the final mask
+        my_masks[mask_type] = so_window.create_apodization(my_masks[mask_type], "C1", apod_survey_degree, use_rmax=True)
+
+        # create a point source mask and apodise it
+
+        log.info(f"[{task}] include ps mask")
+
+        ps_mask = so_map.read_map(d[f"ps_mask_{sv}_{ar}"])
+        ps_mask = so_window.create_apodization(ps_mask, "C1", apod_pts_source_degree, use_rmax=True)
+        my_masks[mask_type].data *= ps_mask.data
+
+        my_masks[mask_type].data = my_masks[mask_type].data.astype(np.float32)
+        my_masks[mask_type].write_map(f"{window_dir}/window_{sv}_{ar}_{mask_type}.fits")
+
+        # we also make a version of the windows that takes into account the ivar of the maps
+        log.info(f"[{task}] include ivar")
+
+        mask_type_w = mask_type + "_ivar"
+
+        my_masks[mask_type_w] = my_masks[mask_type].copy()
+        id = np.where(ivar_all.data[:] * my_masks[mask_type_w].data[:] != 0)
+        med = np.median(ivar_all.data[id])
+        ivar_all.data[ivar_all.data[:] > n_med_ivar * med] = n_med_ivar * med
+        my_masks[mask_type_w].data[:] *= ivar_all.data[:]
+
+        my_masks[mask_type_w].data = my_masks[mask_type_w].data.astype(np.float32)
+        my_masks[mask_type_w].write_map(f"{window_dir}/window_w_{sv}_{ar}_{mask_type}.fits")
+
+    for mask_type, mask in my_masks.items():
+        log.info(f"[{task}] downgrade and plot {mask_type}")
+        mask = mask.downgrade(4)
+        mask.plot(file_name=f"{window_dir}/{mask_type}_mask_{sv}_{ar}")
From feec62a28e2e3afe06e28f46dca81319a2921ac8 Mon Sep 17 00:00:00 2001
From: Zachary Atkins
Date: Thu, 17 Aug 2023 11:55:09 -0400
Subject: [PATCH 09/13] updated della files/scripts, through spectra

---
 project/ana_cov_comp/README.md | 68 +++++++
 .../ana_cov_comp/paramfiles/10core2hr.slurm | 23 ---
 .../paramfiles/1physicsnode.slurm | 7 +-
 .../ana_cov_comp/paramfiles/8core1hr.slurm | 23 ---
.../ana_cov_comp/paramfiles/8core2hr.slurm | 23 --- project/ana_cov_comp/paramfiles/README.md | 64 ------ ...ict => sims_dr6_v4_lmax5400_20230816.dict} | 122 +++++++----- project/ana_cov_comp/python/get_alms.py | 120 +++++++++++ .../python/get_best_fit_mflike.py | 145 ++++++++++++++ .../ana_cov_comp/python/get_mcm_and_bbl.py | 64 ++++++ .../ana_cov_comp/python/get_noise_model.py | 115 +++++++++++ .../python/get_per_split_noise.py | 165 +++++++++++++++ .../python/get_spectra_from_alms.py | 188 ++++++++++++++++++ 13 files changed, 939 insertions(+), 188 deletions(-) create mode 100644 project/ana_cov_comp/README.md delete mode 100644 project/ana_cov_comp/paramfiles/10core2hr.slurm delete mode 100644 project/ana_cov_comp/paramfiles/8core1hr.slurm delete mode 100644 project/ana_cov_comp/paramfiles/8core2hr.slurm delete mode 100644 project/ana_cov_comp/paramfiles/README.md rename project/ana_cov_comp/paramfiles/{global_dr6_v4.dict => sims_dr6_v4_lmax5400_20230816.dict} (76%) create mode 100644 project/ana_cov_comp/python/get_alms.py create mode 100644 project/ana_cov_comp/python/get_best_fit_mflike.py create mode 100644 project/ana_cov_comp/python/get_mcm_and_bbl.py create mode 100644 project/ana_cov_comp/python/get_noise_model.py create mode 100644 project/ana_cov_comp/python/get_per_split_noise.py create mode 100644 project/ana_cov_comp/python/get_spectra_from_alms.py diff --git a/project/ana_cov_comp/README.md b/project/ana_cov_comp/README.md new file mode 100644 index 00000000..87fea728 --- /dev/null +++ b/project/ana_cov_comp/README.md @@ -0,0 +1,68 @@ + + +On Della, it's better to submit SLURM jobs via script instead of interactively. For +convenience, let's define a commonly used job script, + +```bash +export sbatch1pn="/home/zatkins/repos/simonsobs/PSpipe/project/ana_cov_comp/paramfiles/1physicsnode.slurm" +``` + +We need windows. Note, this will generate "PSpipe" style windows. A particular dict file may load other, pre-generated windows. + +```bash +for i in {0..4}; do \ + sbatch --cpus-per-task 1 --mem 48G --time 20:00 --job-name get_window_dr6 ${sbatch1pn} "python -u python/get_window_dr6.py paramfiles/global_dr6_v4.dict $i $((i+1))" +done +``` + +We need to generate some signal spectra for the covariance matrix, + +```bash +sbatch --cpus-per-task 1 --mem 2G --time 2:00 --job-name get_best_fit_mflike ${sbatch1pn} "python -u python/get_best_fit_mflike.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict" +``` + +Next we'll need to compute the mode-coupling matrices, + +```bash +for i in {0..21}; do \ + sbatch --cpus-per-task 10 --mem 28G --time 5:00 --job-name get_mcm_and_bbl ${sbatch1pn} "python -u python/get_mcm_and_bbl.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict $i $((i+1))" +done +``` + +Next, we compute the alms of the maps. 
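+Note that, as with the mode-coupling step above, many of the scripts in this
+project also accept an optional pair of integer arguments selecting a subrange
+of MPI tasks to compute (see the `sys.argv` handling in each script to check
+which ones support this).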
+ +```bash +sbatch --cpus-per-task 10 --mem 28G --time 20:00 --job-name get_alms ${sbatch1pn} "python -u python/get_alms.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict" +``` + +Now spectra, + +```bash +sbatch --cpus-per-task 1 --mem 28G --time 10:00 --job-name get_spectra_from_alms ${sbatch1pn} "python -u python/get_spectra_from_alms.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict" +``` + +```bash +sbatch --cpus-per-task 1 --mem 1G --time 2:00 --job-name get_noise_model ${sbatch1pn} "python -u python/get_noise_model.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict" +``` + +```bash +sbatch --cpus-per-task 1 --mem 4G --time 10:00 --job-name get_per_split_noise ${sbatch1pn} "python -u python/get_per_split_noise.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict" +``` + +Now we compute the analytic covariances blocks assuming an isotropic noise model. + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_sq_windows_alms.py paramfiles/della/global_dr6_v4.dict" +``` + +```bash +for i in {0..119}; do \ + 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_covariance_blocks.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" +done +``` + +For anisotropic covariances, + +```bash +10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_split_covariance_aniso.py paramfiles/della/global_dr6_v4.dict 100 101" +``` diff --git a/project/ana_cov_comp/paramfiles/10core2hr.slurm b/project/ana_cov_comp/paramfiles/10core2hr.slurm deleted file mode 100644 index 63f31561..00000000 --- a/project/ana_cov_comp/paramfiles/10core2hr.slurm +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -# -#SBATCH --nodes=1 # node count -#SBATCH --ntasks-per-node=1 # total number of tasks per node -#SBATCH --cpus-per-task=10 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --mem-per-cpu=8G # memory per cpu-core (4G per cpu-core is default) -#SBATCH -t 2:00:00 # QOS is determined by time -#SBATCH -p physics -#SBATCH --output=slurmoutput/R-%j.out - -cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis - -### set environment ### -module purge -module load cmb-della8/220426 - -# activate conda -conda activate pspy-della8 - -export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK -export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK - -$1 diff --git a/project/ana_cov_comp/paramfiles/1physicsnode.slurm b/project/ana_cov_comp/paramfiles/1physicsnode.slurm index 27170816..8bc275b9 100644 --- a/project/ana_cov_comp/paramfiles/1physicsnode.slurm +++ b/project/ana_cov_comp/paramfiles/1physicsnode.slurm @@ -4,8 +4,11 @@ #SBATCH --ntasks-per-node=1 # total number of tasks per node #SBATCH -p physics #SBATCH --output=slurmoutput/R-%j.out +#SBATCH --mail-type=begin # send email when job begins +#SBATCH --mail-type=end # send email when job ends +#SBATCH --mail-user=zatkins@princeton.edu -cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis +cd /home/zatkins/repos/simonsobs/PSpipe/project/ana_cov_comp ### set environment ### module purge @@ -17,4 +20,4 @@ conda activate pspy-della8 export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK -$1 +srun $1 diff --git a/project/ana_cov_comp/paramfiles/8core1hr.slurm b/project/ana_cov_comp/paramfiles/8core1hr.slurm deleted file mode 100644 index 8a132827..00000000 --- a/project/ana_cov_comp/paramfiles/8core1hr.slurm +++ /dev/null @@ -1,23 +0,0 @@ -#! 
/bin/bash -# -#SBATCH --nodes=1 # node count -#SBATCH --ntasks-per-node=1 # total number of tasks per node -#SBATCH --cpus-per-task=8 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --mem-per-cpu=6G # memory per cpu-core (4G per cpu-core is default) -#SBATCH -t 1:00:00 -#SBATCH -p physics -#SBATCH --output=slurmoutput/R-%j.out - -cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis - -### set environment ### -module purge -module load cmb-della8/220426 - -# activate conda -conda activate pspy-della8 - -export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK -export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK - -$1 diff --git a/project/ana_cov_comp/paramfiles/8core2hr.slurm b/project/ana_cov_comp/paramfiles/8core2hr.slurm deleted file mode 100644 index ca5f0c7b..00000000 --- a/project/ana_cov_comp/paramfiles/8core2hr.slurm +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -# -#SBATCH --nodes=1 # node count -#SBATCH --ntasks-per-node=1 # total number of tasks per node -#SBATCH --cpus-per-task=8 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --mem-per-cpu=6G # memory per cpu-core (4G per cpu-core is default) -#SBATCH -t 2:00:00 # QOS is determined by time -#SBATCH -p physics -#SBATCH --output=slurmoutput/R-%j.out - -cd /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis - -### set environment ### -module purge -module load cmb-della8/220426 - -# activate conda -conda activate pspy-della8 - -export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK -export JULIA_NUM_THREADS=$SLURM_CPUS_PER_TASK - -$1 diff --git a/project/ana_cov_comp/paramfiles/README.md b/project/ana_cov_comp/paramfiles/README.md deleted file mode 100644 index 7e7b1b09..00000000 --- a/project/ana_cov_comp/paramfiles/README.md +++ /dev/null @@ -1,64 +0,0 @@ - - -On Della, it's better to submit SLURM jobs via script instead of interactively. For -convenience, let's define a commonly used job script, - -```bash -alias 1pn="sbatch /home/zatkins/repos/simonsobs/PSpipe/project/data_analysis/paramfiles/della/1physicsnode.slurm $1" -``` - -We need windows. Note, this will generate "PSpipe" style windows. A particular dict file may load other, pre-generated windows. - -```bash -for i in {0..4}; do \ - 1pn "srun --cpus-per-task 1 --mem 48G --time 20:00 python -u python/get_window_dr6.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" -done -``` - -We need to generate some signal spectra for the covariance matrix, - -```bash -8core1hr "srun --ntasks 1 --cpus-per-task 8 --cpu-bind=cores python -u python/get_best_fit_mflike.py paramfiles/della/global_dr6_v4.dict" -``` - -Next we'll need to compute the mode-coupling matrices, - -```bash -for i in {0..15}; do \ - 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_mcm_and_bbl.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" -done -``` - -Next, we compute the alms of the maps. - -```bash -10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_alms.py paramfiles/della/global_dr6_v4.dict" -``` - -Now spectra, - -```bash -10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_spectra_from_alms.py paramfiles/della/global_dr6_v4.dict" -``` - -```bash -10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/split_nulls/get_per_split_noise.py paramfiles/della/global_dr6_v4.dict" -``` - -Now we compute the analytic covariances blocks assuming an isotropic noise model. 
- -```bash -10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_sq_windows_alms.py paramfiles/della/global_dr6_v4.dict" -``` - -```bash -for i in {0..119}; do \ - 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_covariance_blocks.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" -done -``` - -For anisotropic covariances, - -```bash -10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_split_covariance_aniso.py paramfiles/della/global_dr6_v4.dict 100 101" -``` diff --git a/project/ana_cov_comp/paramfiles/global_dr6_v4.dict b/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict similarity index 76% rename from project/ana_cov_comp/paramfiles/global_dr6_v4.dict rename to project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict index 972ab63a..96c4c23c 100644 --- a/project/ana_cov_comp/paramfiles/global_dr6_v4.dict +++ b/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict @@ -1,29 +1,49 @@ surveys = ["dr6"] -arrays_dr6 = ["pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"] +arrays_dr6 = ["pa4_f150", "pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"] -map_dir = '/scratch/gpfs/ACT/dr6v4/maps/dr6v4_20230316/release/' -# data_dir = '/tigress/zequnl/cmb/data/dr6/' data_dir = '/scratch/gpfs/zatkins/data/simonsobs/PSpipe/project/data_analysis/ana_cov_comp/' -window_dir = data_dir + 'windows' -deconvolve_pixwin = True -pixwin_dr6 = {"pix": 'CAR', "order": 0} +# mcms +mcms_dir = data_dir + 'mcms' + +binned_mcm = False +lmax = 5400 +use_toeplitz_mcm = False binning_file = data_dir + "binning/BIN_ACTPOL_50_4_SC_large_bin_at_low_ell" niter = 0 -remove_mean = False -binned_mcm = False -lmax = 8500 type = 'Dl' + +# alms +alms_dir = data_dir + 'alms' +apply_kspace_filter = False +k_filter_dr6 = {"type": "binary_cross", "vk_mask": [-90, 90], "hk_mask": [-50, 50], "weighted": False} +deconvolve_pixwin = True +pixwin_dr6 = {"pix": 'CAR', "order": 0} +remove_mean = False + +# spectra +spectra_dir = data_dir + 'spectra' +noise_model_dir = data_dir + 'noise_model' +split_noise_dir = data_dir + 'split_noise' + write_splits_spectra = True cov_T_E_only = False +kspace_tf_path = "analytical" +deconvolve_map_maker_tf_dr6 = False +mm_tf_dr6_pa4_f150 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa4_f220 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa5_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa5_f150 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa6_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa6_f150 = data_dir + "transfer_fcns/tf_unity.dat" + +# multistep_path = data_dir -use_toeplitz_mcm = False -use_toeplitz_cov = True +use_toeplitz_cov = True use_beam_covariance = True -#window parameters - +# window parameters ps_mask_dr6_pa4_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' ps_mask_dr6_pa4_f220 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' ps_mask_dr6_pa5_f090 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' @@ -45,28 +65,41 @@ coord_mask_dr6_pa5_f150 = None coord_mask_dr6_pa6_f090 = None coord_mask_dr6_pa6_f150 = None - apod_pts_source_degree = 0.3 apod_survey_degree = 2 edge_skip_rescale = 1 cross_link_threshold = 0.97 n_med_ivar = 3 -# kspace filter parameters -apply_kspace_filter = True -kspace_tf_path = "analytical" -k_filter_dr6 = {"type":"binary_cross","vk_mask":[-90, 90], 
"hk_mask":[-50, 50], "weighted":False} +window_dir = '/scratch/gpfs/zatkins/data/simonsobs/mnms/masks/effective_est/' -deconvolve_map_maker_tf_dr6 = False +window_T_dr6_pa4_f150 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_pol_dr6_pa4_f150 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa4_f150 = None -mm_tf_dr6_pa4_f150 = data_dir + "transfer_fcns/tf_unity.dat" -mm_tf_dr6_pa4_f220 = data_dir + "transfer_fcns/tf_unity.dat" -mm_tf_dr6_pa5_f090 = data_dir + "transfer_fcns/tf_unity.dat" -mm_tf_dr6_pa5_f150 = data_dir + "transfer_fcns/tf_unity.dat" -mm_tf_dr6_pa6_f090 = data_dir + "transfer_fcns/tf_unity.dat" -mm_tf_dr6_pa6_f150 = data_dir + "transfer_fcns/tf_unity.dat" +window_T_dr6_pa4_f220 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_pol_dr6_pa4_f220 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa4_f220 = None + +window_T_dr6_pa5_f090 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_pol_dr6_pa5_f090 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa5_f090 = None + +window_T_dr6_pa5_f150 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_pol_dr6_pa5_f150 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa5_f150 = None + +window_T_dr6_pa6_f090 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_pol_dr6_pa6_f090 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa6_f090 = None + +window_T_dr6_pa6_f150 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_pol_dr6_pa6_f150 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa6_f150 = None # maps +map_dir = '/scratch/gpfs/ACT/dr6v4/maps/dr6v4_20230316/release/' + n_splits_dr6 = 4 src_free_maps_dr6 = True @@ -91,7 +124,6 @@ cal_dr6_pa5_f150 = 0.9877 cal_dr6_pa6_f090 = 1.0182 cal_dr6_pa6_f150 = 0.9699 - pol_eff_dr6_pa4_f150 = 0.9584 pol_eff_dr6_pa4_f220 = 1.0 pol_eff_dr6_pa5_f090 = 0.9646 @@ -99,7 +131,7 @@ pol_eff_dr6_pa5_f150 = 0.9488 pol_eff_dr6_pa6_f090 = 0.9789 pol_eff_dr6_pa6_f150 = 0.9656 - +# passbands passband_dir = data_dir + "passbands/" do_bandpass_integration = True freq_info_dr6_pa4_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa4_f150.dat"} @@ -109,7 +141,7 @@ freq_info_dr6_pa5_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_ freq_info_dr6_pa6_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_dr6_pa6_f090.dat"} freq_info_dr6_pa6_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa6_f150.dat"} - +# beams beam_dr6_pa4_f150 = data_dir + 'beams/20230130_beams/coadd_pa4_f150_night_beam_tform_jitter_cmb.txt' beam_dr6_pa4_f220 = data_dir + 'beams/20230130_beams/coadd_pa4_f220_night_beam_tform_jitter_cmb.txt' beam_dr6_pa5_f090 = data_dir + 'beams/20230130_beams/coadd_pa5_f090_night_beam_tform_jitter_cmb.txt' @@ -126,34 +158,15 @@ leakage_beam_dr6_pa5_f150 = 'gamma_mp_uranus_pa5_f150.txt' leakage_beam_dr6_pa6_f090 = 'gamma_mp_uranus_pa6_f090.txt' leakage_beam_dr6_pa6_f150 = 'gamma_mp_uranus_pa6_f150.txt' - - -window_T_dr6_pa4_f150 = data_dir + "windows/window_dr6_pa4_f150.fits" -window_pol_dr6_pa4_f150 = data_dir + "windows/window_dr6_pa4_f150.fits" - -window_T_dr6_pa4_f220 = data_dir + "windows/window_dr6_pa4_f220.fits" -window_pol_dr6_pa4_f220 = data_dir + "windows/window_dr6_pa4_f220.fits" - -window_T_dr6_pa5_f090 = data_dir + "windows/window_dr6_pa5_f090.fits" -window_pol_dr6_pa5_f090 = data_dir + 
"windows/window_dr6_pa5_f090.fits" - -window_T_dr6_pa5_f150 = data_dir + "windows/window_dr6_pa5_f150.fits" -window_pol_dr6_pa5_f150 = data_dir + "windows/window_dr6_pa5_f150.fits" - -window_T_dr6_pa6_f090 = data_dir + "windows/window_dr6_pa6_f090.fits" -window_pol_dr6_pa6_f090 = data_dir + "windows/window_dr6_pa6_f090.fits" - -window_T_dr6_pa6_f150 = data_dir + "windows/window_dr6_pa6_f150.fits" -window_pol_dr6_pa6_f150 = data_dir + "windows/window_dr6_pa6_f150.fits" - - # best fit params (only used for sim generation and covariances computation) +best_fits_dir = data_dir + 'best_fits' + cosmo_params = {"cosmomc_theta":0.0104085, "logA": 3.044, "ombh2": 0.02237, "omch2": 0.1200, "ns": 0.9649, "Alens": 1.0, "tau": 0.0544} fg_norm = {"nu_0": 150.0, "ell_0": 3000, "T_CMB": 2.725} fg_components = {'tt': ['tSZ_and_CIB', 'cibp', 'kSZ', 'radio', 'dust'], 'te': ['radio', 'dust'], 'ee': ['radio', 'dust'], 'bb': ['radio', 'dust'], 'tb': ['radio', 'dust'], 'eb': []} -fg_params = {"a_tSZ": 3.30, "a_kSZ": 1.60, "a_p": 6.90, "beta_p": 2.08, "a_c": 4.90, "beta_c": 2.20, "a_s": 3.10, "a_gtt": 8.7, "xi": 0.1, "T_d": 9.60, "a_gte": 0, "a_gtb": 0, "a_gee": 0, "a_gbb": 0, "a_pste": 0, "a_pstb": 0, "a_psee": 0, "a_psbb": 0} +fg_params = {"a_tSZ": 3.30, "a_kSZ": 1.60, "a_p": 6.90, "beta_p": 2.08, "a_c": 4.90, "beta_c": 2.20, "a_s": 3.10, "a_gtt": 8.7, "xi": 0.1, "T_d": 9.60, "a_gte": 0, "a_gtb": 0, "a_gee": 0, "a_gbb": 0, "a_pste": 0, "a_pstb": 0, "a_psee": 0, "a_psbb": 0} -#sim +# sim iStart = 0 iStop = 39 sim_alm_dtype = "complex64" @@ -161,10 +174,13 @@ read_noise_alms_from_disk = False noise_sim_type = "fdw" noise_model_parameters = {"downgrade": 4, "mask_est_name": "dr6v3_20220316_baseline_union_mask", "mask_obs_name": "dr6v3_xlink_union_mask_0.001", "union_sources": "regular_20220316", "notes": "20220619"} -#plot +# plot +plot_dir = data_dir + 'plots' + range_TT = [10, 8000] range_TE = [-150, 150] range_ET = [-150, 150] range_EE = [-20, 50] -planck_data_dir = data_dir + "planck_data/" +# planck +planck_data_dir = data_dir + "planck_data/" \ No newline at end of file diff --git a/project/ana_cov_comp/python/get_alms.py b/project/ana_cov_comp/python/get_alms.py new file mode 100644 index 00000000..dc2fe6d3 --- /dev/null +++ b/project/ana_cov_comp/python/get_alms.py @@ -0,0 +1,120 @@ +""" +This script compute all alms write them to disk. +It uses the window function provided in the dictionnary file. +Optionally, it applies a calibration to the maps, a kspace filter and deconvolve the CAR pixel window function. 
+""" + +import sys +import time + +import numpy as np +from pixell import enmap +from pspipe_utils import kspace, log, misc, pspipe_list +from pspy import pspy_utils, so_dict, so_map, so_mpi, sph_tools + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) +log = log.get_logger(**d) + +surveys = d["surveys"] +lmax = d["lmax"] +deconvolve_pixwin = d["deconvolve_pixwin"] +niter = d["niter"] +apply_kspace_filter = d["apply_kspace_filter"] + + +window_dir = d["window_dir"] +alms_dir = d["alms_dir"] +pspy_utils.create_directory(alms_dir) + +n_ar, sv_list, ar_list = pspipe_list.get_arrays_list(d) + +log.info(f"number of arrays for the mpi loop : {n_ar}") +so_mpi.init(True) +subtasks = so_mpi.taskrange(imin=0, imax=n_ar-1) + +if len(sys.argv) == 4: + log.info(f"computing only the alms : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] +log.info(subtasks) + +for task in subtasks: + task = int(task) + sv, ar = sv_list[task], ar_list[task] + + log.info(f"[{task}] Computing alm for '{sv}' survey and '{ar}' array") + + win_T = so_map.read_map(d[f"window_T_{sv}_{ar}"]) + win_pol = so_map.read_map(d[f"window_pol_{sv}_{ar}"]) + + window_tuple = (win_T, win_pol) + + if win_T.pixel == "CAR": + # if None, don't apply window in fourier operations, + # will result in exception if ks_f["weighted"] is True + if d[f"window_kspace_{sv}_{ar}"] is not None: + win_kspace = so_map.read_map(d[f"window_kspace_{sv}_{ar}"]) + else: + win_kspace = None + + if apply_kspace_filter: + ks_f = d[f"k_filter_{sv}"] + filter = kspace.get_kspace_filter(win_T, ks_f) + + inv_pixwin_lxly = None + if deconvolve_pixwin: + if d[f"pixwin_{sv}"]["pix"] == "CAR": + # compute the CAR pixel function in fourier space + wy, wx = enmap.calc_window(win_T.data.shape, order=d[f"pixwin_{sv}"]["order"]) + inv_pixwin_lxly = (wy[:,None] * wx[None,:]) ** (-1) + + + maps = d[f"maps_{sv}_{ar}"] + cal, pol_eff = d[f"cal_{sv}_{ar}"], d[f"pol_eff_{sv}_{ar}"] + + t0 = time.time() + for k, map in enumerate(maps): + + if win_T.pixel == "CAR": + split = so_map.read_map(map, geometry=win_T.data.geometry) + + ### NOTE: comment-out because we don't care about ps, and + ### *don't* want ps mask! + # if d[f"src_free_maps_{sv}"] == True: + # ps_map_name = map.replace("srcfree.fits", "model.fits") + # if ps_map_name == map: + # raise ValueError("No model map is provided! 
Check map names!") + # ps_map = so_map.read_map(ps_map_name) + # ps_mask = so_map.read_map(d[f"ps_mask_{sv}_{ar}"]) + # ps_map.data *= ps_mask.data + # split.data += ps_map.data + + if apply_kspace_filter: + log.info(f"[{task}] apply kspace filter on {map}") + split = kspace.filter_map(split, + filter, + win_kspace, + inv_pixwin=inv_pixwin_lxly, + weighted_filter=ks_f["weighted"], + use_ducc_rfft=True) + + else: + log.info(f"[{task}] WARNING: no kspace filter is applied") + if deconvolve_pixwin: + split = so_map.fourier_convolution(split, + inv_pixwin_lxly, + window=win_kspace, + use_ducc_rfft=True) + + elif win_T.pixel == "HEALPIX": + split = so_map.read_map(map) + + split = split.calibrate(cal=cal, pol_eff=pol_eff) + + if d["remove_mean"] == True: + split = split.subtract_mean(window_tuple) + + master_alms = sph_tools.get_alms(split, window_tuple, niter, lmax) + np.save(f"{alms_dir}/alms_{sv}_{ar}_{k}.npy", master_alms) + + log.info(f"[{task}] execution time {time.time() - t0} seconds") diff --git a/project/ana_cov_comp/python/get_best_fit_mflike.py b/project/ana_cov_comp/python/get_best_fit_mflike.py new file mode 100644 index 00000000..f9d84ca4 --- /dev/null +++ b/project/ana_cov_comp/python/get_best_fit_mflike.py @@ -0,0 +1,145 @@ +""" +This script compute best fit from theory and fg power spectra. +It uses camb and the foreground model of mflike based on fgspectra +""" +import matplotlib + +matplotlib.use("Agg") +import sys + +import numpy as np +import pylab as plt +from pspipe_utils import best_fits, log, pspipe_list +from pspy import pspy_utils, so_dict, so_spectra + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) +log = log.get_logger(**d) + +# first let's get a list of all frequency we plan to study +surveys = d["surveys"] +lmax = d["lmax"] +type = d["type"] +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] + +# let's create the directories to write best fit to disk and for plotting purpose +bestfit_dir = d["best_fits_dir"] +plot_dir = d["plot_dir"] + "/best_fits" + +pspy_utils.create_directory(bestfit_dir) +pspy_utils.create_directory(plot_dir) + +cosmo_params = d["cosmo_params"] +log.info(f"Computing CMB spectra with cosmological parameters: {cosmo_params}") +l_th, ps_dict = pspy_utils.ps_from_params(cosmo_params, type, lmax + 500) + +f_name = f"{bestfit_dir}/cmb.dat" +so_spectra.write_ps(f_name, l_th, ps_dict, type, spectra=spectra) + + +fg_norm = d["fg_norm"] +fg_params = d["fg_params"] +fg_components = d["fg_components"] + +passbands = {} +do_bandpass_integration = d["do_bandpass_integration"] + +if do_bandpass_integration: + log.info("Doing bandpass integration") + +narrays, sv_list, ar_list = pspipe_list.get_arrays_list(d) +for sv, ar in zip(sv_list, ar_list): + + freq_info = d[f"freq_info_{sv}_{ar}"] + if do_bandpass_integration: + nu_ghz, pb = np.loadtxt(freq_info["passband"]).T + else: + nu_ghz, pb = np.array([freq_info["freq_tag"]]), np.array([1.]) + + passbands[f"{sv}_{ar}"] = [nu_ghz, pb] + +log.info("Getting foregrounds contribution") +fg_dict = best_fits.get_foreground_dict(l_th, passbands, fg_components, fg_params, fg_norm) + +for sv1, ar1 in zip(sv_list, ar_list): + for sv2, ar2 in zip(sv_list, ar_list): + name1 = f"{sv1}_{ar1}" + name2 = f"{sv2}_{ar2}" + fg = {} + for spec in spectra: + fg[spec] = fg_dict[spec.lower(), "all", name1, name2] + so_spectra.write_ps(f"{bestfit_dir}/fg_{name1}x{name2}.dat", l_th, fg, type, spectra=spectra) + +log.info("Writing best fit spectra") +spectra_list = pspipe_list.get_spec_name_list(d, char = "_") 
+best_fit_dict = {} +for ps_name in spectra_list: + best_fit_dict[ps_name] = {} + name1, name2 = ps_name.split("x") + for spec in spectra: + if spec.lower() in d["fg_components"].keys(): + fg = fg_dict[spec.lower(), "all", name1, name2] + else: + fg = fg_dict[spec.lower()[::-1], "all", name1, name2] + best_fit_dict[ps_name][spec] = ps_dict[spec] + fg + so_spectra.write_ps(f"{bestfit_dir}/cmb_and_fg_{ps_name}.dat", l_th, best_fit_dict[ps_name], type, spectra=spectra) + + +for spec in spectra: + plt.figure(figsize=(12, 12)) + if spec == "TT": + plt.semilogy() + for ps_name in spectra_list: + plt.plot(l_th, best_fit_dict[ps_name][spec], label=ps_name) + plt.legend() + plt.savefig(f"{plot_dir}/best_fit_{spec}.png") + plt.clf() + plt.close() + + + +fg_components["tt"].remove("tSZ_and_CIB") +for comp in ["tSZ", "cibc", "tSZxCIB"]: + fg_components["tt"].append(comp) + +for mode in ["tt", "te", "tb", "ee", "eb", "bb"]: + fig, axes = plt.subplots(narrays, narrays, sharex=True, sharey=True, figsize=(16, 16)) + indices = np.triu_indices(narrays)[::-1] + for i, cross in enumerate(spectra_list): + name1, name2 = cross.split("x") + idx = (indices[0][i], indices[1][i]) + ax = axes[idx] + + for comp in fg_components[mode]: + ax.plot(l_th, fg_dict[mode, comp, name1, name2]) + ax.plot(l_th, fg_dict[mode, "all", name1, name2], color="k") + ax.plot(l_th, ps_dict[mode.upper()], color="gray") + ax.set_title(cross) + if mode == "tt": + ax.set_yscale("log") + ax.set_ylim(1e-1, 1e4) + if mode == "ee": + ax.set_yscale("log") + if mode == "bb": + ax.set_yscale("log") + + if (mode[0] != mode[1]) and (name1 != name2): + ax = axes[idx[::-1]] + for comp in fg_components[mode]: + ax.plot(l_th, fg_dict[mode, comp, name2, name1]) + ax.plot(l_th, fg_dict[mode, "all", name2, name1]) + ax.plot(l_th, ps_dict[mode.upper()], color="gray") + ax.set_title(f"{name2}x{name1}") + + if mode[0] == mode[1]: + for idx in zip(*np.triu_indices(narrays, k=1)): + ax = axes[idx] + fig.delaxes(ax) + + for i in range(narrays): + axes[-1, i].set_xlabel(r"$\ell$") + axes[i, 0].set_ylabel(r"$D_\ell$") + fig.legend(fg_components[mode] + ["all"], title=mode.upper(), bbox_to_anchor=(1,1)) + plt.tight_layout() + plt.savefig(f"{plot_dir}/foregrounds_all_comps_{mode}.png", dpi=300) + plt.close() diff --git a/project/ana_cov_comp/python/get_mcm_and_bbl.py b/project/ana_cov_comp/python/get_mcm_and_bbl.py new file mode 100644 index 00000000..9e7e3a2d --- /dev/null +++ b/project/ana_cov_comp/python/get_mcm_and_bbl.py @@ -0,0 +1,64 @@ +""" +This script computes the mode coupling matrices and the binning matrices Bbl +for the different surveys and arrays. 
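+The matrices are written to 'mcms_dir'; if 'use_toeplitz_mcm' is set in the
+dict file, the Toeplitz approximation is used to speed up the computation.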
+""" + +import sys + +from pspipe_utils import log, pspipe_list +from pspy import pspy_utils, so_dict, so_map, so_mcm, so_mpi + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) +log = log.get_logger(**d) + +mcm_dir = d["mcms_dir"] +pspy_utils.create_directory(mcm_dir) + +surveys = d["surveys"] +lmax = d["lmax"] +binned_mcm = d["binned_mcm"] + +if d["use_toeplitz_mcm"] == True: + log.info("we will use the toeplitz approximation") + l_exact, l_band, l_toep = 800, 2000, 2750 +else: + l_exact, l_band, l_toep = None, None, None + +n_mcms, sv1_list, ar1_list, sv2_list, ar2_list = pspipe_list.get_spectra_list(d) + +log.info(f"number of mcm matrices to compute : {n_mcms}") +so_mpi.init(True) +subtasks = so_mpi.taskrange(imin=0, imax=n_mcms - 1) + +if len(sys.argv) == 4: + log.info(f"computing only the mcm matrices : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] + +for task in subtasks: + task = int(task) + sv1, ar1, sv2, ar2 = sv1_list[task], ar1_list[task], sv2_list[task], ar2_list[task] + + log.info(f"[{task:02d}] mcm matrix for {sv1}_{ar1} x {sv2}_{ar2}") + + l, bl1 = pspy_utils.read_beam_file(d[f"beam_{sv1}_{ar1}"]) + win1_T = so_map.read_map(d[f"window_T_{sv1}_{ar1}"]) + win1_pol = so_map.read_map(d[f"window_pol_{sv1}_{ar1}"]) + + l, bl2 = pspy_utils.read_beam_file(d[f"beam_{sv2}_{ar2}"]) + win2_T = so_map.read_map(d[f"window_T_{sv2}_{ar2}"]) + win2_pol = so_map.read_map(d[f"window_pol_{sv2}_{ar2}"]) + + mbb_inv, Bbl = so_mcm.mcm_and_bbl_spin0and2(win1=(win1_T, win1_pol), + win2=(win2_T, win2_pol), + bl1=(bl1, bl1), + bl2=(bl2, bl2), + binning_file=d["binning_file"], + niter=d["niter"], + lmax=d["lmax"], + type=d["type"], + l_exact=l_exact, + l_band=l_band, + l_toep=l_toep, + binned_mcm=binned_mcm, + save_file=f"{mcm_dir}/{sv1}_{ar1}x{sv2}_{ar2}") diff --git a/project/ana_cov_comp/python/get_noise_model.py b/project/ana_cov_comp/python/get_noise_model.py new file mode 100644 index 00000000..2747e02f --- /dev/null +++ b/project/ana_cov_comp/python/get_noise_model.py @@ -0,0 +1,115 @@ +#TO BE TESTED MORE + +import matplotlib + +matplotlib.use("Agg") +import sys + +import numpy as np +import pylab as plt +import scipy.interpolate +from pspipe_utils import log +from pspy import pspy_utils, so_dict, so_spectra + + +def interpolate_dict(lb, cb, lth, spectra, force_positive=True, l_inf_lmin_equal_lmin=True, discard_cross=True): + cl_dict = {} + for spec in spectra: + cl = scipy.interpolate.interp1d(lb, cb[spec], fill_value = "extrapolate") + cl_dict[spec] = cl(lth) + if l_inf_lmin_equal_lmin: + id = np.where(lth <= np.min(lb)) + cl_dict[spec][id]= cb[spec][0] + if force_positive: + cl_dict[spec] = np.abs(cl_dict[spec]) + if discard_cross: + if spec not in ["TT", "EE", "BB"]: + cl_dict[spec] = np.zeros(len(lth)) + return cl_dict + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) +log = log.get_logger(**d) + +spectra_dir = d["spectra_dir"] + +ps_model_dir = d["noise_model_dir"] +plot_dir = d["plot_dir"] + "/noise_model" + +pspy_utils.create_directory(ps_model_dir) +pspy_utils.create_directory(plot_dir) + +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] +surveys = d["surveys"] +lmax = d["lmax"] +type = d["type"] +binning_file = d["binning_file"] + +lth = np.arange(2, lmax+2) + +for sv in surveys: + arrays = d[f"arrays_{sv}"] + for id_ar1, ar1 in enumerate(arrays): + for id_ar2, ar2 in enumerate(arrays): + if id_ar1 > id_ar2: continue + + log.info(f"Computing noise for '{sv}' survey and '{ar1}x{ar2}' arrays") + + l, bl_ar1 = 
pspy_utils.read_beam_file(d[f"beam_{sv}_{ar1}"]) + l, bl_ar2 = pspy_utils.read_beam_file(d[f"beam_{sv}_{ar2}"]) + + + lb, nbs_ar1xar1 = so_spectra.read_ps(f"{spectra_dir}/{type}_{sv}_{ar1}x{sv}_{ar1}_noise.dat", spectra=spectra) + lb, nbs_ar1xar2 = so_spectra.read_ps(f"{spectra_dir}/{type}_{sv}_{ar1}x{sv}_{ar2}_noise.dat", spectra=spectra) + lb, nbs_ar2xar2 = so_spectra.read_ps(f"{spectra_dir}/{type}_{sv}_{ar2}x{sv}_{ar2}_noise.dat", spectra=spectra) + + lb, bb_ar1 = pspy_utils.naive_binning(l, bl_ar1, binning_file, lmax) + lb, bb_ar2 = pspy_utils.naive_binning(l, bl_ar2, binning_file, lmax) + + Rb = {} + for spec in spectra: + nbs_ar1xar1[spec] *= bb_ar1 * bb_ar1 + nbs_ar1xar2[spec] *= bb_ar1 * bb_ar2 + nbs_ar2xar2[spec] *= bb_ar2 * bb_ar2 + Rb[spec] = nbs_ar1xar2[spec] / np.sqrt(np.abs(nbs_ar1xar1[spec] * nbs_ar2xar2[spec])) + + if ar1 == ar2: + nlth = interpolate_dict(lb, nbs_ar1xar1, lth, spectra) + else: + Rlth = interpolate_dict(lb, Rb, lth, spectra) + nlth_ar1xar1 = interpolate_dict(lb, nbs_ar1xar1, lth, spectra) + nlth_ar2xar2 = interpolate_dict(lb, nbs_ar2xar2, lth, spectra) + nlth = {} + for spec in spectra: + nlth[spec] = Rlth[spec] * np.sqrt(nlth_ar1xar1[spec] * nlth_ar2xar2[spec]) + + + for spec in spectra: + plt.figure(figsize=(12,12)) + plt.plot(lth, + nlth[spec], + label="interpolate", + color="lightblue") + if ar1 == ar2: + nbs= nbs_ar1xar1[spec] + else: + nbs= nbs_ar1xar2[spec] + + plt.plot(lb, + nbs, + ".", + label = f"{sv} {ar1}x{ar2}", + color="red") + plt.legend(fontsize=20) + plt.savefig(f"{plot_dir}/noise_interpolate_{ar1}x{ar2}_{sv}_{spec}.png", bbox_inches="tight") + plt.clf() + plt.close() + + spec_name_noise_mean = f"mean_{ar1}x{ar2}_{sv}_noise" + so_spectra.write_ps(ps_model_dir + f"/{spec_name_noise_mean}.dat", lth, nlth, type, spectra=spectra) + + if ar2 != ar1: + spec_name_noise_mean = f"mean_{ar2}x{ar1}_{sv}_noise" + TE, ET, TB, BT, EB, BE = nlth["ET"], nlth["TE"], nlth["BT"], nlth["TB"], nlth["BE"], nlth["EB"] + nlth["TE"], nlth["ET"], nlth["TB"], nlth["BT"], nlth["EB"], nlth["BE"] = TE, ET, TB, BT, EB, BE + so_spectra.write_ps(ps_model_dir + f"/{spec_name_noise_mean}.dat", lth, nlth, type, spectra=spectra) diff --git a/project/ana_cov_comp/python/get_per_split_noise.py b/project/ana_cov_comp/python/get_per_split_noise.py new file mode 100644 index 00000000..85d1aa0c --- /dev/null +++ b/project/ana_cov_comp/python/get_per_split_noise.py @@ -0,0 +1,165 @@ +""" +This script is used to compute the per-split noise +from pre-computed power spectra. `write_split_spectra` has to be +set to `True` to run this script. 
+""" +from pspy import so_dict, pspy_utils, so_spectra +import sys +from pspipe_utils import pspipe_list +import scipy.interpolate +import numpy as np +import matplotlib.pyplot as plt + +def interpolate_dict(lb, cb, lth, spectra, force_positive=True, l_inf_lmin_equal_lmin=True, discard_cross=True): + cl_dict = {} + for spec in spectra: + cl = scipy.interpolate.interp1d(lb, cb[spec], fill_value="extrapolate") + cl_dict[spec] = cl(lth) + if l_inf_lmin_equal_lmin: + id = np.where(lth <= np.min(lb)) + cl_dict[spec][id]= cb[spec][0] + if force_positive: + cl_dict[spec] = np.abs(cl_dict[spec]) + if discard_cross: + if spec not in ["TT", "EE", "BB"]: + cl_dict[spec] = np.zeros(len(lth)) + return cl_dict + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) + +spectra_dir = d["spectra_dir"] +ps_model_dir = d["noise_model_dir"] + +split_noise_dir = d["split_noise_dir"] +plot_dir = d["plot_dir"] + "/split_noise" + +pspy_utils.create_directory(split_noise_dir) +pspy_utils.create_directory(plot_dir) + +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] +surveys = d["surveys"] +lmax = d["lmax"] +type = d["type"] +binning_file = d["binning_file"] + +lth = np.arange(2, lmax+2) + +arrays = {sv: d[f"arrays_{sv}"] for sv in surveys} +n_splits = {sv: d[f"n_splits_{sv}"] for sv in surveys} + +spec_name_list = pspipe_list.get_spec_name_list(d) + +split_noise_dict = {} +for spec_name in spec_name_list: + sv1ar1, sv2ar2 = spec_name.split("x") + sv1, ar1 = sv1ar1.split("&") + sv2, ar2 = sv2ar2.split("&") + + if sv1 != sv2: continue + + split_ps_dict = {} + for i in range(n_splits[sv1]): + for j in range(n_splits[sv2]): + if i > j and sv1ar1 == sv2ar2: continue + + lb, ps = so_spectra.read_ps(f"{spectra_dir}/Dl_{sv1}_{ar1}x{sv2}_{ar2}_{i}{j}.dat", + spectra=spectra) + split_ps_dict[i, j] = ps + + for i in range(n_splits[sv1]): + + cross_ps_dict = {spec: [] for spec in spectra} + + for id1, id2 in list(split_ps_dict.keys()): + if id1 == id2: continue + + for spec in spectra: + cross_ps_dict[spec].append(split_ps_dict[id1, id2][spec]) + + for spec in spectra: + cross_ps_dict[spec] = np.mean(cross_ps_dict[spec], axis=0) + + split_noise = {spec: split_ps_dict[i, i][spec] - cross_ps_dict[spec] for spec in spectra} + + split_noise_dict[sv1, ar1, sv2, ar2, i] = split_noise + + spec_name = f"Dl_{sv1}_{ar1}x{sv2}_{ar2}_{i}{i}_noise.dat" + so_spectra.write_ps(f"{split_noise_dir}/{spec_name}", lb, split_noise, type, spectra=spectra) + +for (sv1, ar1, sv2, ar2, split), split_noise in split_noise_dict.items(): + + # Noise model + l, bl1 = pspy_utils.read_beam_file(d[f"beam_{sv1}_{ar1}"]) + l, bl2 = pspy_utils.read_beam_file(d[f"beam_{sv2}_{ar2}"]) + + lb, bb1 = pspy_utils.naive_binning(l, bl1, binning_file, lmax) + lb, bb2 = pspy_utils.naive_binning(l, bl2, binning_file, lmax) + + if ar1 == ar2: + for spec in spectra: + split_noise[spec] = split_noise[spec] * bb1 * bb2 + nlth = interpolate_dict(lb, split_noise, lth, spectra) + + else: + nb_ar1xar1 = {spec: split_noise_dict[sv1, ar1, sv1, ar1, split][spec] * bb1 * bb1 for spec in spectra} + nb_ar2xar2 = {spec: split_noise_dict[sv2, ar2, sv2, ar2, split][spec] * bb2 * bb2 for spec in spectra} + nb_ar1xar2 = {spec: split_noise_dict[sv1, ar1, sv2, ar2, split][spec] * bb1 * bb2 for spec in spectra} + + Rb = {spec : nb_ar1xar2[spec] / np.sqrt(np.abs(nb_ar1xar1[spec] * nb_ar2xar2[spec])) for spec in spectra} + + Rlth = interpolate_dict(lb, Rb, lth, spectra) + nlth_ar1xar1 = interpolate_dict(lb, nb_ar1xar1, lth, spectra) + nlth_ar2xar2 = interpolate_dict(lb, 
nb_ar2xar2, lth, spectra)
+
+        nlth = {spec: Rlth[spec] * np.sqrt(nlth_ar1xar1[spec] * nlth_ar2xar2[spec]) for spec in spectra}
+
+    spec_model_name = f"Dl_{ar1}_{split}x{ar2}_{split}_{sv1}_noise_model.dat"
+    so_spectra.write_ps(f"{split_noise_dir}/{spec_model_name}", lth, nlth, type, spectra=spectra)
+
+    for split2 in range(n_splits[sv1]):
+        if split == split2: continue
+        nl_xsplit = {spec: np.zeros_like(nlth[spec]) for spec in spectra}
+        spec_name = f"Dl_{ar1}_{split}x{ar2}_{split2}_{sv1}_noise_model.dat"
+        so_spectra.write_ps(f"{split_noise_dir}/{spec_name}", lth, nl_xsplit, type, spectra=spectra)
+
+# Plots
+for spec_name in spec_name_list:
+    sv1ar1, sv2ar2 = spec_name.split("x")
+    sv1, ar1 = sv1ar1.split("&")
+    sv2, ar2 = sv2ar2.split("&")
+
+    if sv1 != sv2: continue
+
+    noise_model_file = f"{ps_model_dir}/mean_{ar1}x{ar2}_{sv1}_noise.dat"
+    ell, nl_mean = so_spectra.read_ps(noise_model_file, spectra=spectra)
+
+    for spec in ["TT", "EE", "BB"]:
+        plt.figure(figsize = (13, 8))
+        for i in range(n_splits[sv1]):
+            split_noise_model_file = f"{split_noise_dir}/Dl_{ar1}_{i}x{ar2}_{i}_{sv1}_noise_model.dat"
+            ell, nl = so_spectra.read_ps(split_noise_model_file, spectra=spectra)
+            plt.plot(ell, nl[spec], label=f"Split {i}")
+
+        plt.plot(ell, nl_mean[spec] * n_splits[sv1], label="Mean noise", color="k", ls="--")
+        plt.title(f"{sv1ar1}x{sv2ar2}", fontsize=16.5)
+        plt.xlabel(r"$\ell$", fontsize=15)
+        plt.ylabel(f"$N_\ell^{{{spec}}}$", fontsize=15)
+        plt.yscale("log")
+        plt.legend()
+        plt.tight_layout()
+        plt.savefig(f"{plot_dir}/noise_{sv1ar1}x{sv2ar2}_{spec}.png", dpi=300)
+
+        if sv1ar1 == sv2ar2:
+            plt.figure(figsize=(13,8))
+            plt.axhline(1, color="k", ls="--")
+            for i in range(n_splits[sv1]):
+                split_noise_model_file = f"{split_noise_dir}/Dl_{ar1}_{i}x{ar2}_{i}_{sv1}_noise_model.dat"
+                ell, nl = so_spectra.read_ps(split_noise_model_file, spectra = spectra)
+                plt.plot(ell, nl[spec]/nl_mean[spec]/n_splits[sv1], label = f"Split {i}")
+            plt.title(f"{ar1}x{ar2}", fontsize = 16.5)
+            plt.xlabel(r"$\ell$", fontsize = 15)
+            plt.ylabel(r"$N_\ell^{%s}/N_\ell^{%s,\mathrm{mean}}$" % (spec, spec), fontsize = 15)
+            plt.legend()
+            plt.tight_layout()
+            plt.savefig(f"{plot_dir}/noise_ratio_{sv1ar1}x{sv2ar2}_{spec}.png", dpi = 300)
diff --git a/project/ana_cov_comp/python/get_spectra_from_alms.py b/project/ana_cov_comp/python/get_spectra_from_alms.py
new file mode 100644
index 00000000..fafcb02b
--- /dev/null
+++ b/project/ana_cov_comp/python/get_spectra_from_alms.py
@@ -0,0 +1,188 @@
+"""
+This script computes all power spectra and writes them to disk.
+It uses the window function provided in the dictionary file.
+Optionally, it applies a calibration to the maps and a kspace filter, and deconvolves the pixel window function.
+The spectra are then combined into mean auto, cross and noise power spectra and written to disk.
+If write_all_spectra=True, each individual spectrum is also written to disk.
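+It requires the mode-coupling matrices from get_mcm_and_bbl.py and the alms
+from get_alms.py to have been computed first.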
+""" + +import sys + +import healpy as hp +import numpy as np +from pixell import enmap +from pspipe_utils import kspace, log, pspipe_list, transfer_function +from pspy import pspy_utils, so_dict, so_map, so_mcm, so_mpi, so_spectra + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) +log = log.get_logger(**d) + +surveys = d["surveys"] +lmax = d["lmax"] +type = d["type"] +binning_file = d["binning_file"] +write_all_spectra = d["write_splits_spectra"] +deconvolve_pixwin = d["deconvolve_pixwin"] +binned_mcm = d["binned_mcm"] +apply_kspace_filter = d["apply_kspace_filter"] +cov_T_E_only = d["cov_T_E_only"] + +mcm_dir = d["mcms_dir"] +spec_dir = d["spectra_dir"] +alms_dir = d["alms_dir"] + +pspy_utils.create_directory(spec_dir) + +master_alms, nsplit, arrays, templates, filter_dicts = {}, {}, {}, {}, {} +# read the alms + +for sv in surveys: + arrays[sv] = d[f"arrays_{sv}"] + if apply_kspace_filter == True: filter_dicts[sv] = d[f"k_filter_{sv}"] + templates[sv] = so_map.read_map(d[f"window_T_{sv}_{arrays[sv][0]}"]) + + for ar in arrays[sv]: + maps = d[f"maps_{sv}_{ar}"] + nsplit[sv] = len(maps) + log.info(f"{nsplit[sv]} split of survey: {sv}, array {ar}") + for k, map in enumerate(maps): + master_alms[sv, ar, k] = np.load(f"{alms_dir}/alms_{sv}_{ar}_{k}.npy") + log.debug(f"{alms_dir}/alms_{sv}_{ar}_{k}.npy") + +# compute the transfer functions +_, _, lb, _ = pspy_utils.read_binning_file(binning_file, lmax) +n_bins = len(lb) +spec_name_list = pspipe_list.get_spec_name_list(d, char="_") + +if apply_kspace_filter: + kspace_tf_path = d["kspace_tf_path"] + if kspace_tf_path == "analytical": + kspace_transfer_matrix = kspace.build_analytic_kspace_filter_matrices(surveys, + arrays, + templates, + filter_dicts, + binning_file, + lmax) + else: + kspace_transfer_matrix = {} + for spec_name in spec_name_list: + kspace_transfer_matrix[spec_name] = np.load(f"{kspace_tf_path}/kspace_matrix_{spec_name}.npy", allow_pickle=True) + + + # this will be used in the covariance computation + for spec_name in spec_name_list: + one_d_tf = kspace_transfer_matrix[spec_name].diagonal() + if cov_T_E_only == True: one_d_tf = one_d_tf[:4 * n_bins] + np.savetxt(f"{spec_dir}/one_dimension_kspace_tf_{spec_name}.dat", one_d_tf) + +# compute the power spectra + +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] + +n_spec, sv1_list, ar1_list, sv2_list, ar2_list = pspipe_list.get_spectra_list(d) +log.info(f"number of spectra to compute : {n_spec}") +so_mpi.init(True) +subtasks = so_mpi.taskrange(imin=0, imax=n_spec - 1) + +for task in subtasks: + task = int(task) + sv1, ar1, sv2, ar2 = sv1_list[task], ar1_list[task], sv2_list[task], ar2_list[task] + + log.info(f"[{task}] Computing spectra for {sv1}_{ar1} x {sv2}_{ar2}") + + xtra_pw1, xtra_pw2, mm_tf1, mm_tf2 = None, None, None, None + if deconvolve_pixwin: + if d[f"pixwin_{sv1}"]["pix"] == "HEALPIX": + pixwin_l = hp.pixwin(d[f"pixwin_{sv1}"]["nside"]) + lb, xtra_pw1 = pspy_utils.naive_binning(np.arange(len(pixwin_l)), pixwin_l, binning_file, lmax) + if d[f"pixwin_{sv2}"]["pix"] == "HEALPIX": + pixwin_l = hp.pixwin(d[f"pixwin_{sv2}"]["nside"]) + lb, xtra_pw2 = pspy_utils.naive_binning(np.arange(len(pixwin_l)), pixwin_l, binning_file, lmax) + + + if d[f"deconvolve_map_maker_tf_{sv1}"]: + mm_tf1 = so_spectra.read_ps(d[f"mm_tf_{sv1}_{ar1}.dat"], spectra=spectra) + if d[f"deconvolve_map_maker_tf_{sv2}"]: + mm_tf2 = so_spectra.read_ps(d[f"mm_tf_{sv2}_{ar2}.dat"], spectra=spectra) + + + ps_dict = {} + for spec in spectra: + ps_dict[spec, "auto"] = [] + ps_dict[spec, 
"cross"] = [] + + nsplits_1 = nsplit[sv1] + nsplits_2 = nsplit[sv2] + + spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"] + mbb_inv, Bbl = so_mcm.read_coupling(prefix=f"{mcm_dir}/{sv1}_{ar1}x{sv2}_{ar2}", + spin_pairs=spin_pairs) + + + for s1 in range(nsplits_1): + for s2 in range(nsplits_2): + if (sv1 == sv2) & (ar1 == ar2) & (s1>s2) : continue + + l, ps_master = so_spectra.get_spectra_pixell(master_alms[sv1, ar1, s1], + master_alms[sv2, ar2, s2], + spectra=spectra) + + spec_name=f"{type}_{sv1}_{ar1}x{sv2}_{ar2}_{s1}{s2}" + + lb, ps = so_spectra.bin_spectra(l, + ps_master, + binning_file, + lmax, + type=type, + mbb_inv=mbb_inv, + spectra=spectra, + binned_mcm=binned_mcm) + if apply_kspace_filter: + lb, ps = kspace.deconvolve_kspace_filter_matrix(lb, + ps, + kspace_transfer_matrix[f"{sv1}_{ar1}x{sv2}_{ar2}"], + spectra) + + + lb, ps = transfer_function.deconvolve_xtra_tf(lb, + ps, + spectra, + xtra_pw1=xtra_pw1, + xtra_pw2=xtra_pw2, + mm_tf1=mm_tf1, + mm_tf2=mm_tf2) + + if write_all_spectra: + so_spectra.write_ps(spec_dir + f"/{spec_name}.dat", lb, ps, type, spectra=spectra) + + for count, spec in enumerate(spectra): + if (s1 == s2) & (sv1 == sv2): + if count == 0: log.debug(f"[{task}] auto {sv1}_{ar1} x {sv2}_{ar2} {s1}{s2}") + ps_dict[spec, "auto"] += [ps[spec]] + else: + if count == 0: log.debug(f"[{task}] cross {sv1}_{ar1} x {sv2}_{ar2} {s1}{s2}") + ps_dict[spec, "cross"] += [ps[spec]] + + ps_dict_auto_mean = {} + ps_dict_cross_mean = {} + ps_dict_noise_mean = {} + + for spec in spectra: + ps_dict_cross_mean[spec] = np.mean(ps_dict[spec, "cross"], axis=0) + spec_name_cross = f"{type}_{sv1}_{ar1}x{sv2}_{ar2}_cross" + + if ar1 == ar2 and sv1 == sv2: + # Average TE / ET so that for same array same season TE = ET + ps_dict_cross_mean[spec] = (np.mean(ps_dict[spec, "cross"], axis=0) + np.mean(ps_dict[spec[::-1], "cross"], axis=0)) / 2. 
+ + if sv1 == sv2: + ps_dict_auto_mean[spec] = np.mean(ps_dict[spec, "auto"], axis=0) + spec_name_auto = f"{type}_{sv1}_{ar1}x{sv2}_{ar2}_auto" + ps_dict_noise_mean[spec] = (ps_dict_auto_mean[spec] - ps_dict_cross_mean[spec]) / nsplit[sv1] + spec_name_noise = f"{type}_{sv1}_{ar1}x{sv2}_{ar2}_noise" + + so_spectra.write_ps(spec_dir + f"/{spec_name_cross}.dat", lb, ps_dict_cross_mean, type, spectra=spectra) + if sv1 == sv2: + so_spectra.write_ps(spec_dir + f"/{spec_name_auto}.dat", lb, ps_dict_auto_mean, type, spectra=spectra) + so_spectra.write_ps(spec_dir + f"/{spec_name_noise}.dat", lb, ps_dict_noise_mean, type, spectra=spectra) From d9e4d7436097ad8dfec9507182ee6f364109b9a1 Mon Sep 17 00:00:00 2001 From: Zachary Atkins Date: Fri, 18 Aug 2023 09:55:00 -0400 Subject: [PATCH 10/13] updated scripts through get_covariance_blocks.py --- project/ana_cov_comp/README.md | 16 +- .../sims_dr6_v4_lmax5400_20230816.dict | 12 +- .../python/get_covariance_blocks.py | 156 +++++++++++++++ .../python/get_split_covariance_aniso.py | 179 ++++++++++++++++++ .../python/get_sq_windows_alms.py | 65 +++++++ 5 files changed, 418 insertions(+), 10 deletions(-) create mode 100644 project/ana_cov_comp/python/get_covariance_blocks.py create mode 100644 project/ana_cov_comp/python/get_split_covariance_aniso.py create mode 100644 project/ana_cov_comp/python/get_sq_windows_alms.py diff --git a/project/ana_cov_comp/README.md b/project/ana_cov_comp/README.md index 87fea728..290da446 100644 --- a/project/ana_cov_comp/README.md +++ b/project/ana_cov_comp/README.md @@ -10,7 +10,7 @@ export sbatch1pn="/home/zatkins/repos/simonsobs/PSpipe/project/ana_cov_comp/para We need windows. Note, this will generate "PSpipe" style windows. A particular dict file may load other, pre-generated windows. ```bash -for i in {0..4}; do \ +for i in {0..5}; do \ sbatch --cpus-per-task 1 --mem 48G --time 20:00 --job-name get_window_dr6 ${sbatch1pn} "python -u python/get_window_dr6.py paramfiles/global_dr6_v4.dict $i $((i+1))" done ``` @@ -24,7 +24,7 @@ sbatch --cpus-per-task 1 --mem 2G --time 2:00 --job-name get_best_fit_mflike ${s Next we'll need to compute the mode-coupling matrices, ```bash -for i in {0..21}; do \ +for i in {0..20}; do \ sbatch --cpus-per-task 10 --mem 28G --time 5:00 --job-name get_mcm_and_bbl ${sbatch1pn} "python -u python/get_mcm_and_bbl.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict $i $((i+1))" done ``` @@ -46,23 +46,25 @@ sbatch --cpus-per-task 1 --mem 1G --time 2:00 --job-name get_noise_model ${sbatc ``` ```bash -sbatch --cpus-per-task 1 --mem 4G --time 10:00 --job-name get_per_split_noise ${sbatch1pn} "python -u python/get_per_split_noise.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict" +sbatch --cpus-per-task 1 --mem 1G --time 2:00 --job-name get_per_split_noise ${sbatch1pn} "python -u python/get_per_split_noise.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict" ``` Now we compute the analytic covariances blocks assuming an isotropic noise model. 
 ```bash
-10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_sq_windows_alms.py paramfiles/della/global_dr6_v4.dict"
+sbatch --cpus-per-task 4 --mem 10G --time 15:00 --job-name get_sq_windows_alms ${sbatch1pn} "python -u python/get_sq_windows_alms.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict"
 ```
 
 ```bash
-for i in {0..119}; do \
-  10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_covariance_blocks.py paramfiles/della/global_dr6_v4.dict $i $((i+1))"
+for i in {0..230}; do \
+  sbatch --cpus-per-task 10 --mem 32G --time 5:00 --job-name get_covariance_blocks ${sbatch1pn} "python -u python/get_covariance_blocks.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict $i $((i+1))"
 done
 ```
 
 For anisotropic covariances,
 
 ```bash
-10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_split_covariance_aniso.py paramfiles/della/global_dr6_v4.dict 100 101"
+for i in {0..230}; do \
+  sbatch --cpus-per-task 4 --mem 10G --time 15:00 --job-name get_split_covariance_aniso ${sbatch1pn} "python -u python/get_split_covariance_aniso.py paramfiles/sims_dr6_v4_lmax5400_20230816.dict $i $((i+1))"
+done
 ```
diff --git a/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict b/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict
index 96c4c23c..3216bc89 100644
--- a/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict
+++ b/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict
@@ -4,6 +4,10 @@ arrays_dr6 = ["pa4_f150", "pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f
 
 data_dir = '/scratch/gpfs/zatkins/data/simonsobs/PSpipe/project/data_analysis/ana_cov_comp/'
 
+# orphans
+use_beam_covariance = True
+multistep_path = data_dir
+
 # mcms
 mcms_dir = data_dir + 'mcms'
 
@@ -38,10 +42,12 @@ mm_tf_dr6_pa5_f150 = data_dir + "transfer_fcns/tf_unity.dat"
 mm_tf_dr6_pa6_f090 = data_dir + "transfer_fcns/tf_unity.dat"
 mm_tf_dr6_pa6_f150 = data_dir + "transfer_fcns/tf_unity.dat"
 
-#
-multistep_path = data_dir
+# cov
+sq_win_alms_dir = data_dir + 'sq_win_alms'
+covariances_dir = data_dir + 'covariances'
+split_covariances_dir = data_dir + 'split_covariances'
+
 use_toeplitz_cov = True
-use_beam_covariance = True
 
 # window parameters
 ps_mask_dr6_pa4_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits'
diff --git a/project/ana_cov_comp/python/get_covariance_blocks.py b/project/ana_cov_comp/python/get_covariance_blocks.py
new file mode 100644
index 00000000..f8ea1e0f
--- /dev/null
+++ b/project/ana_cov_comp/python/get_covariance_blocks.py
@@ -0,0 +1,156 @@
+"""
+This script computes the analytical covariance matrix elements.
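+When 'fast_coupling' is used, the same window function must be provided for T
+and pol (this is asserted below).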
+""" +import sys + +import numpy as np +from pspipe_utils import best_fits, log, pspipe_list +from pspy import pspy_utils, so_cov, so_dict, so_map, so_mcm, so_mpi + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) + +log = log.get_logger(**d) + +mcms_dir = d["mcms_dir"] +spectra_dir = d["spectra_dir"] +noise_dir = d["noise_model_dir"] +bestfit_dir = d["best_fits_dir"] +sq_win_alms_dir = d["sq_win_alms_dir"] + +cov_dir = d["covariances_dir"] + +pspy_utils.create_directory(cov_dir) + +surveys = d["surveys"] +binning_file = d["binning_file"] +lmax = d["lmax"] +niter = d["niter"] +binned_mcm = d["binned_mcm"] +apply_kspace_filter = d["apply_kspace_filter"] +cov_T_E_only = d["cov_T_E_only"] + + +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] +spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"] + +# fast_coupling is designed to be fast but is not for general usage +# In particular we assume that the same window function is used in T and Pol +fast_coupling = True + +arrays, n_splits, bl_dict = {}, {}, {} +for sv in surveys: + arrays[sv] = d[f"arrays_{sv}"] + for ar in arrays[sv]: + l_beam, bl_dict[sv, ar] = pspy_utils.read_beam_file(d[f"beam_{sv}_{ar}"]) + id_beam = np.where((l_beam >= 2) & (l_beam < lmax)) + bl_dict[sv, ar] = bl_dict[sv, ar][id_beam] + n_splits[sv] = d[f"n_splits_{sv}"] + assert n_splits[sv] == len(d.get(f"maps_{sv}_{ar}", [0]*n_splits[sv])), "the number of splits does not correspond to the number of maps" + + if fast_coupling: + # This loop check that this is what was specified in the dictfile + assert d[f"window_T_{sv}_{ar}"] == d[f"window_pol_{sv}_{ar}"], "T and pol windows have to be the same" + +l_cmb, cmb_dict = best_fits.cmb_dict_from_file(bestfit_dir + "/cmb.dat", lmax, spectra) + +array_list = [f"{sv}_{ar}" for sv in surveys for ar in arrays[sv]] +l_fg, fg_dict = best_fits.fg_dict_from_files(bestfit_dir + "/fg_{}x{}.dat", array_list, lmax, spectra) + +f_name_noise = noise_dir + "/mean_{}x{}_{}_noise.dat" +l_noise, nl_dict = best_fits.noise_dict_from_files(f_name_noise, surveys, arrays, lmax, spectra, n_splits=n_splits) + +spec_name_list = pspipe_list.get_spec_name_list(d) +l, ps_all, nl_all = best_fits.get_all_best_fit(spec_name_list, + l_cmb, + cmb_dict, + fg_dict, + spectra, + nl_dict=nl_dict, + bl_dict=bl_dict) + +ncovs, na_list, nb_list, nc_list, nd_list = pspipe_list.get_covariances_list(d) + +if d["use_toeplitz_cov"] == True: + log.info("we will use the toeplitz approximation") + l_exact, l_band, l_toep = 800, 2000, 2750 +else: + l_exact, l_band, l_toep = None, None, None + +log.info(f"number of covariance matrices to compute : {ncovs}") +so_mpi.init(True) +subtasks = so_mpi.taskrange(imin=0, imax=ncovs - 1) + +if len(sys.argv) == 4: + log.info(f"computing only the covariance matrices : {int(sys.argv[2])}:{int(sys.argv[3])}") + subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] +log.info(subtasks) + +for task in subtasks: + task = int(task) + na, nb, nc, nd = na_list[task], nb_list[task], nc_list[task], nd_list[task] + na_r, nb_r, nc_r, nd_r = na.replace("&", "_"), nb.replace("&", "_"), nc.replace("&", "_"), nd.replace("&", "_") + + log.info(f"[{task}] cov element ({na_r} x {nb_r}, {nc_r} x {nd_r})") + + if fast_coupling: + + coupling = so_cov.fast_cov_coupling_spin0and2(sq_win_alms_dir, + [na_r, nb_r, nc_r, nd_r], + lmax, + l_exact=l_exact, + l_band=l_band, + l_toep=l_toep) + + else: + win = {} + win["Ta"] = so_map.read_map(d[f"window_T_{na_r}"]) + win["Tb"] = so_map.read_map(d[f"window_T_{nb_r}"]) + win["Tc"] = 
so_map.read_map(d[f"window_T_{nc_r}"]) + win["Td"] = so_map.read_map(d[f"window_T_{nd_r}"]) + win["Pa"] = so_map.read_map(d[f"window_pol_{na_r}"]) + win["Pb"] = so_map.read_map(d[f"window_pol_{nb_r}"]) + win["Pc"] = so_map.read_map(d[f"window_pol_{nc_r}"]) + win["Pd"] = so_map.read_map(d[f"window_pol_{nd_r}"]) + + coupling = so_cov.cov_coupling_spin0and2_simple(win, + lmax, + niter=niter, + l_exact=l_exact, + l_band=l_band, + l_toep=l_toep) + + + + try: mbb_inv_ab, Bbl_ab = so_mcm.read_coupling(prefix=f"{mcms_dir}/{na_r}x{nb_r}", spin_pairs=spin_pairs) + except: mbb_inv_ab, Bbl_ab = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nb_r}x{na_r}", spin_pairs=spin_pairs) + + try: mbb_inv_cd, Bbl_cd = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nc_r}x{nd_r}", spin_pairs=spin_pairs) + except: mbb_inv_cd, Bbl_cd = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nd_r}x{nc_r}", spin_pairs=spin_pairs) + + + analytic_cov = so_cov.generalized_cov_spin0and2(coupling, + [na, nb, nc, nd], + n_splits, + ps_all, + nl_all, + lmax, + binning_file, + mbb_inv_ab, + mbb_inv_cd, + binned_mcm=binned_mcm, + cov_T_E_only=cov_T_E_only, + dtype=np.float32) + + if apply_kspace_filter == True: + # Some heuristic correction for the number of modes lost due to the transfer function + # This should be tested against simulation and revisited + + one_d_tf_ab = np.loadtxt(f"{spectra_dir}/one_dimension_kspace_tf_{na_r}x{nb_r}.dat") + one_d_tf_cd = np.loadtxt(f"{spectra_dir}/one_dimension_kspace_tf_{nc_r}x{nd_r}.dat") + one_d_tf = np.minimum(one_d_tf_ab, one_d_tf_cd) + # sqrt(tf) is an approx for the number of modes masked in a given map so (2l+1)*fsky*sqrt(tf) + # is our proxy for the number of modes + analytic_cov /= np.outer(one_d_tf ** (1.0 / 4.0), one_d_tf ** (1.0 / 4.0)) + + np.save(f"{cov_dir}/analytic_cov_{na_r}x{nb_r}_{nc_r}x{nd_r}.npy", analytic_cov) diff --git a/project/ana_cov_comp/python/get_split_covariance_aniso.py b/project/ana_cov_comp/python/get_split_covariance_aniso.py new file mode 100644 index 00000000..a5ff3820 --- /dev/null +++ b/project/ana_cov_comp/python/get_split_covariance_aniso.py @@ -0,0 +1,179 @@ +""" +This script compute the analytical covariance matrix elements +between split power spectra +""" +import sys +import numpy as np +from pspipe_utils import best_fits, log +from pspy import pspy_utils, so_cov, so_dict, so_map, so_mcm, so_mpi, so_spectra +from itertools import combinations_with_replacement as cwr +from itertools import combinations +from pspipe_utils import best_fits, pspipe_list + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) + +log = log.get_logger(**d) + +mcms_dir = d["mcms_dir"] +noise_dir = d["split_noise_dir"] +bestfit_dir = d["best_fits_dir"] + +cov_dir = d["split_covariances_dir"] + +pspy_utils.create_directory(cov_dir) + +surveys = d["surveys"] +binning_file = d["binning_file"] +lmax = d["lmax"] +niter = d["niter"] +binned_mcm = d["binned_mcm"] +apply_kspace_filter = d["apply_kspace_filter"] +cov_T_E_only = d["cov_T_E_only"] + +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] +spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"] + + +# l_cmb, cmb_dict = best_fits.cmb_dict_from_file(bestfit_dir + "/cmb.dat", lmax, spectra) + +arrays = {sv: d[f"arrays_{sv}"] for sv in surveys} +n_splits = {sv: d[f"n_splits_{sv}"] for sv in surveys} +array_list = [f"{sv}_{ar}" for sv in surveys for ar in arrays[sv]] + +spec_name_list = pspipe_list.get_spec_name_list(d) +sv_array_list = [f"{sv}_{ar}" for sv in surveys for ar in arrays[sv]] +l_fg, fg_dict = 
best_fits.fg_dict_from_files(bestfit_dir + "/fg_{}x{}.dat", sv_array_list, lmax, spectra)
+
+l_cmb, cmb_dict = best_fits.cmb_dict_from_file(bestfit_dir + "/cmb.dat", lmax, spectra)
+l, ps_all = best_fits.get_all_best_fit(spec_name_list,
+                                       l_cmb,
+                                       cmb_dict,
+                                       fg_dict,
+                                       spectra)
+
+noise_dict = {}
+f_name_noise = f"{noise_dir}/Dl_" + "{}x{}_{}_noise_model.dat"
+for sv in surveys:
+    for ar in arrays[sv]:
+        split_list = {sv: [f"{ar}_{i}" for i in range(n_splits[sv])]}
+        _, nlth = best_fits.noise_dict_from_files(f_name_noise, [sv], split_list, lmax=d["lmax"],spectra=spectra)
+        noise_dict.update(nlth)
+
+
+bl_dict = {}
+for sv in surveys:
+    for ar in arrays[sv]:
+        l_beam, bl_dict[sv, ar] = pspy_utils.read_beam_file(d[f"beam_{sv}_{ar}"])
+        id_beam = np.where((l_beam >= 2) & (l_beam < lmax))
+        bl_dict[sv, ar] = bl_dict[sv, ar][id_beam]
+
+cov_name = []
+for sv in surveys:
+    for ar in arrays[sv]:
+        for id1, xsplit1 in enumerate(combinations(range(n_splits[sv]), 2)):
+            for id2, xsplit2 in enumerate(combinations(range(n_splits[sv]), 2)):
+                if id1 > id2: continue
+
+
+                for pol in ("T",): # TODO: add back E
+
+                    na = f"{sv}&{ar}&{xsplit1[0]}&{pol}"
+                    nb = f"{sv}&{ar}&{xsplit1[1]}&{pol}"
+                    nc = f"{sv}&{ar}&{xsplit2[0]}&{pol}"
+                    nd = f"{sv}&{ar}&{xsplit2[1]}&{pol}"
+
+                    cov_name.append((na, nb, nc, nd))
+
+ncovs = len(cov_name)
+
+
+log.info(f"number of covariance matrices to compute : {ncovs}")
+
+so_mpi.init(True)
+subtasks = so_mpi.taskrange(imin=0, imax=ncovs - 1)
+
+if len(sys.argv) == 4:
+    log.info(f"computing only the covariance matrices : {int(sys.argv[2])}:{int(sys.argv[3])}")
+    subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])]
+log.info(subtasks)
+
+for task in subtasks:
+    na, nb, nc, nd = cov_name[int(task)]  # taskrange entries may be floats
+
+    sv_a, ar_a, split_a, pol_a = na.split("&")
+    sv_b, ar_b, split_b, pol_b = nb.split("&")
+    sv_c, ar_c, split_c, pol_c = nc.split("&")
+    sv_d, ar_d, split_d, pol_d = nd.split("&")
+
+    na = f"{sv_a}&{ar_a}"
+    nb = f"{sv_b}&{ar_b}"
+    nc = f"{sv_c}&{ar_c}"
+    nd = f"{sv_d}&{ar_d}"
+
+    na_r, nb_r, nc_r, nd_r = na.replace("&", "_"), nb.replace("&", "_"), nc.replace("&", "_"), nd.replace("&", "_")
+
+    try: mbb_inv_ab, Bbl_ab = so_mcm.read_coupling(prefix=f"{mcms_dir}/{na_r}x{nb_r}", spin_pairs=spin_pairs)
+    except Exception: mbb_inv_ab, Bbl_ab = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nb_r}x{na_r}", spin_pairs=spin_pairs)
+
+    try: mbb_inv_cd, Bbl_cd = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nc_r}x{nd_r}", spin_pairs=spin_pairs)
+    except Exception: mbb_inv_cd, Bbl_cd = so_mcm.read_coupling(prefix=f"{mcms_dir}/{nd_r}x{nc_r}", spin_pairs=spin_pairs)
+
+    log.info(f"[{task}] cov element ({na_r} x {nb_r}, {nc_r} x {nd_r}) {split_a}{split_b}{split_c}{split_d} {pol_a}{pol_b}{pol_c}{pol_d}")
+
+    splits = [split_a, split_b, split_c, split_d]
+    survey_id = [pol_a + "a", pol_b + "b", pol_c + "c", pol_d + "d"]
+    # split names are the literal split indices ("0".."3"), so they index ivar lists directly
+
+    survey_combo = sv_a, sv_b, sv_c, sv_d
+    array_combo = ar_a, ar_b, ar_c, ar_d
+
+    win, var = {}, {}
+    white_noise = {}
+    for splitname1, id1, sv1, ar1 in zip(splits, survey_id, survey_combo, array_combo):
+        # log.info(f"{splitname1}, {id1}, {sv1}, {ar1}")
+        split_index = int(splitname1)
+        ivar_fname = d[f"ivar_{sv1}_{ar1}"][split_index]
+        ivar1 = so_map.read_map(ivar_fname).data
+        var1 = np.reciprocal(ivar1, out=np.zeros_like(ivar1), where=(ivar1 != 0))  # out= keeps zero-ivar pixels at 0
+        maskpol = "T" if id1[0] == "T" else "pol"
+        win[id1] = so_map.read_map(d[f"window_{maskpol}_{sv1}_{ar1}"])
+        white_noise[id1] = so_cov.measure_white_noise_level(var1, win[id1].data)
+        var[id1] = 
so_cov.make_weighted_variance_map(var1, win[id1])


    log.info(white_noise)


    Clth_dict = {}
    Rl_dict = {}

    for splitname1, id1, sv1, ar1 in zip(splits, survey_id, survey_combo, array_combo):
        s1 = splitname1.replace("set", "")
        X1 = id1[0]
        ndl = noise_dict[sv1, (f"{ar1}_{s1}"), (f"{ar1}_{s1}")][X1 + X1]
        ell = np.arange(2, lmax)
        dlfac = (ell * (ell + 1) / (2 * np.pi))
        nl = ndl[:(lmax-2)] / dlfac

        Rl_dict[id1] = np.sqrt(nl / white_noise[id1])

        for splitname2, id2, sv2, ar2 in zip(splits, survey_id, survey_combo, array_combo):
            s2 = splitname2.replace("set", "")
            X2 = id2[0]
            # k = (f"{sv1}&{ar1}&{s1}", f"{sv2}&{ar2}&{s2}", X1+X2)
            dlth = ps_all[f"{sv1}&{ar1}", f"{sv2}&{ar2}", X1+X2]
            Clth_dict[id1 + id2] = dlth / dlfac[:(lmax-2)]


    couplings = so_cov.generate_aniso_couplings_TTTT(survey_id, splits, win, var, lmax)

    cov_tttt = so_cov.coupled_cov_aniso_same_pol(survey_id, Clth_dict, Rl_dict, couplings)

    np.save(f"{cov_dir}/analytic_aniso_coupled_cov_{na_r}_{split_a}x{nb_r}_{split_b}_{nc_r}_{split_c}x{nd_r}_{split_d}.npy",
            cov_tttt)

    full_analytic_cov = mbb_inv_ab["spin0xspin0"] @ cov_tttt @ mbb_inv_cd["spin0xspin0"].T
    analytic_cov = so_cov.bin_mat(full_analytic_cov, binning_file, lmax)
    np.save(f"{cov_dir}/analytic_aniso_cov_{na_r}_{split_a}x{nb_r}_{split_b}_{nc_r}_{split_c}x{nd_r}_{split_d}.npy",
            analytic_cov)
diff --git a/project/ana_cov_comp/python/get_sq_windows_alms.py b/project/ana_cov_comp/python/get_sq_windows_alms.py
new file mode 100644
index 00000000..09ffe47a
--- /dev/null
+++ b/project/ana_cov_comp/python/get_sq_windows_alms.py
@@ -0,0 +1,65 @@
+"""
+This script computes the alms of all squared windows; this is a necessary step of the covariance computation.
+"""
+import sys
+
+import numpy as np
+from pspipe_utils import log, pspipe_list
+from pspy import pspy_utils, so_dict, so_map, so_mpi, sph_tools
+
+
+def mult(map_a, map_b, log):
+    res_a = 1 / map_a.data.pixsize()
+    res_b = 1 / map_b.data.pixsize()
+
+    if res_a == res_b:
+        prod = map_a.copy()
+        prod.data *= map_b.data
+    elif res_a < res_b:
+        log.info("resample map a")
+        prod = map_b.copy()
+        map_a_proj = so_map.car2car(map_a, map_b)
+        prod.data *= map_a_proj.data
+    elif res_b < res_a:
+        log.info("resample map b")
+        prod = map_a.copy()
+        map_b_proj = so_map.car2car(map_b, map_a)
+        prod.data *= map_b_proj.data
+
+    return prod
+
+
+d = so_dict.so_dict()
+d.read_from_file(sys.argv[1])
+log = log.get_logger(**d)
+
+surveys = d["surveys"]
+lmax = d["lmax"]
+niter = d["niter"]
+
+sq_win_alms_dir = d["sq_win_alms_dir"]
+
+pspy_utils.create_directory(sq_win_alms_dir)
+
+n_sq_alms, sv1_list, ar1_list, sv2_list, ar2_list = pspipe_list.get_spectra_list(d)
+
+
+log.info(f"number of sq win alms to compute : {n_sq_alms}")
+so_mpi.init(True)
+subtasks = so_mpi.taskrange(imin=0, imax=n_sq_alms - 1)
+log.info(subtasks)
+for task in subtasks:
+    task = int(task)
+    sv1, ar1, sv2, ar2 = sv1_list[task], ar1_list[task], sv2_list[task], ar2_list[task]
+
+    log.info(f"[{task:02d}] Computing map2alm for {sv1}_{ar1}x{sv2}_{ar2}...")
+
+    win_T1 = so_map.read_map(d["window_T_%s_%s" % (sv1, ar1)])
+    win_T2 = so_map.read_map(d["window_T_%s_%s" % (sv2, ar2)])
+
+    sq_win = mult(win_T1, win_T2, log)
+    # sq_win = win_T1.copy()
+    # sq_win.data[:] *= win_T2.data[:]
+    sqwin_alm = sph_tools.map2alm(sq_win, niter=niter, lmax=lmax)
+
+    np.save(f"{sq_win_alms_dir}/alms_{sv1}_{ar1}x{sv2}_{ar2}.npy", sqwin_alm)
From f09c0baf59e4e61114fd396629a38844ee8255d5 Mon Sep 17 00:00:00 2001
From: xzackli
Date: Thu, 2 Nov 2023 09:23:10 -0400
Subject: [PATCH 
11/13] add della-based updates for showing on call --- .../paramfiles/della/10core2hr.slurm | 1 - .../paramfiles/della/8core1hr.slurm | 1 - .../paramfiles/della/8core2hr.slurm | 1 - .../data_analysis/paramfiles/della/README.md | 5 + .../paramfiles/della/global_dr6_v4.dict | 2 +- .../della/src_subtracted_maps.ipynb | 124 ++++++++++++++++++ .../python/get_split_covariance_aniso.py | 11 +- 7 files changed, 137 insertions(+), 8 deletions(-) create mode 100644 project/data_analysis/paramfiles/della/src_subtracted_maps.ipynb diff --git a/project/data_analysis/paramfiles/della/10core2hr.slurm b/project/data_analysis/paramfiles/della/10core2hr.slurm index 0e3206dc..01f22c42 100644 --- a/project/data_analysis/paramfiles/della/10core2hr.slurm +++ b/project/data_analysis/paramfiles/della/10core2hr.slurm @@ -10,7 +10,6 @@ cd /tigress/zequnl/PSpipe/project/data_analysis module load intel/2021.1 openmpi/intel-2021.1/4.1.0 anaconda3/2023.3 -export MPICC=$(which mpicc) conda activate ps export OMP_NUM_THREADS=10 diff --git a/project/data_analysis/paramfiles/della/8core1hr.slurm b/project/data_analysis/paramfiles/della/8core1hr.slurm index f01c0e3a..7d6d9a81 100644 --- a/project/data_analysis/paramfiles/della/8core1hr.slurm +++ b/project/data_analysis/paramfiles/della/8core1hr.slurm @@ -10,7 +10,6 @@ cd /tigress/zequnl/PSpipe/project/data_analysis module load intel/2021.1 openmpi/intel-2021.1/4.1.0 anaconda3/2023.3 -export MPICC=$(which mpicc) conda activate ps export OMP_NUM_THREADS=8 diff --git a/project/data_analysis/paramfiles/della/8core2hr.slurm b/project/data_analysis/paramfiles/della/8core2hr.slurm index a62537b9..94343d0b 100644 --- a/project/data_analysis/paramfiles/della/8core2hr.slurm +++ b/project/data_analysis/paramfiles/della/8core2hr.slurm @@ -10,7 +10,6 @@ cd /tigress/zequnl/PSpipe/project/data_analysis module load intel/2021.1 openmpi/intel-2021.1/4.1.0 anaconda3/2023.3 -export MPICC=$(which mpicc) conda activate ps export OMP_NUM_THREADS=8 diff --git a/project/data_analysis/paramfiles/della/README.md b/project/data_analysis/paramfiles/della/README.md index 0d9c5d86..8aa97c7c 100644 --- a/project/data_analysis/paramfiles/della/README.md +++ b/project/data_analysis/paramfiles/della/README.md @@ -63,3 +63,8 @@ For anisotropic covariances, ```bash 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_split_covariance_aniso.py paramfiles/della/global_dr6_v4.dict 100 101" ``` +```bash +for i in {0..21}; do \ + 10core2hr "srun --ntasks 1 --cpus-per-task 10 --cpu-bind=cores python -u python/get_split_covariance_aniso.py paramfiles/della/global_dr6_v4.dict $i $((i+1))" +done +``` \ No newline at end of file diff --git a/project/data_analysis/paramfiles/della/global_dr6_v4.dict b/project/data_analysis/paramfiles/della/global_dr6_v4.dict index 7ee4b869..9ed70aca 100644 --- a/project/data_analysis/paramfiles/della/global_dr6_v4.dict +++ b/project/data_analysis/paramfiles/della/global_dr6_v4.dict @@ -1,6 +1,6 @@ surveys = ["dr6"] -arrays_dr6 = ["pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"] +arrays_dr6 = ["pa6_f150"] map_dir = '/scratch/gpfs/ACT/dr6v4/maps/dr6v4_20230316/release/' data_dir = '/tigress/zequnl/cmb/data/dr6/' diff --git a/project/data_analysis/paramfiles/della/src_subtracted_maps.ipynb b/project/data_analysis/paramfiles/della/src_subtracted_maps.ipynb new file mode 100644 index 00000000..2255527b --- /dev/null +++ b/project/data_analysis/paramfiles/della/src_subtracted_maps.ipynb @@ -0,0 +1,124 @@ +{ + "cells": [ + { + "cell_type": "code", + 
"execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from pixell import enmap,utils\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "map_dir = \"/scratch/gpfs/ACT/dr6v4/maps/dr6v4_20230316/release/\"\n", + "\n", + "maps_dr6_pa4_f150 = [map_dir + \"cmb_night_pa4_f150_3pass_4way_set%d_map.fits\" % (i) for i in range(4)]\n", + "maps_dr6_pa4_f220 = [map_dir + \"cmb_night_pa4_f220_3pass_4way_set%d_map.fits\" % (i) for i in range(4)]\n", + "maps_dr6_pa5_f090 = [map_dir + \"cmb_night_pa5_f090_3pass_4way_set%d_map.fits\" % (i) for i in range(4)]\n", + "maps_dr6_pa5_f150 = [map_dir + \"cmb_night_pa5_f150_3pass_4way_set%d_map.fits\" % (i) for i in range(4)]\n", + "maps_dr6_pa6_f090 = [map_dir + \"cmb_night_pa6_f090_3pass_4way_set%d_map.fits\" % (i) for i in range(4)]\n", + "maps_dr6_pa6_f150 = [map_dir + \"cmb_night_pa6_f150_3pass_4way_set%d_map.fits\" % (i) for i in range(4)]\n", + "\n", + "fnames = maps_dr6_pa4_f150 + maps_dr6_pa4_f220 + maps_dr6_pa5_f090 + maps_dr6_pa5_f150 + maps_dr6_pa6_f090 + maps_dr6_pa6_f150" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# plt.imshow(srcmap[0, 5500:6000,11500:12000])" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "data_dir = '/tigress/zequnl/cmb/data/dr6/src_free_maps/'" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "! rm /tigress/zequnl/cmb/data/dr6/src_free_maps/*" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mkdir: cannot create directory ‘/tigress/zequnl/cmb/data/dr6/src_free_maps’: File exists\r\n" + ] + } + ], + "source": [ + "! 
mkdir /tigress/zequnl/cmb/data/dr6/src_free_maps" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from pspy import so_map\n", + "from pixell import enmap, curvedsky\n", + "import os\n", + "\n", + "\n", + "arrays_dr6 = [\"pa4_f150\", \"pa4_f220\", \"pa5_f090\", \"pa5_f150\", \"pa6_f090\", \"pa6_f150\"]\n", + "\n", + "for ar in arrays_dr6:\n", + " for i in range(4):\n", + "\n", + " scr_name = f\"cmb_night_{ar}_3pass_4way_set{i}_srcs.fits\"\n", + " map_src = so_map.read_map(map_dir + scr_name)\n", + "\n", + " map = so_map.read_map(map_dir + f\"cmb_night_{ar}_3pass_4way_set{i}_map.fits\")\n", + " map.data -= map_src.data\n", + "\n", + " map.write_map(data_dir + f\"cmb_night_{ar}_3pass_4way_set{i}_map_srcfree.fits\")\n", + " map_src.write_map(data_dir + f\"cmb_night_{ar}_3pass_4way_set{i}_map_model.fits\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ps)", + "language": "python", + "name": "ps" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/project/data_analysis/python/get_split_covariance_aniso.py b/project/data_analysis/python/get_split_covariance_aniso.py index a09c4f7d..2c43e242 100644 --- a/project/data_analysis/python/get_split_covariance_aniso.py +++ b/project/data_analysis/python/get_split_covariance_aniso.py @@ -124,7 +124,6 @@ splits = [split_a, split_b, split_c, split_d] survey_id = [pol_a + "a", pol_b + "b", pol_c + "c", pol_d + "d"] - splitname2splitindex = dict(zip(splits, range(len(splits)))) survey_combo = sv_a, sv_b, sv_c, sv_d array_combo = ar_a, ar_b, ar_c, ar_d @@ -132,8 +131,13 @@ win, var = {}, {} white_noise = {} for splitname1, id1, sv1, ar1 in zip(splits, survey_id, survey_combo, array_combo): - # log.info(f"{name1}, {id1}, {sv1}, {ar1}") - split_index = splitname2splitindex[splitname1] + log.info(f"{splitname1}, {id1}, {sv1}, {ar1}") + + + all_splits = [f'set{i}' for i in range(n_splits[sv1])] + splitname2splitindex = dict(zip(all_splits, range(n_splits[sv1]))) + + split_index = int(splitname1) ivar_fname = d[f"ivar_{sv1}_{ar1}"][split_index] ivar1 = so_map.read_map(ivar_fname).data var1 = np.reciprocal(ivar1,where=(ivar1!=0)) @@ -142,7 +146,6 @@ white_noise[id1] = so_cov.measure_white_noise_level(var1, win[id1].data) var[id1] = so_cov.make_weighted_variance_map(var1, win[id1]) - log.info(white_noise) From 5fc3f9626cfa4f08132a5c2856a152468f0500b9 Mon Sep 17 00:00:00 2001 From: xzackli Date: Mon, 20 Nov 2023 15:27:00 -0800 Subject: [PATCH 12/13] additional window args --- project/data_analysis/python/get_window_dr6.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/project/data_analysis/python/get_window_dr6.py b/project/data_analysis/python/get_window_dr6.py index 6a7711ca..c36db29f 100644 --- a/project/data_analysis/python/get_window_dr6.py +++ b/project/data_analysis/python/get_window_dr6.py @@ -111,13 +111,10 @@ def apply_coordinate_mask(mask, coord): so_mpi.init(True) subtasks = so_mpi.taskrange(imin=0, imax=n_wins - 1) -<<<<<<< HEAD if len(sys.argv) == 4: log.info(f"computing only the windows : {int(sys.argv[2])}:{int(sys.argv[3])}") subtasks = subtasks[int(sys.argv[2]):int(sys.argv[3])] -======= ->>>>>>> master for task in subtasks: task = int(task) sv, ar = sv_list[task], ar_list[task] From 
4944059e97ad5ee423ae18869b4b93e387a2d329 Mon Sep 17 00:00:00 2001 From: Zachary Atkins Date: Tue, 28 Nov 2023 14:17:47 -0500 Subject: [PATCH 13/13] add new param_dict, update through aniso_couplings.py --- .../paramfiles/cov_dr6_v4_20231128.dict | 192 ++++++++++++++++++ .../sims_dr6_v4_lmax5400_20230816.dict | 2 +- .../python/get_split_covariance_aniso.py | 8 - 3 files changed, 193 insertions(+), 9 deletions(-) create mode 100644 project/ana_cov_comp/paramfiles/cov_dr6_v4_20231128.dict diff --git a/project/ana_cov_comp/paramfiles/cov_dr6_v4_20231128.dict b/project/ana_cov_comp/paramfiles/cov_dr6_v4_20231128.dict new file mode 100644 index 00000000..52e58fbb --- /dev/null +++ b/project/ana_cov_comp/paramfiles/cov_dr6_v4_20231128.dict @@ -0,0 +1,192 @@ +surveys = ["dr6"] + +arrays_dr6 = ["pa4_f150", "pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"] + +data_dir = '/scratch/gpfs/zatkins/data/simonsobs/PSpipe/project/ana_cov_comp/' + +# orphans +use_beam_covariance = True +multistep_path = data_dir + +# mcms +mcms_dir = data_dir + 'mcms' + +binned_mcm = False +lmax = 5400 +use_toeplitz_mcm = False +binning_file = data_dir + "binning/BIN_ACTPOL_50_4_SC_large_bin_at_low_ell" +niter = 0 +type = 'Dl' + +# alms +alms_dir = data_dir + 'alms' +apply_kspace_filter = False +k_filter_dr6 = {"type": "binary_cross", "vk_mask": [-90, 90], "hk_mask": [-50, 50], "weighted": False} +deconvolve_pixwin = True +pixwin_dr6 = {"pix": 'CAR', "order": 0} +remove_mean = False + +# spectra +spectra_dir = data_dir + 'spectra' +noise_model_dir = data_dir + 'noise_model' +split_noise_dir = data_dir + 'split_noise' + +write_splits_spectra = True +cov_T_E_only = False +kspace_tf_path = "analytical" +deconvolve_map_maker_tf_dr6 = False +mm_tf_dr6_pa4_f150 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa4_f220 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa5_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa5_f150 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa6_f090 = data_dir + "transfer_fcns/tf_unity.dat" +mm_tf_dr6_pa6_f150 = data_dir + "transfer_fcns/tf_unity.dat" + +# cov +sq_win_alms_dir = data_dir + 'sq_win_alms' +covariances_dir = data_dir + 'covariances' +split_covariances_dir = data_dir + 'split_covariances' + +use_toeplitz_cov = True + +# window parameters +ps_mask_dr6_pa4_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa4_f220 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa5_f090 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa5_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa6_f090 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' +ps_mask_dr6_pa6_f150 = data_dir + 'masks/act_pts_mask_fluxcut_15.0mJy_at150Ghz_rad_5.0_monster_dust_proj.fits' + +gal_mask_dr6_pa4_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa4_f220 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa5_f090 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa5_f150 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa6_f090 = data_dir + "masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" +gal_mask_dr6_pa6_f150 = data_dir + 
"masks/mask_galactic_equatorial_car_halfarcmin_pixelmatch_proj.fits" + +coord_mask_dr6_pa4_f150 = None +coord_mask_dr6_pa4_f220 = None +coord_mask_dr6_pa5_f090 = None +coord_mask_dr6_pa5_f150 = None +coord_mask_dr6_pa6_f090 = None +coord_mask_dr6_pa6_f150 = None + +apod_pts_source_degree = 0.3 +apod_survey_degree = 2 +edge_skip_rescale = 1 +cross_link_threshold = 0.97 +n_med_ivar = 3 + +window_dir = '/scratch/gpfs/zatkins/data/simonsobs/mnms/masks/effective_est/' + +window_T_dr6_pa4_f150 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_pol_dr6_pa4_f150 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa4_f150 = None + +window_T_dr6_pa4_f220 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_pol_dr6_pa4_f220 = window_dir + "pa4_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa4_f220 = None + +window_T_dr6_pa5_f090 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_pol_dr6_pa5_f090 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa5_f090 = None + +window_T_dr6_pa5_f150 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_pol_dr6_pa5_f150 = window_dir + "pa5_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa5_f150 = None + +window_T_dr6_pa6_f090 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_pol_dr6_pa6_f090 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa6_f090 = None + +window_T_dr6_pa6_f150 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_pol_dr6_pa6_f150 = window_dir + "pa6_baseline_effective_est_20230816.fits" +window_kspace_dr6_pa6_f150 = None + +# maps +map_dir = '/scratch/gpfs/ACT/dr6v4/maps/dr6v4_20230316/release/' + +n_splits_dr6 = 4 +src_free_maps_dr6 = True + +maps_dr6_pa4_f150 = [map_dir + 'cmb_night_pa4_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa4_f220 = [map_dir + 'cmb_night_pa4_f220_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f090 = [map_dir + 'cmb_night_pa5_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa5_f150 = [map_dir + 'cmb_night_pa5_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f090 = [map_dir + 'cmb_night_pa6_f090_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] +maps_dr6_pa6_f150 = [map_dir + 'cmb_night_pa6_f150_3pass_4way_set%d_map_srcfree.fits' % (i) for i in range(4)] + +ivar_dr6_pa4_f150 = [map_dir + 'cmb_night_pa4_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa4_f220 = [map_dir + 'cmb_night_pa4_f220_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa5_f090 = [map_dir + 'cmb_night_pa5_f090_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa5_f150 = [map_dir + 'cmb_night_pa5_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa6_f090 = [map_dir + 'cmb_night_pa6_f090_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] +ivar_dr6_pa6_f150 = [map_dir + 'cmb_night_pa6_f150_3pass_4way_set%d_ivar.fits' % (i) for i in range(4)] + +cal_dr6_pa4_f150 = 1.0072 +cal_dr6_pa4_f220 = 1.0340 +cal_dr6_pa5_f090 = 1.0219 +cal_dr6_pa5_f150 = 0.9877 +cal_dr6_pa6_f090 = 1.0182 +cal_dr6_pa6_f150 = 0.9699 + +pol_eff_dr6_pa4_f150 = 0.9584 +pol_eff_dr6_pa4_f220 = 1.0 +pol_eff_dr6_pa5_f090 = 0.9646 +pol_eff_dr6_pa5_f150 = 0.9488 +pol_eff_dr6_pa6_f090 = 0.9789 +pol_eff_dr6_pa6_f150 = 0.9656 + +# passbands +passband_dir = data_dir + "passbands/" +do_bandpass_integration = True 
+freq_info_dr6_pa4_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa4_f150.dat"} +freq_info_dr6_pa4_f220 = {"freq_tag": 220, "passband": passband_dir + "passband_dr6_pa4_f220.dat"} +freq_info_dr6_pa5_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_dr6_pa5_f090.dat"} +freq_info_dr6_pa5_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa5_f150.dat"} +freq_info_dr6_pa6_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_dr6_pa6_f090.dat"} +freq_info_dr6_pa6_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa6_f150.dat"} + +# beams +beam_dr6_pa4_f150 = data_dir + 'beams/20230130_beams/coadd_pa4_f150_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa4_f220 = data_dir + 'beams/20230130_beams/coadd_pa4_f220_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa5_f090 = data_dir + 'beams/20230130_beams/coadd_pa5_f090_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa5_f150 = data_dir + 'beams/20230130_beams/coadd_pa5_f150_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa6_f090 = data_dir + 'beams/20230130_beams/coadd_pa6_f090_night_beam_tform_jitter_cmb.txt' +beam_dr6_pa6_f150 = data_dir + 'beams/20230130_beams/coadd_pa6_f150_night_beam_tform_jitter_cmb.txt' + +leakage_file_dir = data_dir + 'beams/20230130_beams/' + +leakage_beam_dr6_pa4_f150 = 'gamma_mp_uranus_pa4_f150.txt' +leakage_beam_dr6_pa4_f220 = 'gamma_mp_uranus_pa4_f220.txt' +leakage_beam_dr6_pa5_f090 = 'gamma_mp_uranus_pa5_f090.txt' +leakage_beam_dr6_pa5_f150 = 'gamma_mp_uranus_pa5_f150.txt' +leakage_beam_dr6_pa6_f090 = 'gamma_mp_uranus_pa6_f090.txt' +leakage_beam_dr6_pa6_f150 = 'gamma_mp_uranus_pa6_f150.txt' + +# best fit params (only used for sim generation and covariances computation) +best_fits_dir = data_dir + 'best_fits' + +cosmo_params = {"cosmomc_theta":0.0104085, "logA": 3.044, "ombh2": 0.02237, "omch2": 0.1200, "ns": 0.9649, "Alens": 1.0, "tau": 0.0544} +fg_norm = {"nu_0": 150.0, "ell_0": 3000, "T_CMB": 2.725} +fg_components = {'tt': ['tSZ_and_CIB', 'cibp', 'kSZ', 'radio', 'dust'], 'te': ['radio', 'dust'], 'ee': ['radio', 'dust'], 'bb': ['radio', 'dust'], 'tb': ['radio', 'dust'], 'eb': []} +fg_params = {"a_tSZ": 3.30, "a_kSZ": 1.60, "a_p": 6.90, "beta_p": 2.08, "a_c": 4.90, "beta_c": 2.20, "a_s": 3.10, "a_gtt": 8.7, "xi": 0.1, "T_d": 9.60, "a_gte": 0, "a_gtb": 0, "a_gee": 0, "a_gbb": 0, "a_pste": 0, "a_pstb": 0, "a_psee": 0, "a_psbb": 0} + +# sim +iStart = 0 +iStop = 39 +sim_alm_dtype = "complex64" +read_noise_alms_from_disk = False +noise_sim_type = "fdw" +noise_model_parameters = {"downgrade": 4, "mask_est_name": "dr6v3_20220316_baseline_union_mask", "mask_obs_name": "dr6v3_xlink_union_mask_0.001", "union_sources": "regular_20220316", "notes": "20220619"} + +# plot +plot_dir = data_dir + 'plots' + +range_TT = [10, 8000] +range_TE = [-150, 150] +range_ET = [-150, 150] +range_EE = [-20, 50] + +# planck +planck_data_dir = data_dir + "planck_data/" \ No newline at end of file diff --git a/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict b/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict index 3216bc89..52e58fbb 100644 --- a/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict +++ b/project/ana_cov_comp/paramfiles/sims_dr6_v4_lmax5400_20230816.dict @@ -2,7 +2,7 @@ surveys = ["dr6"] arrays_dr6 = ["pa4_f150", "pa4_f220", "pa5_f090", "pa5_f150", "pa6_f090", "pa6_f150"] -data_dir = '/scratch/gpfs/zatkins/data/simonsobs/PSpipe/project/data_analysis/ana_cov_comp/' +data_dir = 
'/scratch/gpfs/zatkins/data/simonsobs/PSpipe/project/ana_cov_comp/' # orphans use_beam_covariance = True diff --git a/project/ana_cov_comp/python/get_split_covariance_aniso.py b/project/ana_cov_comp/python/get_split_covariance_aniso.py index a5ff3820..750b437b 100644 --- a/project/ana_cov_comp/python/get_split_covariance_aniso.py +++ b/project/ana_cov_comp/python/get_split_covariance_aniso.py @@ -60,14 +60,6 @@ _, nlth = best_fits.noise_dict_from_files(f_name_noise, [sv], split_list, lmax=d["lmax"],spectra=spectra) noise_dict.update(nlth) - -bl_dict = {} -for sv in surveys: - for ar in arrays[sv]: - l_beam, bl_dict[sv, ar] = pspy_utils.read_beam_file(d[f"beam_{sv}_{ar}"]) - id_beam = np.where((l_beam >= 2) & (l_beam < lmax)) - bl_dict[sv, ar] = bl_dict[sv, ar][id_beam] - cov_name = [] for sv in surveys: for ar in arrays[sv]:
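A note on the `np.reciprocal(ivar1, where=(ivar1 != 0))` pattern used in the split-covariance scripts above: without an explicit `out=` argument, numpy leaves the entries excluded by `where` uninitialized, so pixels with zero inverse variance can pick up arbitrary values; the scripts therefore pass a pre-zeroed output buffer. A minimal standalone numpy sketch of the pitfall (the array values are made up for illustration):

```python
import numpy as np

ivar = np.array([0.0, 2.0, 4.0])

# Without out=, the entry where ivar == 0 is left uninitialized
# (it holds whatever was in the freshly allocated output buffer):
# var_bad = np.reciprocal(ivar, where=(ivar != 0))

# With a pre-zeroed output buffer, zero-ivar pixels stay exactly 0:
var = np.reciprocal(ivar, out=np.zeros_like(ivar), where=(ivar != 0))
print(var)  # [0.   0.5  0.25]
```

Zeroing the unhit pixels is the conservative choice here, since those pixels carry no weight once the variance map is multiplied by the window.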