diff --git a/hera_notebook_templates/data/SSM_autocorrelations_downsampled_sum_pol_convention.uvh5 b/hera_notebook_templates/data/SSM_autocorrelations_downsampled_sum_pol_convention.uvh5 new file mode 100644 index 0000000..cbca7c5 Binary files /dev/null and b/hera_notebook_templates/data/SSM_autocorrelations_downsampled_sum_pol_convention.uvh5 differ diff --git a/hera_notebook_templates/notebooks/lststack.ipynb b/hera_notebook_templates/notebooks/lststack.ipynb index 7aa829e..827a2c1 100644 --- a/hera_notebook_templates/notebooks/lststack.ipynb +++ b/hera_notebook_templates/notebooks/lststack.ipynb @@ -66,6 +66,7 @@ "source": [ "import os\n", "import sys\n", + "import pickle\n", "from pathlib import Path\n", "from functools import partial\n", "from datetime import datetime\n", @@ -146,7 +147,6 @@ "outdir: str = \".\"\n", "bl_chunk_size: int = 0\n", "rephase: bool = True\n", - "vis_units: str = \"Jy\"\n", "fname_format: str = '{inpaint_mode}/zen.{kind}.{lst:7.5f}.{blchunk}.sum.uvh5'\n", "overwrite: bool = True\n", "write_med_mad: bool = False\n", @@ -759,6 +759,7 @@ "source": [ "%%time\n", "if do_lstcal and n_bl_chunks==1: # can't fit all the baselines in memory if not redavg'd, and need all of them at once to do lstcal\n", + " all_calibration_parameters = {}\n", " for i, (stack, lstavg_model, auto_model) in enumerate(zip(cross_stacks, cross_lstavg, autos_lstavg)):\n", " calibration_parameters, gains = lstbin_absolute_calibration(\n", " stack=stack, \n", @@ -768,8 +769,9 @@ " auto_stack=auto_stacks[i],\n", " auto_model=auto_model['data'],\n", " calibrate_inplace=True, # calibrate inplace\n", - " run_phase_cal=True, # run phase calibration\n", " run_amplitude_cal=True, # run amplitude calibration\n", + " run_phase_cal=True, # run phase calibration\n", + " run_cross_pol_phase_cal=True\n", " )\n", " \n", " # Write out calibration parameters and metadata\n", @@ -779,18 +781,22 @@ " calibration_parameters[\"antpairs\"] = stack.antpairs\n", " calibration_parameters[\"lst\"] = stackconf.lst_grid[i]\n", " \n", - " # Get the calibration filename\n", - " cal_fname = lstbin.io.format_outfile_name(\n", - " lst=stackconf.lst_grid_edges[i],\n", - " pols=stackconf.pols,\n", - " fname_format=fname_format.replace(\"sum.uvh5\", \"lstcal.npz\"),\n", - " inpaint_mode=True,\n", - " lst_branch_cut=stackconf.properties[\"lst_branch_cut\"],\n", - " kind=\"LST\"\n", - " )\n", - " outfile = outdir / cal_fname\n", + " all_calibration_parameters[stackconf.lst_grid[i]] = calibration_parameters\n", + " \n", + " # Get the calibration filename\n", + " cal_fname = lstbin.io.format_outfile_name(\n", + " lst=stackconf.lst_grid_edges[0],\n", + " pols=stackconf.pols,\n", + " fname_format=fname_format.replace(\"sum.uvh5\", \"lstcal.pkl\"),\n", + " inpaint_mode=True,\n", + " lst_branch_cut=stackconf.properties[\"lst_branch_cut\"],\n", + " kind=\"LST\"\n", + " )\n", + " outfile = outdir / cal_fname\n", " \n", - " np.savez(outfile, **calibration_parameters)\n", + " # Write out calibration parameters\n", + " with open(outfile, 'wb') as handle:\n", + " pickle.dump(all_calibration_parameters, handle, protocol=pickle.HIGHEST_PROTOCOL)\n", " \n", " \n", " # Recompute auto and cross averages after calibration - needed for STD files \n", @@ -1334,7 +1340,6 @@ " freq_max=freq_max,\n", " lst_branch_cut=stackconf.properties[\"lst_branch_cut\"],\n", " lsts=stackconf.lst_grid,\n", - " vis_units=vis_units\n", ")\n", "\n", "create_file = partial(\n", @@ -2754,9 +2759,9 @@ ], "metadata": { "kernelspec": { - "display_name": "h6c", + 
"display_name": "Python 3 (ipykernel)", "language": "python", - "name": "h6c" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -2768,7 +2773,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.11.3" }, "toc": { "base_numbering": 1, diff --git a/notebooks/calibration_smoothing.ipynb b/notebooks/calibration_smoothing.ipynb index edc14d9..416ac44 100644 --- a/notebooks/calibration_smoothing.ipynb +++ b/notebooks/calibration_smoothing.ipynb @@ -7,7 +7,7 @@ "source": [ "# Calibration Smoothing\n", "\n", - "**by Josh Dillon**, last updated March 29, 2023\n", + "**by Josh Dillon**, last updated July 19, 2023\n", "\n", "This notebook runs calibration smoothing to the gains coming out of [file_calibration](https://github.com/HERA-Team/hera_notebook_templates/blob/master/notebooks/file_calibration.ipynb) notebook. It removes any flags founds on by that notebook and replaces them with flags generated from [full_day_rfi](https://github.com/HERA-Team/hera_notebook_templates/blob/master/notebooks/full_day_rfi.ipynb) and [full_day_antenna_flagging](https://github.com/HERA-Team/hera_notebook_templates/blob/master/notebooks/full_day_antenna_flagging.ipynb). It also plots the results for a couple of antennas.\n", "\n", @@ -71,7 +71,7 @@ "source": [ "# get files\n", "SUM_FILE = os.environ.get(\"SUM_FILE\", None)\n", - "# SUM_FILE = '/users/jsdillon/lustre/H6C/abscal/2459853/zen.2459853.25518.sum.uvh5'\n", + "# SUM_FILE = '/lustre/aoc/projects/hera/h6c-analysis/IDR2/2459867/zen.2459867.43004.sum.uvh5'\n", "SUM_SUFFIX = os.environ.get(\"SUM_SUFFIX\", 'sum.uvh5')\n", "CAL_SUFFIX = os.environ.get(\"CAL_SUFFIX\", 'sum.omni.calfits')\n", "SMOOTH_CAL_SUFFIX = os.environ.get(\"SMOOTH_CAL_SUFFIX\", 'sum.smooth.calfits')\n", @@ -80,9 +80,10 @@ "FREQ_SMOOTHING_SCALE = float(os.environ.get(\"FREQ_SMOOTHING_SCALE\", 10.0)) # MHz\n", "TIME_SMOOTHING_SCALE = float(os.environ.get(\"TIME_SMOOTHING_SCALE\", 6e5)) # seconds\n", "EIGENVAL_CUTOFF = float(os.environ.get(\"EIGENVAL_CUTOFF\", 1e-12))\n", + "PER_POL_REFANT = os.environ.get(\"PER_POL_REFANT\", \"False\").upper() == \"TRUE\"\n", "\n", "for setting in ['SUM_FILE', 'SUM_SUFFIX', 'CAL_SUFFIX', 'SMOOTH_CAL_SUFFIX', 'ANT_FLAG_SUFFIX',\n", - " 'RFI_FLAG_SUFFIX', 'FREQ_SMOOTHING_SCALE', 'TIME_SMOOTHING_SCALE', 'EIGENVAL_CUTOFF']:\n", + " 'RFI_FLAG_SUFFIX', 'FREQ_SMOOTHING_SCALE', 'TIME_SMOOTHING_SCALE', 'EIGENVAL_CUTOFF', 'PER_POL_REFANT']:\n", " print(f'{setting} = {eval(setting)}')" ] }, @@ -91,7 +92,7 @@ "id": "c6ecd16f", "metadata": {}, "source": [ - "## Load files" + "## Load files and select reference antenna(s)" ] }, { @@ -134,22 +135,41 @@ { "cell_type": "code", "execution_count": null, - "id": "ee831559", - "metadata": { - "scrolled": true - }, + "id": "0976d9b5-b08c-4899-a866-8badc6d9173d", + "metadata": {}, + "outputs": [], + "source": [ + "cs = smooth_cal.CalibrationSmoother(cal_files, flag_file_list=(ant_flag_files + rfi_flag_files),\n", + " ignore_calflags=True, pick_refant=False, load_chisq=True, load_cspa=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4c2af8ac-efb8-4489-80f6-d0f30d8b62a8", + "metadata": {}, "outputs": [], "source": [ - "cs = smooth_cal.CalibrationSmoother(cal_files, flag_file_list=(ant_flag_files + rfi_flag_files), ignore_calflags=True,\n", - " pick_refant=True, propagate_refant_flags=True, load_chisq=True, load_cspa=True)\n", + "cs.refant = smooth_cal.pick_reference_antenna(cs.gain_grids, cs.flag_grids, cs.freqs, 
per_pol=True)\n", "for pol in cs.refant:\n", - " print(f'Reference antenna {cs.refant[pol][0]} selected for {pol}.')" + " print(f'Reference antenna {cs.refant[pol][0]} selected for smoothing {pol} gains.')\n", + "\n", + "if not PER_POL_REFANT:\n", + " # in this case, rephase both pols separately before smoothing, but also smooth the relative polarization calibration phasor\n", + " overall_refant = smooth_cal.pick_reference_antenna({ant: cs.gain_grids[ant] for ant in cs.refant.values()}, \n", + " {ant: cs.flag_grids[ant] for ant in cs.refant.values()}, \n", + " cs.freqs, per_pol=False)\n", + " print(f'Overall reference antenna {overall_refant} selected.')\n", + " other_refant = [ant for ant in cs.refant.values() if ant != overall_refant][0]\n", + "\n", + " relative_pol_phasor = cs.gain_grids[overall_refant] * cs.gain_grids[other_refant].conj() # TODO: is this conjugation right?\n", + " relative_pol_phasor /= np.abs(relative_pol_phasor)" ] }, { "cell_type": "code", "execution_count": null, - "id": "482760b0", + "id": "9f459577-0ba2-4c55-a767-df9d10c7226d", "metadata": {}, "outputs": [], "source": [ @@ -160,7 +180,21 @@ "candidate_ants = [ant for ant, nflags in zip(antnums, flags_per_antnum) if (ant not in refant_nums) and (nflags <= np.percentile(flags_per_antnum, 25))\n", " and not np.all(cs.flag_grids[ant, 'Jee']) and not np.all(cs.flag_grids[ant, 'Jnn'])]\n", "ants_to_plot = [func(candidate_ants) for func in (np.min, np.max)]\n", - "abscal_gains = {(ant, pol): np.array(cs.gain_grids[(ant, pol)]) for ant in ants_to_plot for pol in ['Jee', 'Jnn']}" + "abscal_gains = {}\n", + "for pol in ['Jee', 'Jnn']:\n", + " for antnum in ants_to_plot:\n", + " refant_here = (cs.refant[pol] if PER_POL_REFANT else overall_refant)\n", + " abscal_gains[antnum, pol] = cs.gain_grids[(antnum, pol)] * np.abs(cs.gain_grids[refant_here]) / cs.gain_grids[refant_here]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff18e5a9-23e5-41f5-ab0b-35f09ab67404", + "metadata": {}, + "outputs": [], + "source": [ + "cs.rephase_to_refant(propagate_refant_flags=True)" ] }, { @@ -171,6 +205,19 @@ "## Perform smoothing" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "178ce9a5-570e-4cbc-a7d0-978da71033a6", + "metadata": {}, + "outputs": [], + "source": [ + "if not PER_POL_REFANT:\n", + " # treat the relative_pol_phasor as if it were antenna -1\n", + " cs.gain_grids[(-1, other_refant[1])] = relative_pol_phasor\n", + " cs.flag_grids[(-1, other_refant[1])] = cs.flag_grids[overall_refant] | cs.flag_grids[other_refant]" + ] + }, { "cell_type": "code", "execution_count": null, @@ -182,6 +229,23 @@ " method='DPSS', fit_method='lu_solve', fix_phase_flips=True, flag_phase_flip_ints=True)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "10ff0705-f3fd-4570-b47a-e96e360d183c", + "metadata": {}, + "outputs": [], + "source": [ + "if not PER_POL_REFANT:\n", + " # put back in the smoothed phasor, ensuring the amplitude is 1 and that data are flagged anywhere either polarization's refant is flagged\n", + " smoothed_relative_pol_phasor = cs.gain_grids[(-1, other_refant[-1])] / np.abs(cs.gain_grids[(-1, other_refant[-1])])\n", + " for ant in cs.gain_grids:\n", + " if ant[0] >= 0 and ant[1] == other_refant[1]:\n", + " cs.gain_grids[ant] /= smoothed_relative_pol_phasor\n", + " cs.flag_grids[ant] |= (cs.flag_grids[(-1, other_refant[1])])\n", + " cs.refant = overall_refant" + ] + }, { "cell_type": "markdown", "id": "c40160e2", @@ -205,9 +269,7 @@ "cell_type": "code", "execution_count": null, 
"id": "4fe13af1", - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "def amplitude_plot(ant_to_plot):\n", @@ -289,14 +351,16 @@ " warnings.simplefilter(\"ignore\") \n", " display(HTML(f'
<h2>Antenna {ant_to_plot} Phase Waterfalls</h2>
'))\n", " fig, axes = plt.subplots(4, 2, figsize=(14,14), gridspec_kw={'height_ratios': [1, 1, .4, .4]})\n", - "\n", + " \n", " # Plot phase waterfalls for a single antenna \n", " for ax, pol in zip(axes[0], ['Jee', 'Jnn']):\n", " ant = (ant_to_plot, pol)\n", " extent=[cs.freqs[0]/1e6, cs.freqs[-1]/1e6, lst_grid[-1], lst_grid[0]]\n", " im = ax.imshow(np.where(cs.flag_grids[ant], np.nan, np.angle(cs.gain_grids[ant])), aspect='auto', cmap='inferno', \n", " interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)\n", - " ax.set_title(f'Smoothcal Gain Phase of Ant {ant[0]} / Ant {cs.refant[pol][0]}: {pol[-1]}-polarized')\n", + "\n", + " refant = (cs.refant[pol] if isinstance(cs.refant, dict) else cs.refant)\n", + " ax.set_title(f'Smoothcal Gain Phase of Ant {ant[0]}{pol[-1]} / Ant {refant[0]}{refant[1][-1]}')\n", " ax.set_xlabel('Frequency (MHz)')\n", " ax.set_ylabel('LST (Hours)')\n", " ax.set_xlim([cs.freqs[0]/1e6, cs.freqs[-1]/1e6])\n", @@ -309,7 +373,8 @@ " extent=[cs.freqs[0]/1e6, cs.freqs[-1]/1e6, lst_grid[-1], lst_grid[0]]\n", " im = ax.imshow(np.where(cs.flag_grids[ant], np.nan, np.angle(abscal_gains[ant])), aspect='auto', cmap='inferno', \n", " interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)\n", - " ax.set_title(f'Abscal Gain Phase of Ant {ant[0]} / Ant {cs.refant[pol][0]}: {pol[-1]}-polarized')\n", + " refant = (cs.refant[pol] if isinstance(cs.refant, dict) else cs.refant)\n", + " ax.set_title(f'Abscal Gain Phase of Ant {ant[0]}{pol[-1]} / Ant {refant[0]}{refant[1][-1]}')\n", " ax.set_xlabel('Frequency (MHz)')\n", " ax.set_ylabel('LST (Hours)')\n", " ax.set_xlim([cs.freqs[0]/1e6, cs.freqs[-1]/1e6])\n", @@ -326,8 +391,9 @@ " ax.set_ylim([-np.pi, np.pi])\n", " ax.set_xlim([cs.freqs[0]/1e6, cs.freqs[-1]/1e6]) \n", " ax.set_xlabel('Frequency (MHz)')\n", - " ax.set_ylabel(f'Phase of g$_{{{ant[0]}}}$ / g$_{{{cs.refant[pol][0]}}}$')\n", - " ax.set_title(f'Median Infrequently-Flagged Gain Phase of Ant {ant[0]} / Ant {cs.refant[pol][0]}: {pol[-1]}-polarized')\n", + " refant = (cs.refant[pol] if isinstance(cs.refant, dict) else cs.refant)\n", + " ax.set_ylabel(f'Phase of g$_{{{ant[0]}{pol[-1]}}}$ / g$_{{{refant[0]}{refant[1][-1]}}}$')\n", + " ax.set_title(f'Median Infrequently-Flagged Gain Phase of Ant {ant[0]}{pol[-1]} / Ant {refant[0]}{refant[1][-1]}')\n", " ax.legend(loc='upper left')\n", "\n", " # # Now plot median gain time series\n", @@ -339,8 +405,9 @@ " ax.plot(lst_grid[to_plot], np.nanmean(np.where(cs.flag_grids[ant], np.nan, np.angle(cs.gain_grids[ant])), axis=1)[to_plot], 'k.', ms=2, label='Smoothed') \n", " ax.set_ylim([-np.pi, np.pi]) \n", " ax.set_xlabel('LST (hours)')\n", - " ax.set_ylabel(f'Phase of g$_{{{ant[0]}}}$ / g$_{{{cs.refant[pol][0]}}}$')\n", - " ax.set_title(f'Mean Infrequently-Flagged Gain Phase of Ant {ant[0]} / Ant {cs.refant[pol][0]}: {pol[-1]}-polarized')\n", + " refant = (cs.refant[pol] if isinstance(cs.refant, dict) else cs.refant)\n", + " ax.set_ylabel(f'Phase of g$_{{{ant[0]}{pol[-1]}}}$ / g$_{{{refant[0]}{refant[1][-1]}}}$')\n", + " ax.set_title(f'Mean Infrequently-Flagged Gain Phase of Ant {ant[0]}{pol[-1]} / Ant {refant[0]}{refant[1][-1]}')\n", " ax.set_xticklabels(ax.get_xticks() % 24) \n", " ax.legend(loc='upper left')\n", "\n", @@ -362,9 +429,7 @@ "cell_type": "code", "execution_count": null, "id": "43438042", - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "for ant_to_plot in ants_to_plot:\n", @@ -385,9 +450,7 @@ "cell_type": "code", "execution_count": null, "id": "5c6fa5d0", - "metadata": 
{ - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "for ant_to_plot in ants_to_plot:\n", @@ -413,8 +476,8 @@ " fig, axes = plt.subplots(1, 2, figsize=(14, 10), sharex=True, sharey=True)\n", " extent = [cs.freqs[0]/1e6, cs.freqs[-1]/1e6, lst_grid[-1], lst_grid[0]]\n", " for ax, pol in zip(axes, ['Jee', 'Jnn']):\n", - "\n", - " im = ax.imshow(np.where(cs.flag_grids[cs.refant[pol]], np.nan, cs.chisq_grids[pol]), vmin=1, vmax=5, \n", + " refant = (cs.refant[pol] if isinstance(cs.refant, dict) else cs.refant)\n", + " im = ax.imshow(np.where(cs.flag_grids[refant], np.nan, cs.chisq_grids[pol]), vmin=1, vmax=5, \n", " aspect='auto', cmap='turbo', interpolation='none', extent=extent)\n", " ax.set_yticklabels(ax.get_yticks() % 24)\n", " ax.set_title(f'{pol[1:]}-Polarized $\\\\chi^2$ / DoF')\n", @@ -605,7 +668,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.10.12" }, "toc": { "base_numbering": 1, diff --git a/notebooks/file_calibration.ipynb b/notebooks/file_calibration.ipynb index 89ff723..f7345c6 100644 --- a/notebooks/file_calibration.ipynb +++ b/notebooks/file_calibration.ipynb @@ -7,7 +7,7 @@ "source": [ "# Single File Calibration\n", "\n", - "**by Josh Dillon, Aaron Parsons, Tyler Cox, and Zachary Martinot**, last updated May 27, 2024\n", + "**by Josh Dillon, Aaron Parsons, Tyler Cox, and Zachary Martinot**, last updated July 15, 2024\n", "\n", "This notebook is designed to infer as much information about the array from a single file, including pushing the calibration and RFI mitigation as far as possible. Calibration includes redundant-baseline calibration, RFI-based calibration of delay slopes, model-based calibration of overall amplitudes, and a full per-frequency phase gradient absolute calibration if abscal model files are available.\n", "\n", @@ -17,8 +17,9 @@ "# [• Figure 3: Summary of antenna classifications prior to calibration](#Figure-3:-Summary-of-antenna-classifications-prior-to-calibration)\n", "# [• Figure 4: Redundant calibration of a single baseline group](#Figure-4:-Redundant-calibration-of-a-single-baseline-group)\n", "# [• Figure 5: Absolute calibration of redcal degeneracies](#Figure-5:-Absolute-calibration-of-redcal-degeneracies)\n", - "# [• Figure 6: chi^2 per antenna across the array](#Figure-6:-chi^2-per-antenna-across-the-array)\n", - "# [• Figure 7: Summary of antenna classifications after redundant calibration](#Figure-7:-Summary-of-antenna-classifications-after-redundant-calibration)\n", + "# [• Figure 6: Relative Phase Calibration](#Figure-6:-Relative-Phase-Calibration)\n", + "# [• Figure 7: chi^2 per antenna across the array](#Figure-7:-chi^2-per-antenna-across-the-array)\n", + "# [• Figure 8: Summary of antenna classifications after redundant calibration](#Figure-8:-Summary-of-antenna-classifications-after-redundant-calibration)\n", "# [• Table 1: Complete summary of per antenna classifications](#Table-1:-Complete-summary-of-per-antenna-classifications)\n" ] }, @@ -105,9 +106,10 @@ "SAVE_RESULTS = os.environ.get(\"SAVE_RESULTS\", \"TRUE\").upper() == \"TRUE\"\n", "SAVE_OMNIVIS_FILE = os.environ.get(\"SAVE_OMNIVIS_FILE\", \"FALSE\").upper() == \"TRUE\"\n", "\n", + "\n", "# get infile names\n", "SUM_FILE = os.environ.get(\"SUM_FILE\", None)\n", - "# SUM_FILE = '/lustre/aoc/projects/hera/h6c-analysis/IDR2/2459866/zen.2459866.33010.sum.uvh5' # If sum_file is not defined in the environment variables, define it here.\n", + "# SUM_FILE = 
'/lustre/aoc/projects/hera/h6c-analysis/IDR2/2459867/zen.2459867.46002.sum.uvh5' # If sum_file is not defined in the environment variables, define it here.\n", "DIFF_FILE = SUM_FILE.replace('sum', 'diff')\n", "\n", "# get outfilenames\n", @@ -163,13 +165,14 @@ "ABSCAL_MODEL_FILES_GLOB = os.environ.get(\"ABSCAL_MODEL_FILES_GLOB\", None)\n", "ABSCAL_MIN_BL_LEN = float(os.environ.get(\"ABSCAL_MIN_BL_LEN\", 1.0))\n", "ABSCAL_MAX_BL_LEN = float(os.environ.get(\"ABSCAL_MAX_BL_LEN\", 140.0))\n", + "CALIBRATE_CROSS_POLS = os.environ.get(\"CALIBRATE_CROSS_POLS\", \"FALSE\").upper() == \"TRUE\"\n", "\n", "# print settings\n", "for setting in ['PLOT', 'SAVE_RESULTS', 'OC_MAX_DIMS', 'OC_MIN_DIM_SIZE', 'OC_SKIP_OUTRIGGERS', \n", " 'OC_MIN_BL_LEN', 'OC_MAX_BL_LEN', 'OC_MAXITER', 'OC_MAX_RERUN', 'OC_RERUN_MAXITER', \n", " 'OC_MAX_CHISQ_FLAGGING_DYNAMIC_RANGE', 'OC_USE_PRIOR_SOL', 'OC_PRIOR_SOL_FLAG_THRESH', \n", " 'OC_USE_GPU', 'RFI_DPSS_HALFWIDTH', 'RFI_NSIG', 'ABSCAL_MODEL_FILES_GLOB', \n", - " 'ABSCAL_MIN_BL_LEN', 'ABSCAL_MAX_BL_LEN']:\n", + " 'ABSCAL_MIN_BL_LEN', 'ABSCAL_MAX_BL_LEN', \"CALIBRATE_CROSS_POLS\"]:\n", " print(f'{setting} = {eval(setting)}')" ] }, @@ -516,7 +519,7 @@ " avg_auto = {(-1, -1, 'ee'): np.mean([data[bl] for bl in auto_bls_to_use], axis=0)}\n", " \n", " # Flag RFI first with channel differences and then with DPSS\n", - " antenna_flags, _ = xrfi.flag_autos(avg_auto, int_count=int_count, nsig=(RFI_NSIG * 5))\n", + " antenna_flags, _ = xrfi.flag_autos(avg_auto, int_count=int_count, nsig=(nsig * 5))\n", " if prior_flags is not None:\n", " antenna_flags[(-1, -1, 'ee')] = prior_flags\n", " _, rfi_flags = xrfi.flag_autos(avg_auto, int_count=int_count, flag_method='dpss_flagger',\n", @@ -1084,8 +1087,8 @@ "outputs": [], "source": [ "# Load simulated and then downsampled model of autocorrelations that includes receiver noise, then interpolate to upsample\n", - "hd_model = io.HERADataFastReader(f'{HNBT_DATA}/SSM_autocorrelations_downsampled.uvh5')\n", - "model, _, _ = hd_model.read(read_flags=False, read_nsamples=False)\n", + "hd_auto_model = io.HERAData(f'{HNBT_DATA}/SSM_autocorrelations_downsampled_sum_pol_convention.uvh5')\n", + "model, _, _ = hd_auto_model.read()\n", "per_pol_interpolated_model = {}\n", "for bl in model:\n", " sorted_lsts, lst_indices = np.unique(model.lsts, return_index=True)\n", @@ -1112,17 +1115,6 @@ " del redcaled_autos, g_abscal" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "848a4aad", - "metadata": {}, - "outputs": [], - "source": [ - "del hd_model, model\n", - "malloc_trim()" - ] - }, { "cell_type": "markdown", "id": "de05f0a8", @@ -1143,10 +1135,10 @@ " abscal_model_files = sorted(glob.glob(ABSCAL_MODEL_FILES_GLOB))\n", "else:\n", " # try to find files on site\n", - " abscal_model_files = sorted(glob.glob('/mnt/sn1/data1/abscal_models/H4C_1/abscal_files_unique_baselines/zen.2458894.?????.uvh5'))\n", + " abscal_model_files = sorted(glob.glob('/mnt/sn1/data1/abscal_models/H6C/zen.2458894.?????.uvh5'))\n", " if len(abscal_model_files) == 0:\n", " # try to find files at NRAO\n", - " abscal_model_files = sorted(glob.glob('/lustre/aoc/projects/hera/zmartino/hera_calib_model/H4C_1/abscal_files_unique_baselines/zen.2458894.?????.uvh5'))\n", + " abscal_model_files = sorted(glob.glob('/lustre/aoc/projects/hera/h6c-analysis/abscal_models/h6c_abscal_files_unique_baselines/zen.2458894.?????.uvh5'))\n", "print(f'Found {len(abscal_model_files)} abscal model files{\" in \" + os.path.dirname(abscal_model_files[0]) if len(abscal_model_files) > 0 else 
\"\"}.')" ] }, @@ -1217,12 +1209,89 @@ " sol.gains = {antpol : g * delta_gains.get(antpol, 1) for antpol, g in sol.gains.items()}\n", " apply_cal.calibrate_in_place(sol.vis, delta_gains) \n", " \n", - " del hdm, model, model_flags, delta_gains\n", + " del model, model_flags, delta_gains\n", " malloc_trim() \n", " \n", " print(f'Finished absolute calibration of tip-tilt phase slopes in {(time.time() - abscal_start) / 60:.2f} minutes.')" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "0dc18807", + "metadata": {}, + "outputs": [], + "source": [ + "if DO_FULL_ABSCAL and CALIBRATE_CROSS_POLS:\n", + " cross_pol_cal_start = time.time()\n", + "\n", + " # Compute reds for good antennas \n", + " cross_reds = redcal.get_reds(hd.data_antpos, pols=['en', 'ne']) \n", + " cross_reds = redcal.filter_reds(cross_reds, ex_ants=overall_class.bad_ants, pols=['en', 'ne'], antpos=hd.antpos, **fr_settings) \n", + " unflagged_data_bls = [red[0] for red in cross_reds]\n", + "\n", + " # Get cross-polarized model visibilities\n", + " model_bls = copy.deepcopy(hdm.bls)\n", + " model_antpos = hdm.data_antpos\n", + " if len(matched_model_files) > 1: # in this case, it's a dictionary\n", + " model_bls = list(set([bl for bls in list(hdm.bls.values()) for bl in bls]))\n", + " model_antpos = {ant: pos for antpos in hdm.data_antpos.values() for ant, pos in antpos.items()}\n", + "\n", + " data_bls, model_bls, data_to_model_bl_map = abscal.match_baselines(unflagged_data_bls, model_bls, data.antpos, model_antpos=model_antpos, \n", + " pols=['en', 'ne'], data_is_redsol=False, model_is_redundant=True, tol=1.0,\n", + " min_bl_cut=ABSCAL_MIN_BL_LEN, max_bl_cut=ABSCAL_MAX_BL_LEN, verbose=True)\n", + " \n", + " model, model_flags, _ = io.partial_time_io(hdm, np.unique([d2m_time_map[time] for time in data.times]), bls=model_bls)\n", + " model_bls = [data_to_model_bl_map[bl] for bl in data_bls]\n", + "\n", + " # rephase model to match in lsts\n", + " model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}\n", + " utils.lst_rephase(model, model_blvecs, model.freqs, data.lsts - model.lsts, lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)\n", + "\n", + "\n", + " wgts_here = {}\n", + " data_here = {}\n", + "\n", + " \n", + " for red in cross_reds:\n", + " data_bl = red[0]\n", + " if data_bl in data_to_model_bl_map:\n", + "\n", + " wgts_here[data_bl] = np.sum([\n", + " np.logical_not(omni_flags[utils.split_bl(bl)[0]] | omni_flags[utils.split_bl(bl)[1]])\n", + " for bl in red\n", + " ], axis=0)\n", + " data_here[data_bl] = np.nanmean([\n", + " np.where(\n", + " omni_flags[utils.split_bl(bl)[0]] | omni_flags[utils.split_bl(bl)[1]],\n", + " np.nan, sol.calibrate_bl(bl, data[bl])\n", + " ) \n", + " for bl in red\n", + " ], axis=0)\n", + " \n", + " # Run cross-polarized phase calibration\n", + " delta_gains = abscal.cross_pol_phase_cal(\n", + " model=model, data=data_here, wgts=wgts_here, data_bls=data_bls, model_bls=model_bls, return_gains=True,\n", + " gain_ants=sol.gains.keys()\n", + " )\n", + " \n", + " # apply gains\n", + " # \\Delta = \\phi_e - \\phi_n, where V_{en}^{cal} = V_{en}^{uncal} * e^{i \\Delta} \n", + " # and V_{ne}^{cal} = V_{ne}^{uncal} * e^{-i \\Delta}\n", + " sol.gains = {antpol: g * delta_gains[antpol] for antpol, g in sol.gains.items()}\n", + " apply_cal.calibrate_in_place(sol.vis, delta_gains)\n", + " del hdm, model, model_flags, delta_gains\n", + " print(f'Finished relative polarized phase calibration in {(time.time() - cross_pol_cal_start) / 60:.2f} 
minutes.')" + ] + }, + { + "cell_type": "markdown", + "id": "7f4635a5", + "metadata": {}, + "source": [ + "## Plotting" + ] + }, { "cell_type": "code", "execution_count": null, @@ -1282,6 +1351,25 @@ " plt.tight_layout()" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "fd42e27d", + "metadata": {}, + "outputs": [], + "source": [ + "def polarized_gain_phase_plot():\n", + " if CALIBRATE_CROSS_POLS and DO_FULL_ABSCAL:\n", + " plt.figure(figsize=(14, 4), dpi=100)\n", + " for i, time in enumerate(data.times):\n", + " plt.plot(data.freqs / 1e6, np.where(rfi_flags[i], np.nan, delta[i, :]), '.', ms=1.5, label=f'{time:.6f}')\n", + " plt.ylim(-np.pi-0.5, np.pi+0.5)\n", + " plt.xlabel('Frequency (MHz)')\n", + " plt.ylabel('Relative Phase $\\Delta \\ (\\phi_{ee} - \\phi_{nn})$')\n", + " plt.grid()\n", + " plt.legend()" + ] + }, { "cell_type": "markdown", "id": "61aca081", @@ -1320,6 +1408,26 @@ "if PLOT: abscal_degen_plot()" ] }, + { + "cell_type": "markdown", + "id": "3da83c03", + "metadata": {}, + "source": [ + "# *Figure 6: Relative Phase Calibration*\n", + "\n", + "This figure shows the relative phase calibration between the `ee` vs. `nn` polarizations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49cb9bfe", + "metadata": {}, + "outputs": [], + "source": [ + "if PLOT: polarized_gain_phase_plot()" + ] + }, { "cell_type": "markdown", "id": "21970839", @@ -1396,7 +1504,7 @@ "id": "4deeb7fa", "metadata": {}, "source": [ - "# *Figure 6: chi^2 per antenna across the array*\n", + "# *Figure 7: chi^2 per antenna across the array*\n", "\n", "This plot shows median (taken over time and frequency) of the normalized chi^2 per antenna. \n", "The expectation value for this quantity when the array is perfectly redundant is 1.0. \n", @@ -1422,7 +1530,7 @@ "id": "241c0a22", "metadata": {}, "source": [ - "# *Figure 7: Summary of antenna classifications after redundant calibration*\n", + "# *Figure 8: Summary of antenna classifications after redundant calibration*\n", "\n", "This figure is the same as [Figure 2](#Figure-2:-Summary-of-antenna-classifications-prior-to-calibration), except that it now includes additional suspect or bad antennas based on redundant calibration.\n", "This can include antennas with high chi^2, but it can also include antennas classified as \"bad\" because they would add extra degeneracies to calibration." 
@@ -1559,6 +1667,8 @@ " \n", " hd_vissol = io.HERAData(SUM_FILE)\n", " hc_omni = hd_vissol.init_HERACal(gain_convention='divide', cal_style='redundant')\n", + " hc_omni.pol_convention = hd_auto_model.pol_convention\n", + " hc_omni.gain_scale = hd_auto_model.vis_units\n", " hc_omni.update(gains=sol.gains, flags=omni_flags, quals=meta['chisq_per_ant'], total_qual=meta['chisq'])\n", " hc_omni.history += add_to_history\n", " hc_omni.write_calfits(OMNICAL_FILE, clobber=True)\n", @@ -1567,7 +1677,7 @@ " \n", " if SAVE_OMNIVIS_FILE:\n", " # output results, harmonizing keys over polarizations\n", - " all_reds = redcal.get_reds(hd.data_antpos, pols=['ee', 'nn'], pol_mode='2pol')\n", + " all_reds = redcal.get_reds(hd.data_antpos, pols=['ee', 'nn', 'en', 'ne'], pol_mode='4pol')\n", " bl_to_red_map = {bl: red[0] for red in all_reds for bl in red}\n", " hd_vissol.read(bls=[bl_to_red_map[bl] for bl in sol.vis], return_data=False)\n", " hd_vissol.empty_arrays()\n", @@ -1575,6 +1685,8 @@ " hd_vissol.update(data={bl_to_red_map[bl]: sol.vis[bl] for bl in sol.vis}, \n", " flags={bl_to_red_map[bl]: vissol_flags[bl] for bl in vissol_flags}, \n", " nsamples={bl_to_red_map[bl]: vissol_nsamples[bl] for bl in vissol_nsamples})\n", + " hd_vissol.pol_convention = hd_auto_model.pol_convention\n", + " hd_vissol.vis_units = hd_auto_model.vis_units\n", " hd_vissol.write_uvh5(OMNIVIS_FILE, clobber=True)" ] }, @@ -1632,7 +1744,7 @@ "metadata": {}, "outputs": [], "source": [ - "if SAVE_RESULTS and not os.path.exists(OMNIVIS_FILE):\n", + "if SAVE_RESULTS and SAVE_OMNIVIS_FILE and not os.path.exists(OMNIVIS_FILE):\n", " print(f'WARNING: No omnivis file produced at {OMNIVIS_FILE}. Creating an empty visibility solution file.')\n", " hd_writer = io.HERAData(SUM_FILE)\n", " hd_writer.initialize_uvh5_file(OMNIVIS_FILE, clobber=True)" @@ -1687,7 +1799,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.9.6" }, "toc": { "base_numbering": 1, diff --git a/notebooks/file_postprocessing.ipynb b/notebooks/file_postprocessing.ipynb index df6edcb..89e0573 100644 --- a/notebooks/file_postprocessing.ipynb +++ b/notebooks/file_postprocessing.ipynb @@ -7,7 +7,7 @@ "source": [ "# Single File Post-Processing\n", "\n", - "**by Josh Dillon**, last updated April 24, 2024\n", + "**by Josh Dillon**, last updated July 15, 2024\n", "\n", "This notebook, the final step in a pre-LST-binning analysis, applies calibration solutions and flags to both sum and diff data, producing a variety of data products. These currently may include:\n", "\n", @@ -91,11 +91,12 @@ "SAVE_DIFF_RED_AVG = os.environ.get(\"SAVE_DIFF_RED_AVG\", \"TRUE\").upper() == \"TRUE\"\n", "SAVE_ABS_CAL_RED_AVG = os.environ.get(\"SAVE_ABS_CAL_RED_AVG\", \"TRUE\").upper() == \"TRUE\"\n", "SAVE_DLY_FILT_RED_AVG = os.environ.get(\"SAVE_DLY_FILT_RED_AVG\", \"TRUE\").upper() == \"TRUE\"\n", + "SAVE_CROSS_POLS = os.environ.get(\"SAVE_CROSS_POLS\", \"FALSE\").upper() == \"TRUE\"\n", "# TODO: in theory, if some of these are turned off, some memory and/or processing could be saved \n", "# by doing only a subset of what this notebook does. 
That's left for future work.\n", "\n", "add_to_history = 'Produced by file_postprocessing notebook with the following environment:\\n' + '=' * 65 + '\\n' + os.popen('conda env export').read() + '=' * 65\n", - "for setting in ['SAVE_RESULTS', 'SAVE_DIFF_RED_AVG', 'SAVE_ABS_CAL_RED_AVG', 'SAVE_DLY_FILT_RED_AVG']:\n", + "for setting in ['SAVE_RESULTS', 'SAVE_DIFF_RED_AVG', 'SAVE_ABS_CAL_RED_AVG', 'SAVE_DLY_FILT_RED_AVG', 'SAVE_CROSS_POLS']:\n", " print(f'{setting} = {eval(setting)}')" ] }, @@ -124,6 +125,9 @@ "FILTER_CACHE = os.environ.get(\"FILTER_CACHE\", \"filter_cache\")\n", "filter_cache_dir = os.path.join(os.path.dirname(SUM_FILE), FILTER_CACHE)\n", "\n", + "# Get output reds pickle name for storing the actual reds used in the redundant average\n", + "REDS_PICKLE_SUFFIX = os.environ.get(\"REDS_PICKLE_SUFFIX\", 'reds_used.p')\n", + "\n", "# output abs-calibrated, redundantly-averaged files\n", "SUM_ABS_CAL_RED_AVG_SUFFIX = os.environ.get(\"SUM_ABS_CAL_RED_AVG_SUFFIX\", 'sum.abs_calibrated.red_avg.uvh5')\n", "DIFF_ABS_CAL_RED_AVG_SUFFIX = os.environ.get(\"DIFF_ABS_CAL_RED_AVG_SUFFIX\", 'diff.abs_calibrated.red_avg.uvh5')\n", @@ -145,7 +149,7 @@ "AVG_ABS_AUTO_SUFFIX = os.environ.get(\"AVG_ABS_AUTO_SUFFIX\", 'sum.smooth_calibrated.avg_abs_auto.uvh5')\n", "AVG_ABS_CROSS_SUFFIX = os.environ.get(\"AVG_ABS_CROSS_SUFFIX\", 'sum.smooth_calibrated.avg_abs_cross.uvh5')\n", "\n", - "for suffix in ['DIFF_SUFFIX', 'ABS_CAL_SUFFIX', 'SMOOTH_CAL_SUFFIX', \n", + "for suffix in ['DIFF_SUFFIX', 'ABS_CAL_SUFFIX', 'SMOOTH_CAL_SUFFIX', 'REDS_PICKLE_SUFFIX',\n", " 'SUM_ABS_CAL_RED_AVG_SUFFIX', 'DIFF_ABS_CAL_RED_AVG_SUFFIX',\n", " 'SUM_SMOOTH_CAL_RED_AVG_SUFFIX', 'DIFF_SMOOTH_CAL_RED_AVG_SUFFIX', \n", " 'SUM_ABS_CAL_RED_AVG_DLY_FILT_SUFFIX', 'DIFF_ABS_CAL_RED_AVG_DLY_FILT_SUFFIX', \n", @@ -194,12 +198,14 @@ "metadata": {}, "outputs": [], "source": [ + "pols = ['nn', 'ee', 'ne', 'en'] if SAVE_CROSS_POLS else ['ee', 'nn']\n", + "\n", "# Load sum and diff data\n", "hd = io.HERADataFastReader(SUM_FILE)\n", - "data, flags, nsamples = hd.read(pols=['ee', 'nn'])\n", + "data, flags, nsamples = hd.read(pols=pols)\n", "\n", "hd_diff = io.HERADataFastReader(DIFF_FILE)\n", - "diff_data, diff_flags, diff_nsamples = hd_diff.read(pols=['ee', 'nn'])" + "diff_data, diff_flags, diff_nsamples = hd_diff.read(pols=pols)" ] }, { "cell_type": "code", "execution_count": null, @@ -210,7 +216,7 @@ "outputs": [], "source": [ "# pick all reds that might be in the data, using the same set across polarizations for easier output\n", - "reds = redcal.get_reds(hd.data_antpos, pols=['ee', 'nn'], include_autos=True)\n", + "reds = redcal.get_reds(hd.data_antpos, pols=pols, include_autos=True)\n", "ex_ants = set(read_a_priori_ant_flags(aposteriori_yaml_file))\n", "possibly_unflagged_bls = [bl for bl in data if utils.split_bl(bl)[0] not in ex_ants and utils.split_bl(bl)[1] not in ex_ants]\n", "possibly_unflagged_antpairs = set([ap for bl in possibly_unflagged_bls for ap in [bl[:2], bl[-2::-1]]])\n", @@ -227,9 +233,13 @@ "# Load calibration solutions and gain flags\n", "hc_smooth = io.HERACal(SMOOTH_CAL_FILE)\n", "smooth_gains, cal_flags, _, _ = hc_smooth.read()\n", + "pol_convention = hc_smooth.pol_convention\n", + "gain_scale = hc_smooth.gain_scale\n", "\n", "hc_abs = io.HERACal(ABS_CAL_FILE)\n", - "abs_gains, abs_cal_flags, _, _ = hc_abs.read()" + "abs_gains, abs_cal_flags, _, _ = hc_abs.read()\n", + "assert pol_convention == hc_abs.pol_convention, f'{pol_convention} != {hc_abs.pol_convention}'\n", + "assert gain_scale == hc_abs.gain_scale, f'{gain_scale} != {hc_abs.gain_scale}'" ] }, { "cell_type": "code", "execution_count": null, @@ 
-304,6 +314,22 @@ "red_avg_abs_cal_diff_data, _, _ = red_average(reds, diff_data, nsamples, abs_gains, flags=flags, cal_flags=cal_flags)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7bc8cf5-7527-4b5e-8463-cfc3294c723a", + "metadata": {}, + "outputs": [], + "source": [ + "# Save the actual reds used for this redundant average\n", + "flagged_bls = [bl for bl in flags if np.all(flags[bl])]\n", + "flagged_ants = [ant for ant in cal_flags if np.all(cal_flags[ant])]\n", + "reds_used_here = redcal.filter_reds(reds, ex_bls=flagged_bls, ex_ants=flagged_ants)\n", + "if SAVE_RESULTS:\n", + " with open(REDS_PICKLE_FILE, 'wb') as reds_pickle_file:\n", + " pickle.dump(reds_used_here, reds_pickle_file)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -322,13 +348,13 @@ "metadata": {}, "outputs": [], "source": [ - "def plot_red_avg_vis():\n", + "def plot_red_avg_vis(pols=['ee', 'nn']):\n", " if ALL_FLAGGED:\n", " print('All integrations are flagged. Nothing to plot.')\n", " return\n", " \n", " fig, axes = plt.subplots(2, 2, figsize=(14, 6), dpi=100, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})\n", - " for i, pol in enumerate(['ee', 'nn']):\n", + " for i, pol in enumerate(pols):\n", "\n", " reds_here = redcal.filter_reds(reds, pols=[pol], antpos=hd.antpos, min_bl_cut=1)\n", " sol = redcal.RedSol(reds_here, gains=smooth_gains)\n", @@ -406,6 +432,16 @@ "plot_red_avg_vis()" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "6103f2c2", + "metadata": {}, + "outputs": [], + "source": [ + "if SAVE_CROSS_POLS:\n", + " plot_red_avg_vis(['en', 'ne'])" + ] + }, { "cell_type": "markdown", "id": "e5c3f26e", @@ -505,7 +541,7 @@ "dly_filt_red_avg_abs_cal_sum_data = copy.deepcopy(red_avg_abs_cal_sum_data)\n", "dly_filt_red_avg_abs_cal_diff_data = copy.deepcopy(red_avg_abs_cal_diff_data)\n", "\n", - "auto_bls = [bl for bl in dly_filt_red_avg_smooth_cal_sum_data if bl[0] == bl[1]]\n", + "auto_bls = [bl for bl in dly_filt_red_avg_smooth_cal_sum_data if utils.split_bl(bl)[0] == utils.split_bl(bl)[1]]\n", "gap_flag_warned = set()\n", "\n", "new_cache_keys = []\n", @@ -518,8 +554,9 @@ " \n", " for bl in sorted(red_avg_flags.keys(), key=lambda bl: np.sum(red_avg_flags[bl]), reverse=True):\n", " # inverse variance weight using smooth_calibrated autocorrelations (which are proportional to std of noise)\n", - " auto_bl = [abl for abl in auto_bls if abl[2] == bl[2]][0]\n", - " wgts = np.where(red_avg_flags[bl], 0, red_avg_smooth_cal_sum_data[auto_bl]**-2)\n", + " auto_bl1 = [abl for abl in auto_bls if abl[2] == utils.join_pol(utils.split_pol(bl[2])[0], utils.split_pol(bl[2])[0])][0]\n", + " auto_bl2 = [abl for abl in auto_bls if abl[2] == utils.join_pol(utils.split_pol(bl[2])[1], utils.split_pol(bl[2])[1])][0]\n", + " wgts = np.where(red_avg_flags[bl], 0, np.abs(red_avg_smooth_cal_sum_data[auto_bl1] * red_avg_smooth_cal_sum_data[auto_bl2])**-1)\n", " wgts /= np.nanmean(np.where(red_avg_flags[bl], np.nan, wgts)) # avoid dynamic range issues\n", " wgts[~np.isfinite(wgts)] = 0 \n", "\n", @@ -681,6 +718,8 @@ " hd_out.empty_arrays()\n", " hd_out.history += add_to_history\n", " hd_out.update(flags=red_avg_flags, nsamples=red_avg_nsamples)\n", + " hd_out.pol_convention = pol_convention\n", + " hd_out.vis_units = gain_scale\n", " if ALL_FLAGGED:\n", " # put back in all the flags that we had been ignoring up to this point\n", " hd_out.flag_array = np.ones_like(hd_out.flag_array)\n", @@ -742,6 +781,8 @@ " hd_out.read(bls=set([bl[0:2] for bl in avg_abs_data]), polarizations=['ee', 'nn'])\n", " 
hd_out.empty_arrays()\n", " hd_out.update(data=avg_abs_data, flags=avg_flags, nsamples=avg_nsamples)\n", + " hd_out.pol_convention = pol_convention\n", + " hd_out.vis_units = gain_scale\n", " if ALL_FLAGGED:\n", " # put back in all the flags that we had been ignoring up to this point\n", " hd_out.flag_array = np.ones_like(hd_out.flag_array) \n", diff --git a/notebooks/full_day_systematics_inspect.ipynb b/notebooks/full_day_systematics_inspect.ipynb index be70fb8..5779e67 100644 --- a/notebooks/full_day_systematics_inspect.ipynb +++ b/notebooks/full_day_systematics_inspect.ipynb @@ -7,7 +7,7 @@ "source": [ "# Full Day Systematics Inspection\n", "\n", - "__by Tyler Cox and Josh Dillon__, last updated May 10, 2024\n", + "__by Tyler Cox and Josh Dillon__, last updated July 20, 2024\n", "\n", "This notebook is designed to inspect the full day delay spectra for a small number of redundantly-averaged baselines, by inpainting in frequency and time, and performing delay filtering, cross-talk filtering, and main-beam fringe rate filtering. It is based on [a similar notebook written by Josh Dillon for H1C analysis](https://github.com/HERA-Team/hera_notebook_templates/blob/master/notebooks/h1c_systematics_mitigation_inspect.ipynb).\n", "\n", @@ -39,7 +39,7 @@ "from copy import deepcopy\n", "from pyuvdata import uvbeam, uvdata\n", "from hera_filters import dspec\n", - "from hera_cal import io, frf, vis_clean, redcal\n", + "from hera_cal import io, frf, vis_clean, redcal, utils\n", "from hera_cal.datacontainer import DataContainer, RedDataContainer\n", "from IPython.display import display, HTML\n", "\n", @@ -114,11 +114,12 @@ " hd = io.HERADataFastReader(red_avg_files[(len(red_avg_files) // 2) + i])\n", " bls_to_plot = []\n", "\n", - " # Find 1, 2, and 4 units EW baselines\n", + " # Find 1, 2, and 4 units EW auto-pol baselines \n", " for bl in hd.bls:\n", - " bl_vec = (hd.antpos[bl[1]] - hd.antpos[bl[0]])\n", - " if (np.abs(bl_vec[1]) < 1) and int(np.round(bl_vec[0] / 14.6)) in [1, 2, 4]:\n", - " bls_to_plot.append(bl)\n", + " if utils.split_pol(bl[2])[0] == utils.split_pol(bl[2])[1]:\n", + " bl_vec = (hd.antpos[bl[1]] - hd.antpos[bl[0]])\n", + " if (np.abs(bl_vec[1]) < 1) and int(np.round(bl_vec[0] / 14.6)) in [1, 2, 4]:\n", + " bls_to_plot.append(bl)\n", "\n", " # Get intersector baseline\n", " _, _, nsamples = hd.read()\n", @@ -510,14 +511,6 @@ " return {'vmin': vmin, 'vmax': vmax}" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "60c4fab5", - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "code", "execution_count": null, diff --git a/notebooks/single_baseline_postprocessing_and_pspec.ipynb b/notebooks/single_baseline_postprocessing_and_pspec.ipynb index 14aa80f..8d416ee 100644 --- a/notebooks/single_baseline_postprocessing_and_pspec.ipynb +++ b/notebooks/single_baseline_postprocessing_and_pspec.ipynb @@ -7,7 +7,7 @@ "source": [ "# Single Baseline Filtering and Power Spectrum Estimation\n", "\n", - "**by Josh Dillon**, last updated February 28, 2024\n", + "**by Josh Dillon**, last updated July 17, 2024\n", "\n", "This notebook is designed to take a single redundantly-averaged unique baseline (typically after LST-binning) and push it through all the way to the power spectrum. It operates on single files that contain a single baseline for all LSTs and both `'ee'` and `'nn'` polarizations. 
It then can:\n", "* Throw out highly flagged times and/or channels\n", @@ -163,7 +163,7 @@ "outputs": [], "source": [ "# Example set of settings\n", - "# SINGLE_BL_FILE = '/lustre/aoc/projects/hera/h6c-analysis/IDR2/lstbin-outputs/redavg-smoothcal/inpaint/single_baseline_files/zen.LST.baseline.0_4.sum.uvh5' \n", + "# SINGLE_BL_FILE = '/lustre/aoc/projects/hera/h6c-analysis/IDR2/lstbin-outputs/redavg-smoothcal-inpaint/inpaint/single_baseline_files/zen.LST.baseline.0_4.sum.uvh5' \n", "# OUT_PSPEC_FILE = '/lustre/aoc/projects/hera/jsdillon/H6C/PSPEC/pspec_out/zen.LST.baseline.0_4.sum.pspec.h5'\n", "# ALREADY_INPAINTED = True\n", "# PERFORM_INPAINT = False\n", @@ -861,6 +861,8 @@ "outputs": [], "source": [ "def form_pseudostokes(data=None, flags=None, nsamples=None):\n", + " # TODO: fix this to use pol_convention from data properly\n", + " # TODO: deprecate this in favor of a version in hera_pspec \n", " '''This function creates pseudo-Stokes I and Q versions corresponding to the ee and nn entries\n", " in the input data containers, and stores them in the same data containers.'''\n", " # data are averaged (or diffed in the case of pQ)\n", @@ -1203,7 +1205,6 @@ " \n", " # Main beam FRF and forming pseduostokes\n", " frf_d = main_beam_FR_filter(xtalk_filt_d, w, tslice=None)\n", - " form_pseudostokes(frf_d, f, n)\n", " deint_frf_data.append(frf_d)\n", " else:\n", " deint_xtalk_filt_data.append(d)\n", @@ -1394,7 +1395,7 @@ " avg_hd_copy.select(bls=[auto_aps][0])\n", " avg_hd_copy.ant_1_array = np.full_like(avg_hd_copy.ant_1_array, ant)\n", " avg_hd_copy.ant_2_array = np.full_like(avg_hd_copy.ant_2_array, ant)\n", - " avg_hd_copy.baseline_array = uvutils.antnums_to_baseline(avg_hd_copy.ant_1_array, avg_hd_copy.ant_2_array, avg_hd_copy.Nants_telescope)\n", + " avg_hd_copy.baseline_array = uvutils.antnums_to_baseline(avg_hd_copy.ant_1_array, avg_hd_copy.ant_2_array, Nants_telescope=avg_hd_copy.Nants_telescope)\n", " avg_hd.fast_concat(avg_hd_copy, 'blt', inplace=True)\n", " \n", " hds.append(avg_hd)"
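
Since the lststack changes above replace the per-stack `.npz` calibration outputs with a single pickle of `all_calibration_parameters`, a short sketch of reading that file back follows. The path is hypothetical (actual names come from `fname_format` with `sum.uvh5` replaced by `lstcal.pkl`), and the structure, a dict keyed by LST-grid value whose entries hold each stack's calibration parameters plus `antpairs` and `lst`, follows the notebook cell above.

    import pickle

    # Hypothetical path; actual filenames follow fname_format with "sum.uvh5" -> "lstcal.pkl".
    lstcal_file = "inpaint/zen.LST.0.25000.0.sum.lstcal.pkl"

    with open(lstcal_file, "rb") as handle:
        all_calibration_parameters = pickle.load(handle)

    # One entry per LST-grid value, each holding that stack's calibration solution.
    for lst, params in sorted(all_calibration_parameters.items()):
        print(f"LST {lst:.5f}: {len(params['antpairs'])} antpairs, parameters: {sorted(params)}")
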