fix blvec bug
tyler-a-cox committed Jun 6, 2024
1 parent 5404169 commit 5f77d49
Showing 1 changed file with 40 additions and 22 deletions.
62 changes: 40 additions & 22 deletions hera_notebook_templates/notebooks/lststack.ipynb
@@ -976,6 +976,7 @@
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"if do_lstcal:\n",
" for i, (stack, lstavg_model) in enumerate(zip(cross_stacks, cross_lstavg)):\n",
" calibration_parameters, gains = lstbin_calibration(\n",
@@ -1058,7 +1059,8 @@
" return_models=True, \n",
" cache={}, \n",
" filter_properties=None, \n",
" eigenval_cutoff=[1e-12]\n",
" eigenval_cutoff=[1e-12],\n",
" round_filter_half_width=False\n",
"):\n",
" \"\"\"\n",
" \"\"\" \n",
@@ -1086,16 +1088,22 @@
" inpainted_mean = np.zeros(stack.Nfreqs, dtype=stack.data.dtype)\n",
" total_nsamples = np.zeros(stack.Nfreqs, dtype=float)\n",
" \n",
" for iap, antpair in enumerate(stack.antpairs):\n",
" for iap, antpair in enumerate(stack.antpairs): \n",
" # Get the baseline vector and length\n",
" bl_vec = (antpos[antpair[1]] - antpos[antpair[0]])[:2]\n",
" bl_vec = (antpos[antpair[1]] - antpos[antpair[0]])[:]\n",
" bl_len = np.linalg.norm(bl_vec) / constants.c\n",
" filter_centers, filter_half_widths = vis_clean.gen_filter_properties(\n",
" ax='freq',\n",
" bl_len=max(bl_len, 7.0 / constants.c),\n",
" **filter_properties,\n",
" )\n",
" \n",
" # Round up filter half width to the nearest nanosecond\n",
" # allows the cache to be hit more frequently\n",
" if round_filter_half_width:\n",
" filter_half_widths = [np.ceil(filter_half_widths[0] * 1e9) * 1e-9]\n",
" \n",
" \n",
" for polidx, pol in enumerate(stack.pols):\n",
" # Get easy access to the data, flags, and nsamples for this baseline-pol pair\n",
" stackd = stack.data[:, iap, :, polidx] \n",
@@ -1136,7 +1144,7 @@
" flagged_stretches = true_stretches(convolved_flags)\n",
" longest_gap = np.max([ts.stop - ts.start for ts in flagged_stretches]) if len(flagged_stretches) > 0 else 0\n",
" \n",
" # Also flag if there is only 1 unflagged day\n",
" # Flag if combined gap is too large\n",
" if longest_gap > max_allowed_gap_size:\n",
" avg_flgs[band] = True \n",
" continue\n",
@@ -1149,24 +1157,30 @@
" cache=cache,\n",
" eigenval_cutoff=eigenval_cutoff\n",
" )[0].real\n",
"\n",
" # compute fits for dpss basis functions\n",
" hash_key = dspec._fourier_filter_hash(\n",
" filter_centers=filter_centers, \n",
" filter_half_widths=filter_half_widths, \n",
" x=stack.freq_array[band], \n",
" w=base_noise_var\n",
" )\n",
" \n",
" # If key exists in cache, load in filter and invese\n",
" if hash_key in cache:\n",
" CNinv_1sample_dpss, CNinv_1sample_dpss_inv = cache[hash_key]\n",
" else:\n",
" CNinv_1sample_dpss = np.array([\n",
" (basis.T * wgts).dot(basis) for wgts in mask[:, band] / base_noise_var[:, band]]\n",
" # Do the caching on a per night basis - find better way to do this\n",
" CNinv_1sample_dpss = []\n",
" CNinv_1sample_dpss_inv = []\n",
" for night_index, nights in enumerate(stack.nights):\n",
" # compute fits for dpss basis functions\n",
" hash_key = dspec._fourier_filter_hash(\n",
" filter_centers=filter_centers, \n",
" filter_half_widths=filter_half_widths, \n",
" x=stack.freq_array[band], \n",
" w=(base_noise_var[night_index, band] * mask[night_index, band])\n",
" )\n",
" CNinv_1sample_dpss_inv = np.linalg.pinv(CNinv_1sample_dpss)\n",
" cache[hash_key] = (CNinv_1sample_dpss, CNinv_1sample_dpss_inv)\n",
"\n",
" # If key exists in cache, load in filter and inverse\n",
" if hash_key in cache:\n",
" _CNinv_1sample_dpss, _CNinv_1sample_dpss_inv = cache[hash_key]\n",
" CNinv_1sample_dpss.append(_CNinv_1sample_dpss)\n",
" CNinv_1sample_dpss_inv.append(_CNinv_1sample_dpss_inv)\n",
" else:\n",
" _CNinv_1sample_dpss = np.dot(basis.T * mask[night_index, band] / base_noise_var[night_index, band], basis)\n",
" _CNinv_1sample_dpss_inv = np.linalg.pinv(_CNinv_1sample_dpss)\n",
" CNinv_1sample_dpss.append(_CNinv_1sample_dpss)\n",
" CNinv_1sample_dpss_inv.append(_CNinv_1sample_dpss_inv)\n",
" cache[hash_key] = (_CNinv_1sample_dpss, _CNinv_1sample_dpss_inv)\n",
" \n",
" # Compute data covariance \n",
" CNinv_dpss = CNinv_1sample_dpss * nsamples_by_night[:, np.newaxis]\n",
@@ -1201,7 +1215,8 @@
" ], axis=0)\n",
" \n",
" \n",
" effective_nights = np.sum(np.mean(mask[:, band], axis=1))\n",
" #effective_nights = np.sum(np.mean(mask[:, band], axis=1))\n",
" effective_nights = np.sum(is_unflagged_night)\n",
" \n",
" # TODO: Sample covariance is overestimated and has a fudge factor, this is probably close to correct\n",
" # but not exactly right. We should revisit this.\n",
@@ -1579,6 +1594,7 @@
},
"outputs": [],
"source": [
"%%time\n",
"if do_simultaneous_inpainting:\n",
" inpaint_dpss_models = []\n",
"\n",
@@ -1596,6 +1612,7 @@
" \"standoff\": inpaint_standoff, \n",
" },\n",
" eigenval_cutoff=[inpaint_eigencutoff], \n",
" round_filter_half_width=True,\n",
" )\n",
" inpaint_dpss_models.append(dpss_models)\n",
" cross_lstavg[i]['data'] = _avg['data']\n",
@@ -1658,6 +1675,7 @@
"execution_count": null,
"id": "e5e3fdbc-12d3-4364-bb47-74b8151b7d74",
"metadata": {
"scrolled": false,
"tags": []
},
"outputs": [],
@@ -1697,7 +1715,7 @@
" )\n",
" ax[kk].plot(fq, np.abs(cross_lstavg[0]['data'][apidx, band, polidx][subband]), lw=3, ls='--', color='red', label='simul. inpaint')\n",
"\n",
" ax[kk].plot(fq, np.abs(inpaint_dpss_models[0][blpol][band][subband]), lw=2, color='purple', ls=':', label='model')\n",
" #ax[kk].plot(fq, np.abs(inpaint_dpss_models[0][blpol][band][subband]), lw=2, color='purple', ls=':', label='model')\n",
"\n",
" gotlabel = False\n",
" for i, jd in enumerate(cross_stacks[0].nights):\n",