Add TFR and SN mock generation to BORG scripts

This commit is contained in:
Deaglan Bartlett 2025-02-10 21:30:27 +01:00
parent 071beeae80
commit 8d74403002
8 changed files with 1659 additions and 310 deletions

426
borg_velocity/borg_mock.py Normal file
View file

@ -0,0 +1,426 @@
import aquila_borg as borg
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as apu
import jax.numpy as jnp
import jax
import configparser
import ast
import borg_velocity.poisson_process as poisson_process
import borg_velocity.projection as projection
import borg_velocity.utils as utils
from borg_velocity.utils import myprint
def tfr_create_mock(Nt, L, xmin, cpar, dens, vel, Rmax, alpha, mthresh,
                    a_TFR, b_TFR, sigma_TFR, sigma_m, sigma_eta,
                    hyper_eta_mu, hyper_eta_sigma, sigma_v,
                    interp_order=1, bias_epsilon=1e-7):
    """
    Create mock TFR catalogue from a density and velocity field

    Args:
        - Nt (int): Number of tracers to produce
        - L (float): Box length (Mpc/h)
        - xmin (float): Coordinate of corner of the box (Mpc/h)
        - cpar (borg.cosmo.CosmologicalParameters): Cosmological parameters to use
        - dens (np.ndarray): Over-density field (shape = (N, N, N))
        - vel (np.ndarray): Velocity field (km/s) (shape = (3, N, N, N))
        - Rmax (float): Maximum allowed comoving radius of a tracer (Mpc/h)
        - alpha (float): Exponent for bias model
        - mthresh (float): Threshold apparent magnitude of the selection cut
        - a_TFR (float): TFR relation intercept
        - b_TFR (float): TFR relation slope
        - sigma_TFR (float): Intrinsic scatter in the TFR
        - sigma_m (float): Uncertainty on the apparent magnitude measurements
        - sigma_eta (float): Uncertainty on the linewidth measurements
        - hyper_eta_mu (float): Mean of the Gaussian hyper-prior for the true eta values
        - hyper_eta_sigma (float): Std deviation of the Gaussian hyper-prior for the true eta values
        - sigma_v (float): Uncertainty on the velocity field (km/s)
        - interp_order (int, default=1): Order of interpolation from grid points to the line of sight
        - bias_epsilon (float, default=1e-7): Small number to add to 1 + delta to prevent 0^alpha

    Returns:
        - all_RA (np.ndarray): Right Ascension (degrees) of the tracers (shape = (Nt,))
        - all_Dec (np.ndarray): Declination (degrees) of the tracers (shape = (Nt,))
        - czCMB (np.ndarray): Observed redshifts (km/s) of the tracers (shape = (Nt,))
        - all_mtrue (np.ndarray): True apparent magnitudes of the tracers (shape = (Nt,))
        - all_etatrue (np.ndarray): True linewidths of the tracers (shape = (Nt,))
        - all_mobs (np.ndarray): Observed apparent magnitudes of the tracers (shape = (Nt,))
        - all_etaobs (np.ndarray): Observed linewidths of the tracers (shape = (Nt,))
        - all_xtrue (np.ndarray): True comoving coordinates of the tracers (Mpc/h) (shape = (3, Nt))
    """

    # Initialize arrays to store the accepted tracers
    all_xtrue = np.empty((3, Nt))
    all_mtrue = np.empty(Nt)
    all_etatrue = np.empty(Nt)
    all_mobs = np.empty(Nt)
    all_etaobs = np.empty(Nt)
    all_RA = np.empty(Nt)
    all_Dec = np.empty(Nt)

    # Counter for accepted positions
    accepted_count = 0

    # Bias model: tracer density proportional to (1 + delta)^alpha
    phi = (1. + dens + bias_epsilon) ** alpha

    # Only use centre of box, within +/- Rmax of the observer in each direction
    x = np.linspace(xmin, xmin + L, dens.shape[0] + 1)
    i0 = np.argmin(np.abs(x + Rmax))
    i1 = np.argmin(np.abs(x - Rmax))
    L_small = x[i1] - x[i0]
    xmin_small = x[i0]
    phi_small = phi[i0:i1, i0:i1, i0:i1]

    # Loop until we have Nt valid positions (the magnitude cut rejects some draws)
    while accepted_count < Nt:

        # Generate positions (comoving)
        xtrue = poisson_process.sample_3d(phi_small, Nt, L_small,
                                          (xmin_small, xmin_small, xmin_small))

        # Convert to RA, Dec, Distance (comoving)
        rtrue = np.sqrt(np.sum(xtrue ** 2, axis=0))  # Mpc/h
        c = SkyCoord(x=xtrue[0], y=xtrue[1], z=xtrue[2], representation_type='cartesian')
        RA = c.spherical.lon.degree
        Dec = c.spherical.lat.degree

        # Compute cosmological redshift
        zcosmo = utils.z_cos(rtrue, cpar.omega_m)

        # Compute luminosity distance
        # DO I NEED TO DO /h???
        dL = (1 + zcosmo) * rtrue / cpar.h  # Mpc

        # Compute true distance modulus
        mutrue = 5 * np.log10(dL) + 25

        # Sample true linewidth (eta) from its prior
        etatrue = hyper_eta_mu + hyper_eta_sigma * np.random.randn(Nt)

        # Obtain muTFR from mutrue using the intrinsic scatter
        muTFR = mutrue + sigma_TFR * np.random.randn(Nt)

        # Obtain apparent magnitude from the TFR (absolute magnitude M = a + b * eta)
        mtrue = muTFR + (a_TFR + b_TFR * etatrue)

        # Scatter true observed apparent magnitudes and linewidths
        mobs = mtrue + sigma_m * np.random.randn(Nt)
        etaobs = etatrue + sigma_eta * np.random.randn(Nt)

        # Apply apparent magnitude cut (on the observed magnitudes)
        m = mobs <= mthresh
        mtrue = mtrue[m]
        etatrue = etatrue[m]
        mobs = mobs[m]
        etaobs = etaobs[m]
        xtrue = xtrue[:, m]
        RA = RA[m]
        Dec = Dec[m]

        # Calculate how many valid positions we need to reach Nt
        remaining_needed = Nt - accepted_count
        selected_count = min(xtrue.shape[1], remaining_needed)

        # Append only the needed number of valid positions
        imin = accepted_count
        imax = accepted_count + selected_count
        all_xtrue[:, imin:imax] = xtrue[:, :selected_count]
        all_mtrue[imin:imax] = mtrue[:selected_count]
        all_etatrue[imin:imax] = etatrue[:selected_count]
        all_mobs[imin:imax] = mobs[:selected_count]
        all_etaobs[imin:imax] = etaobs[:selected_count]
        all_RA[imin:imax] = RA[:selected_count]
        all_Dec[imin:imax] = Dec[:selected_count]

        # Update the accepted count
        accepted_count += selected_count

        myprint(f'\tMade {accepted_count} of {Nt}')

    # Get the radial component of the peculiar velocity at the positions of the objects
    myprint('Obtaining peculiar velocities')
    tracer_vel = projection.interp_field(
        vel,
        np.expand_dims(all_xtrue, axis=2),
        L,
        np.array([xmin, xmin, xmin]),
        interp_order
    )  # km/s
    myprint('Radial projection')
    vr_true = np.squeeze(projection.project_radial(
        tracer_vel,
        np.expand_dims(all_xtrue, axis=2),
        np.zeros(3,)
    ))  # km/s

    # Recompute cosmological redshift at the final accepted positions
    rtrue = jnp.sqrt(jnp.sum(all_xtrue ** 2, axis=0))
    zcosmo = utils.z_cos(rtrue, cpar.omega_m)

    # Obtain total observed redshift: (1 + z_obs) = (1 + z_cos)(1 + v_r / c), with noisy v_r
    vr_noised = vr_true + sigma_v * np.random.randn(Nt)
    czCMB = ((1 + zcosmo) * (1 + vr_noised / utils.speed_of_light) - 1) * utils.speed_of_light

    return all_RA, all_Dec, czCMB, all_mtrue, all_etatrue, all_mobs, all_etaobs, all_xtrue
def sn_create_mock(Nt, L, xmin, cpar, dens, vel, Rmax, alpha,
                   a_tripp, b_tripp, M_SN, sigma_SN, sigma_m, sigma_stretch, sigma_c,
                   hyper_stretch_mu, hyper_stretch_sigma, hyper_c_mu, hyper_c_sigma,
                   sigma_v, interp_order=1, bias_epsilon=1e-7):
    """
    Create mock supernova (SN) catalogue from a density and velocity field

    Args:
        - Nt (int): Number of tracers to produce
        - L (float): Box length (Mpc/h)
        - xmin (float): Coordinate of corner of the box (Mpc/h)
        - cpar (borg.cosmo.CosmologicalParameters): Cosmological parameters to use
        - dens (np.ndarray): Over-density field (shape = (N, N, N))
        - vel (np.ndarray): Velocity field (km/s) (shape = (3, N, N, N))
        - Rmax (float): Maximum allowed comoving radius of a tracer (Mpc/h)
        - alpha (float): Exponent for bias model
        - a_tripp (float): Coefficient of stretch in the Tripp relation
        - b_tripp (float): Coefficient of colour in the Tripp relation
        - M_SN (float): Absolute magnitude of supernovae
        - sigma_SN (float): Intrinsic scatter in the Tripp relation
        - sigma_m (float): Uncertainty on the apparent magnitude measurements
        - sigma_stretch (float): Uncertainty on the stretch measurements
        - sigma_c (float): Uncertainty on the colour measurements
        - hyper_stretch_mu (float): Mean of Gaussian hyper prior for the true stretch values
        - hyper_stretch_sigma (float): Std of Gaussian hyper prior for the true stretch values
        - hyper_c_mu (float): Mean of Gaussian hyper prior for the true colour values
        - hyper_c_sigma (float): Std of Gaussian hyper prior for the true colour values
        - sigma_v (float): Uncertainty on the velocity field (km/s)
        - interp_order (int, default=1): Order of interpolation from grid points to the line of sight
        - bias_epsilon (float, default=1e-7): Small number to add to 1 + delta to prevent 0^alpha

    Returns:
        - all_RA (np.ndarray): Right Ascension (degrees) of the tracers (shape = (Nt,))
        - all_Dec (np.ndarray): Declination (degrees) of the tracers (shape = (Nt,))
        - czCMB (np.ndarray): Observed redshifts (km/s) of the tracers (shape = (Nt,))
        - all_mtrue (np.ndarray): True apparent magnitudes of the tracers (shape = (Nt,))
        - all_stretchtrue (np.ndarray): True stretch values of the tracers (shape = (Nt,))
        - all_ctrue (np.ndarray): True colour values of the tracers (shape = (Nt,))
        - all_mobs (np.ndarray): Observed apparent magnitudes of the tracers (shape = (Nt,))
        - all_stretchobs (np.ndarray): Observed stretch values of the tracers (shape = (Nt,))
        - all_cobs (np.ndarray): Observed colour values of the tracers (shape = (Nt,))
        - all_xtrue (np.ndarray): True comoving coordinates of the tracers (Mpc/h) (shape = (3, Nt))
    """

    # Initialize arrays to store the accepted tracers
    all_xtrue = np.empty((3, Nt))
    all_mtrue = np.empty(Nt)
    all_stretchtrue = np.empty(Nt)
    all_ctrue = np.empty(Nt)
    all_mobs = np.empty(Nt)
    all_stretchobs = np.empty(Nt)
    all_cobs = np.empty(Nt)
    all_RA = np.empty(Nt)
    all_Dec = np.empty(Nt)

    # Counter for accepted positions
    accepted_count = 0

    # Bias model: tracer density proportional to (1 + delta)^alpha
    phi = (1. + dens + bias_epsilon) ** alpha

    # Only use centre of box, within +/- Rmax of the observer in each direction
    x = np.linspace(xmin, xmin + L, dens.shape[0] + 1)
    i0 = np.argmin(np.abs(x + Rmax))
    i1 = np.argmin(np.abs(x - Rmax))
    L_small = x[i1] - x[i0]
    xmin_small = x[i0]
    phi_small = phi[i0:i1, i0:i1, i0:i1]

    # Loop until we have Nt valid positions
    # (no selection cut is applied here, unlike the TFR mock)
    while accepted_count < Nt:

        # Generate positions (comoving)
        xtrue = poisson_process.sample_3d(phi_small, Nt, L_small,
                                          (xmin_small, xmin_small, xmin_small))

        # Convert to RA, Dec, Distance (comoving)
        rtrue = np.sqrt(np.sum(xtrue ** 2, axis=0))  # Mpc/h
        c = SkyCoord(x=xtrue[0], y=xtrue[1], z=xtrue[2], representation_type='cartesian')
        RA = c.spherical.lon.degree
        Dec = c.spherical.lat.degree

        # Compute cosmological redshift
        zcosmo = utils.z_cos(rtrue, cpar.omega_m)

        # Compute luminosity distance
        # DO I NEED TO DO /h???
        dL = (1 + zcosmo) * rtrue / cpar.h  # Mpc

        # Compute true distance modulus
        mutrue = 5 * np.log10(dL) + 25

        # Sample true stretch and colour (c) from their priors
        stretchtrue = hyper_stretch_mu + hyper_stretch_sigma * np.random.randn(Nt)
        ctrue = hyper_c_mu + hyper_c_sigma * np.random.randn(Nt)

        # Obtain muSN from mutrue using the intrinsic scatter
        muSN = mutrue + sigma_SN * np.random.randn(Nt)

        # Obtain apparent magnitude from the Tripp relation
        mtrue = muSN - (a_tripp * stretchtrue - b_tripp * ctrue) + M_SN

        # Scatter true observed apparent magnitudes, stretches and colours
        mobs = mtrue + sigma_m * np.random.randn(Nt)
        stretchobs = stretchtrue + sigma_stretch * np.random.randn(Nt)
        cobs = ctrue + sigma_c * np.random.randn(Nt)

        # Calculate how many valid positions we need to reach Nt
        remaining_needed = Nt - accepted_count
        selected_count = min(xtrue.shape[1], remaining_needed)

        # Append only the needed number of valid positions
        imin = accepted_count
        imax = accepted_count + selected_count
        all_xtrue[:, imin:imax] = xtrue[:, :selected_count]
        all_mtrue[imin:imax] = mtrue[:selected_count]
        all_stretchtrue[imin:imax] = stretchtrue[:selected_count]
        all_ctrue[imin:imax] = ctrue[:selected_count]
        all_mobs[imin:imax] = mobs[:selected_count]
        all_stretchobs[imin:imax] = stretchobs[:selected_count]
        all_cobs[imin:imax] = cobs[:selected_count]
        all_RA[imin:imax] = RA[:selected_count]
        all_Dec[imin:imax] = Dec[:selected_count]

        # Update the accepted count
        accepted_count += selected_count

        myprint(f'\tMade {accepted_count} of {Nt}')

    # Get the radial component of the peculiar velocity at the positions of the objects
    myprint('Obtaining peculiar velocities')
    tracer_vel = projection.interp_field(
        vel,
        np.expand_dims(all_xtrue, axis=2),
        L,
        np.array([xmin, xmin, xmin]),
        interp_order
    )  # km/s
    myprint('Radial projection')
    vr_true = np.squeeze(projection.project_radial(
        tracer_vel,
        np.expand_dims(all_xtrue, axis=2),
        np.zeros(3,)
    ))  # km/s

    # Recompute cosmological redshift at the final accepted positions
    rtrue = jnp.sqrt(jnp.sum(all_xtrue ** 2, axis=0))
    zcosmo = utils.z_cos(rtrue, cpar.omega_m)

    # Obtain total observed redshift: (1 + z_obs) = (1 + z_cos)(1 + v_r / c), with noisy v_r
    vr_noised = vr_true + sigma_v * np.random.randn(Nt)
    czCMB = ((1 + zcosmo) * (1 + vr_noised / utils.speed_of_light) - 1) * utils.speed_of_light

    return all_RA, all_Dec, czCMB, all_mtrue, all_stretchtrue, all_ctrue, all_mobs, all_stretchobs, all_cobs, all_xtrue
def borg_mock(s_hat, state, fwd_model, fwd_vel, ini_file, seed=None):
    """
    Create mock catalogues for all tracer samples defined in an ini file,
    using a BORG forward model to produce the density and velocity fields.

    Args:
        - s_hat (np.ndarray): White-noise field to pass through the forward model
        - state (borg.likelihood.MarkovState): State in which "BORG_final_density" is stored
        - fwd_model (borg.forward.BaseForwardModel): Forward model producing the density field
        - fwd_vel: Forward model from which the velocity field is obtained
        - ini_file (str): Name of the ini file with [system], [run], [model], [mock] and
          [sample_i] sections describing the mocks to make
        - seed (int, default=None): If not None, seed np.random before drawing the mocks

    Returns (each a list of length nsamp, entries None where not applicable
    to the sample's tracer type):
        - tracer_type (list of str): Tracer type of each sample ('tfr' or 'sn')
        - RA, Dec (list of np.ndarray): Angular coordinates (degrees)
        - czCMB (list of np.ndarray): Observed redshifts (km/s)
        - m_true, m_obs (list of np.ndarray): True/observed apparent magnitudes
        - eta_true, eta_obs (list of np.ndarray): True/observed linewidths (TFR only)
        - stretch_true, stretch_obs (list of np.ndarray): True/observed stretches (SN only)
        - c_true, c_obs (list of np.ndarray): True/observed colours (SN only)
        - xtrue (list of np.ndarray): True comoving coordinates (Mpc/h)

    Raises:
        - NotImplementedError: If a sample has an unrecognised tracer_type
    """

    myprint('Making mock from BORG')

    config = configparser.ConfigParser()
    config.read(ini_file)

    # Ensure cosmology is correct
    cosmo = utils.get_cosmopar(ini_file)
    fwd_model.setCosmoParams(cosmo)

    # Run BORG density field
    dens = np.zeros(fwd_model.getOutputBoxModel().N)
    fwd_model.forwardModel_v2(s_hat)
    fwd_model.getDensityFinal(dens)
    state["BORG_final_density"][:] = dens

    # Get velocity field
    vel = fwd_vel.getVelocityField()

    # Obtain a bulk velocity, broadcastable against the (3, N, N, N) velocity field
    vbulk = np.array(ast.literal_eval(config['model']['bulk_flow'])).reshape((3, 1, 1, 1))

    if seed is not None:
        np.random.seed(seed)

    # Global box / model parameters shared by all samples
    L = float(config['system']['L0'])
    xmin = float(config['system']['corner0'])
    nsamp = int(config['run']['nsamp'])
    Rmax = float(config['mock']['R_max'])
    sigma_v = float(config['model']['sig_v'])
    interp_order = int(config['model']['interp_order'])
    bias_epsilon = float(config['model']['bias_epsilon'])

    # Per-sample outputs; entries stay None where a field does not apply to the tracer type
    tracer_type = [None] * nsamp
    RA = [None] * nsamp
    Dec = [None] * nsamp
    czCMB = [None] * nsamp
    m_true = [None] * nsamp
    m_obs = [None] * nsamp
    eta_true = [None] * nsamp
    eta_obs = [None] * nsamp
    stretch_true = [None] * nsamp
    stretch_obs = [None] * nsamp
    c_true = [None] * nsamp
    c_obs = [None] * nsamp
    xtrue = [None] * nsamp

    for i in range(nsamp):
        myprint(f'Making mock for sample {i}')

        Nt = int(config[f'sample_{i}']['Nt'])
        alpha = float(config[f'sample_{i}']['alpha'])
        tracer_type[i] = config[f'sample_{i}']['tracer_type']

        myprint(f'Tracer type: {tracer_type[i]}')

        if tracer_type[i] == 'tfr':
            mthresh = float(config[f'sample_{i}']['mthresh'])
            a_TFR = float(config[f'sample_{i}']['a_tfr'])
            b_TFR = float(config[f'sample_{i}']['b_tfr'])
            sigma_TFR = float(config[f'sample_{i}']['sigma_tfr'])
            sigma_m = float(config[f'sample_{i}']['sigma_m'])
            sigma_eta = float(config[f'sample_{i}']['sigma_eta'])
            hyper_eta_mu = float(config[f'sample_{i}']['hyper_eta_mu'])
            hyper_eta_sigma = float(config[f'sample_{i}']['hyper_eta_sigma'])
            RA[i], Dec[i], czCMB[i], m_true[i], eta_true[i], m_obs[i], eta_obs[i], xtrue[i] = tfr_create_mock(
                Nt, L, xmin, cosmo, dens, vel + vbulk,
                Rmax, alpha, mthresh,
                a_TFR, b_TFR, sigma_TFR, sigma_m, sigma_eta,
                hyper_eta_mu, hyper_eta_sigma, sigma_v,
                interp_order=interp_order, bias_epsilon=bias_epsilon)
        elif tracer_type[i] == 'sn':
            a_tripp = float(config[f'sample_{i}']['a_tripp'])
            b_tripp = float(config[f'sample_{i}']['b_tripp'])
            M_SN = float(config[f'sample_{i}']['m_sn'])
            sigma_SN = float(config[f'sample_{i}']['sigma_sn'])
            sigma_m = float(config[f'sample_{i}']['sigma_m'])
            sigma_stretch = float(config[f'sample_{i}']['sigma_stretch'])
            sigma_c = float(config[f'sample_{i}']['sigma_c'])
            hyper_stretch_mu = float(config[f'sample_{i}']['hyper_stretch_mu'])
            hyper_stretch_sigma = float(config[f'sample_{i}']['hyper_stretch_sigma'])
            hyper_c_mu = float(config[f'sample_{i}']['hyper_c_mu'])
            hyper_c_sigma = float(config[f'sample_{i}']['hyper_c_sigma'])
            # Pass the configured interp_order and bias_epsilon (previously the
            # hard-coded defaults were used here, unlike the 'tfr' branch)
            RA[i], Dec[i], czCMB[i], m_true[i], stretch_true[i], c_true[i], m_obs[i], stretch_obs[i], c_obs[i], xtrue[i] = sn_create_mock(
                Nt, L, xmin, cosmo, dens, vel + vbulk, Rmax, alpha,
                a_tripp, b_tripp, M_SN, sigma_SN, sigma_m, sigma_stretch, sigma_c,
                hyper_stretch_mu, hyper_stretch_sigma, hyper_c_mu, hyper_c_sigma,
                sigma_v, interp_order=interp_order, bias_epsilon=bias_epsilon)
        else:
            raise NotImplementedError

    return tracer_type, RA, Dec, czCMB, m_true, m_obs, eta_true, eta_obs, stretch_true, stretch_obs, c_true, c_obs, xtrue

View file

@ -1,27 +1,19 @@
import numpy as np
import aquila_borg as borg
import jax
import numpy as np
import jax.numpy as jnp
import configparser
import warnings
import aquila_borg as borg
#import symbolic_pofk.linear
import jax
from jax import lax
import jaxlib
from functools import partial
import ast
import numbers
import h5py
import warnings
import ast
import re
import os
from functools import partial
import borg_velocity.utils as utils
from borg_velocity.utils import myprint, compute_As, get_sigma_bulk
from borg_velocity.utils import myprint, generateMBData, compute_As, get_sigma_bulk
from borg_velocity.borg_mock import borg_mock
import borg_velocity.forwards as forwards
import borg_velocity.mock_maker as mock_maker
import borg_velocity.projection as projection
from borg_velocity.samplers import HMCBiasSampler, derive_prior, MVSliceBiasSampler, BlackJaxBiasSampler, TransformedBlackJaxBiasSampler
import borg_velocity.samplers as samplers
class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
"""
@ -39,50 +31,16 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
ADD FW_VEL HERE
"""
# Load configuration file
self.ini_file = ini_file
myprint("Reading from configuration file: " + ini_file)
config = configparser.ConfigParser()
config.read(ini_file)
# Grid parameters
# Initialise grid parameters
self.N = [int(config['system'][f'N{i}']) for i in range(3)]
self.L = [float(config['system'][f'L{i}']) for i in range(3)]
# Catalogues
self.nsamp = int(config['run']['nsamp'])
assert self.nsamp > 0, "Need at least one sample to run"
self.Nt = [int(config[f'sample_{i}']['Nt']) for i in range(self.nsamp)]
self.alpha = [float(config[f'sample_{i}']['alpha']) for i in range(self.nsamp)]
self.lam = [float(config[f'sample_{i}']['lam']) for i in range(self.nsamp)]
self.muA = [float(config[f'sample_{i}']['muA']) for i in range(self.nsamp)]
self.frac_sig_rhMpc = [float(config[f'sample_{i}']['frac_sig_rhMpc']) for i in range(self.nsamp)]
# What type of run we're doing
self.run_type = config['run']['run_type']
# Seed if creating mocks
self.mock_seed = int(config['mock']['seed'])
self.R_max = float(config['mock']['r_max'])
# Model parameters
self.ai = float(config['model']['ai'])
self.af = float(config['model']['af'])
self.sig_v = float(config['model']['sig_v'])
if config['model']['R_lim'] == 'none':
self.R_lim = self.L[0]/2
else:
self.R_lim = float(config['model']['R_lim'])
self.Nint_points = int(config['model']['nint_points'])
self.Nsig = float(config['model']['Nsig'])
self.sig_v = float(config['model']['sig_v'])
self.smooth_R = float(config['model']['smooth_R'])
self.bias_epsilon = float(config['model']['bias_epsilon'])
self.interp_order = int(config['model']['interp_order'])
# Load bulk flow
self.bulk_flow = np.array(ast.literal_eval(config['model']['bulk_flow']))
# For log-likelihood values
self.bignum = float(config['mcmc']['bignum'])
@ -94,16 +52,6 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
self.fwd_param = param_model
self.fwd_vel = fwd_vel
# Initialise model parameters
self.model_params = {
**{f'mua{i}':self.muA[i] for i in range(self.nsamp)},
**{f'alpha{i}':self.alpha[i] for i in range(self.nsamp)},
**{f'lam{i}':self.lam[i] for i in range(self.nsamp)},
**{f'bulk_flow_{d}':self.bulk_flow[i] for i, d in enumerate(['x', 'y', 'z'])},
'sig_v':self.sig_v
}
self.fwd_param.setModelParams(self.model_params)
# Initialise cosmological parameters
cpar = utils.get_cosmopar(self.ini_file)
self.fwd.setCosmoParams(cpar)
@ -111,6 +59,10 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
self.updateCosmology(cpar)
myprint(f"Original cosmological parameters: {self.fwd.getCosmoParams()}")
# Initialise model parameters
self.sig_v = float(config['model']['sig_v'])
self.bulk_flow = np.array(ast.literal_eval(config['model']['bulk_flow']))
# Initialise derivative
self.grad_like = jax.grad(self.dens2like, argnums=(0,1))
@ -120,6 +72,25 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
else:
self.action = None
# What type of run we're doing (mock/velmass/data)
self.run_type = config['run']['run_type']
# Seed if creating mocks
self.mock_seed = int(config['mock']['seed'])
# Number of samples to distance tracers
self.nsamp = int(config['run']['nsamp'])
# Initialise integration variables
if config['model']['R_lim'] == 'none':
self.R_lim = self.L[0]/2
else:
self.R_lim = float(config['model']['R_lim'])
self.Nint_points = int(config['model']['nint_points'])
self.Nsig = float(config['model']['Nsig'])
self.MB_pos = [None] * self.nsamp
self.frac_sigma_r = [float(config[f'sample_{i}']['frac_sig_rhMpc']) for i in range(self.nsamp)]
def initializeLikelihood(self, state: borg.likelihood.MarkovState) -> None:
"""
@ -130,21 +101,10 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
"""
myprint("Init likelihood")
# for i in range(self.nsamp):
# state.newArray1d(f"tracer_vr_{i}", self.Nt[i], True)
# if self.run_type != 'data':
# state.newArray1d(f"data_x_{i}", self.Nt[i], True)
# state.newArray1d(f"data_y_{i}", self.Nt[i], True)
# state.newArray1d(f"data_z_{i}", self.Nt[i], True)
# state.newArray1d(f"true_x_{i}", self.Nt[i], True)
# state.newArray1d(f"true_y_{i}", self.Nt[i], True)
# state.newArray1d(f"true_z_{i}", self.Nt[i], True)
# self.data = [state[f"tracer_vr_{i}"] for i in range(self.nsamp)]
state.newArray3d("BORG_final_density", *self.fwd.getOutputBoxModel().N, True)
if self.run_type == 'data':
self.loadObservedData(make_plot=False)
self.loadObservedData()
elif borg.EMBEDDED:
if self.action == 'INIT':
pass # Data will be loaded later
@ -181,8 +141,6 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
# Convert sigma8 to As
cpar = compute_As(cpar)
# cpar.A_s = 1.e-9 * symbolic_pofk.linear.sigma8_to_As(
# cpar.sigma8, cpar.omega_m, cpar.omega_b, cpar.h, cpar.n_s)
myprint(f"Updating cosmology Om = {cosmo.omega_m}, sig8 = {cosmo.sigma8}, As = {cosmo.A_s}")
cpar.omega_q = 1. - cpar.omega_m - cpar.omega_k
@ -192,43 +150,6 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
self.sigma_bulk = get_sigma_bulk(self.L[0], self.fwd.getCosmoParams())
def generateMBData(self) -> None:
    """
    Generate points along the line of sight of each tracer to enable marginalisation
    over distance uncertainty. The central distance is given such that the observed
    redshift equals the cosmological redshift at this distance. The range is then
    +/- Nsig * sig, where
        sig^2 = (sig_v/100)^2 + sig_r^2
    and sig_v is the velocity uncertainty in km/s.

    Sets per-sample lists (length self.nsamp):
        - self.MB_pos: Cartesian coordinates of the integration points
        - self.r_integration: radial distances of the integration points
        - self.RA, self.DEC: initialised to None here (not filled in this method)
    """
    self.MB_pos = [None] * self.nsamp
    self.r_integration = [None] * self.nsamp
    self.RA = [None] * self.nsamp
    self.DEC = [None] * self.nsamp

    for i in range(self.nsamp):
        myprint(f"Making MB data for sample {i}")

        # Get angular coordinates of all points (unit radial vectors per tracer)
        r_hat = projection.get_radial_vectors(self.coord_meas[i])

        # Get min and max distance to integrate over
        # robs is cz/100 — presumably distance in Mpc/h under a pure Hubble flow
        # with H0 = 100 h km/s/Mpc; TODO confirm units against callers
        robs = self.cz_obs[i] / 100
        sigr = np.sqrt((self.sig_v / 100) ** 2 + (self.frac_sig_rhMpc[i] * robs)**2)
        rmin = robs - self.Nsig * sigr
        # Clip the lower limit away from zero (jax functional in-place update)
        rmin = rmin.at[rmin <= 0].set(self.L[0] / self.N[0] / 100.)
        rmax = robs + self.Nsig * sigr
        # Cap the upper limit at the maximum radius considered by the model
        rmax = rmax.at[rmax > self.R_lim].set(self.R_lim)

        # Compute coordinates of integration points
        self.r_integration[i] = np.linspace(rmin, rmax, self.Nint_points)
        # (Nint_points, Nt, 1) * (Nt, 3) broadcast to Cartesian positions, then
        # transposed so tracers index the leading axis
        cartesian_pos_MB = np.expand_dims(self.r_integration[i], axis=2) * r_hat
        self.r_integration[i] = self.r_integration[i].T
        self.MB_pos[i] = cartesian_pos_MB
        self.MB_pos[i] = jnp.transpose(self.MB_pos[i], (2, 1, 0))
def generateMockData(self, s_hat: np.ndarray, state: borg.likelihood.MarkovState,) -> None:
"""
Generates mock data by simulating the forward model with the given white noise,
@ -245,63 +166,39 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
if self.run_type == 'data':
raise NotImplementedError
elif self.run_type == 'velmass':
self.coord_true, self.coord_meas, self.sig_mu, self.vr_true, self.cz_obs = \
mock_maker.velmass_mock(self.ini_file, seed=self.mock_seed)
raise NotImplementedError
elif self.run_type == 'mock':
self.coord_true, self.coord_meas, self.sig_mu, self.vr_true, self.cz_obs = \
mock_maker.borg_mock(s_hat, state, self.fwd, self.fwd_vel, self.ini_file, seed=self.mock_seed)
self.tracer_type, self.RA, self.Dec, self.cz_obs, self.m_true, self.m_obs, self.eta_true, self.eta_obs, self.stretch_true, self.stretch_obs, self.c_true, self.c_obs, self.xtrue = borg_mock(s_hat, state, self.fwd, self.fwd_vel, self.ini_file, seed=self.mock_seed)
else:
raise NotImplementedError
# Save mock to file
with h5py.File(f'tracer_data_{self.run_type}.h5', 'w') as h5file:
for i in range(self.nsamp):
sample_group = h5file.create_group(f'sample_{i}')
sample_group.create_dataset('coord_true', data=self.coord_true[i])
sample_group.create_dataset('coord_meas', data=self.coord_meas[i])
sample_group.create_dataset('sig_mu', data=self.sig_mu[i])
sample_group.create_dataset('vr_true', data=self.vr_true[i])
sample_group.create_dataset('cz_obs', data=self.cz_obs[i])
for i in range(self.nsamp):
self.MB_pos[i] = utils.generateMBData(
self.RA[i], self.Dec[i], self.cz_obs[i], self.L[0], self.N[0],
self.R_lim, self.Nsig, self.Nint_points, self.sig_v, self.frac_sigma_r[i])
self.r_hMpc = [np.sqrt(np.sum(self.coord_meas[i] ** 2, axis=0)) for i in range(self.nsamp)]
# Save mock to file
# with h5py.File(f'tracer_data_{self.run_type}.h5', 'w') as h5file:
# for i in range(self.nsamp):
# sample_group = h5file.create_group(f'sample_{i}')
# sample_group.create_dataset('coord_true', data=self.coord_true[i])
# Check that the measured coordinates all lie within the box
for i in range(self.nsamp):
if np.amax(self.r_hMpc[i]) > self.R_lim:
raise ValueError('All tracers must have measured coordinates within R_lim')
# for i in range(self.nsamp):
# if np.amax(self.r_hMpc[i]) > self.R_lim:
# raise ValueError('All tracers must have measured coordinates within R_lim')
self.generateMBData()
myprint('From mock')
self.saved_s_hat = s_hat.copy()
self.logLikelihoodComplex(s_hat, False)
quit()
def loadMockData(self, state: borg.likelihood.MarkovState) -> None:
myprint('Loading previously made mock data')
self.coord_true = [None] * self.nsamp
self.coord_meas = [None] * self.nsamp
self.sig_mu = [None] * self.nsamp
self.vr_true = [None] * self.nsamp
self.cz_obs = [None] * self.nsamp
self.r_hMpc = [None] * self.nsamp
with h5py.File(f'tracer_data_{self.run_type}.h5', 'r') as f:
for i in range(self.nsamp):
self.coord_true[i] = jnp.array(f[f'sample_{i}/coord_true'][:])
self.coord_meas[i] = jnp.array(f[f'sample_{i}/coord_meas'][:])
self.sig_mu[i] = f[f'sample_{i}/sig_mu'][()]
self.vr_true[i] = jnp.array(f[f'sample_{i}/vr_true'][:])
self.cz_obs[i] = jnp.array(f[f'sample_{i}/cz_obs'][:])
self.r_hMpc[i] = np.sqrt(np.sum(self.coord_meas[i] ** 2, axis=0))
# Check that the measured coordinates all lie within the box
for i in range(self.nsamp):
if np.amax(self.r_hMpc[i]) > self.R_lim:
raise ValueError('All tracers must have measured coordinates within R_lim')
self.generateMBData()
raise NotImplementedError
def dens2like(self, output_density: np.ndarray, output_velocity: np.ndarray):
"""
@ -326,35 +223,7 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
omega_m = self.fwd.getCosmoParams().omega_m
# self.lkl_ind = [None] * self.nsamp
# global temp_lkl_ind
for i in range(self.nsamp):
muA = self.model_params[f'mua{i}']
alpha = self.model_params[f'alpha{i}']
lam = self.model_params[f'lam{i}']
lkl += vel2like(
self.cz_obs[i],
v,
output_density,
self.MB_pos[i],
self.r_integration[i],
self.r_hMpc[i],
self.sig_mu[i],
sig_v,
omega_m,
muA,
alpha,
lam,
self.L[0],
jnp.array(self.fwd.getOutputBoxModel().xmin),
self.interp_order,
self.bias_epsilon,
self.R_max
)
# self.lkl_ind[i] = temp_lkl_ind.copy()
raise NotImplementedError
# Add in bulk flow prior
lkl += jnp.sum(0.5 * jnp.log(2 * np.pi) + jnp.log(self.sigma_bulk / jnp.sqrt(3)) + self.bulk_flow ** 2 / 2. / (self.sigma_bulk / jnp.sqrt(3)) ** 2)
@ -393,11 +262,6 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
self.model_params[k] = self.fwd_param.getModelParam('nullforward', k)
self.updateCosmology(self.fwd.getCosmoParams()) # for sigma8 -> As
# fname = f'{os.getcwd()}/s_hat.npy'
# myprint(f'Saving s_hat field to {fname}')
# np.save(fname, s_hat)
# myprint('Done')
if not skip_density:
myprint('Getting density field now')
@ -407,7 +271,7 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
self.fwd.forwardModel_v2(s_hat)
self.fwd.getDensityFinal(output_density)
myprint('Getting velocity field now')
myprint('Getting velocity field')
# Get velocity field
output_velocity = self.fwd_vel.getVelocityField()
@ -477,123 +341,6 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
state["BORG_final_density"][:] = self.delta
# @partial(jax.jit, static_argnames=['L_BOX', 'interp_order', 'bias_epsilon'])
def vel2like(cz_obs, v, MB_field, MB_pos, r, r_hMpc, sig_mu, sig_v, omega_m, muA, alpha, lam,
             L_BOX, X_MIN, interp_order, bias_epsilon, R_max):
    """
    Jitted part of dens2like.

    Computes the negative log-likelihood of the observed redshifts cz_obs by
    marginalising over distance along each tracer's line of sight: the velocity
    and density fields are interpolated onto the integration points MB_pos,
    a radial distance prior p(r) is built from the distance-modulus scatter,
    the biased density and a radial selection term, and the redshift likelihood
    is integrated against p(r).

    Returns:
        - lkl: minus the sum over tracers of the log marginal likelihood
    """
    # Interpolate the velocity field onto the line-of-sight integration points
    tracer_vel = projection.interp_field(v,
                                         MB_pos,
                                         L_BOX,
                                         X_MIN,
                                         interp_order,
                                         use_jitted=True,
                                         )
    # Project onto the radial direction (observer at the origin)
    tracer_vr = projection.project_radial(
                    tracer_vel,
                    MB_pos,
                    jnp.zeros(3,)
    )

    # Remove the velocity term
    # tracer_vr = jnp.zeros(tracer_vr.shape)

    # Convert velocities to redshifts: cz = c*zcos + (1 + zcos) * v_r
    zco = utils.z_cos(r, omega_m)
    cz_pred = utils.speed_of_light * zco + (1 + zco) * tracer_vr
    delta_cz_sigv = (cz_pred - jnp.expand_dims(cz_obs, axis=1)) / sig_v

    # Get p_r: distance-modulus mismatch between integration radius and measured
    # distance, scaled by the calibration parameter muA
    delta_mu = 5. * jnp.log10(r / jnp.expand_dims(r_hMpc,axis=1) * muA)

    # Get los biased density
    los_density = projection.interp_field(
        MB_field,
        MB_pos,
        L_BOX,
        X_MIN,
        interp_order,
        use_jitted=True,
    )
    # Clamp 1 + delta at zero, then apply the power-law bias
    los_density = jax.nn.relu(1. + los_density)
    los_density = jnp.power(los_density + bias_epsilon, alpha)

    # Remove bias term
    # los_density = jnp.ones(los_density.shape)

    d2 = (delta_mu / sig_mu) ** 2
    best = jnp.amin(jnp.abs(d2), axis=1)

    # Multiply p_r by arbitrary number for numerical stability (cancels in p_r / p_r_norm)
    d2 = d2 - jnp.expand_dims(jnp.nanmin(d2, axis=1), axis=1)

    # Radial prior: volume element * distance-modulus likelihood * biased density
    # * exponential radial selection with rate lam
    p_r = r ** 2 * jnp.exp(-0.5 * d2) * los_density * jnp.exp(- lam * r / R_max)
    p_r_norm = jnp.expand_dims(jnp.trapezoid(p_r, r, axis=1), axis=1)

    # Integrate to get likelihood (shift by the per-tracer minimum for stability,
    # then restore it via `scale` in the log)
    d2 = delta_cz_sigv**2
    scale = jnp.nanmin(d2, axis=1)
    d2 = d2 - jnp.expand_dims(scale, axis=1)
    exp_delta_cz = jnp.exp(-0.5 * d2)

    p_cz = jnp.trapezoid(exp_delta_cz * p_r / p_r_norm, r, axis=1)
    lkl_ind = jnp.log(p_cz) - scale / 2 - 0.5 * jnp.log(2 * np.pi * sig_v**2)
    lkl = - lkl_ind.sum()

    # global temp_lkl_ind
    # temp_lkl_ind = lkl_ind

    return lkl
@derive_prior
def transform_mua(x):
    """Map the unconstrained variable x onto the uniform prior range for mua."""
    bounds = model_params_prior['mua']
    return samplers.transform_uniform(x, bounds[0], bounds[1])

def inv_transform_mua(alpha):
    """Inverse of transform_mua: map mua back to the unconstrained space."""
    bounds = model_params_prior['mua']
    return samplers.inv_transform_uniform(alpha, bounds[0], bounds[1])
@derive_prior
def transform_alpha(x):
    """Map the unconstrained variable x onto the uniform prior range for alpha."""
    bounds = model_params_prior['alpha']
    return samplers.transform_uniform(x, bounds[0], bounds[1])

def inv_transform_alpha(alpha):
    """Inverse of transform_alpha: map alpha back to the unconstrained space."""
    bounds = model_params_prior['alpha']
    return samplers.inv_transform_uniform(alpha, bounds[0], bounds[1])
@derive_prior
def transform_lam(x):
    """Map the unconstrained variable x onto the uniform prior range for lam."""
    bounds = model_params_prior['lam']
    return samplers.transform_uniform(x, bounds[0], bounds[1])

def inv_transform_lam(lam):
    """Inverse of transform_lam: map lam back to the unconstrained space."""
    bounds = model_params_prior['lam']
    return samplers.inv_transform_uniform(lam, bounds[0], bounds[1])
@derive_prior
def transform_sig_v(x):
    """Map the unconstrained variable x onto the uniform prior range for sig_v."""
    bounds = model_params_prior['sig_v']
    return samplers.transform_uniform(x, bounds[0], bounds[1])

def inv_transform_sig_v(alpha):
    """Inverse of transform_sig_v: map sig_v back to the unconstrained space."""
    bounds = model_params_prior['sig_v']
    return samplers.inv_transform_uniform(alpha, bounds[0], bounds[1])
@derive_prior
def transform_bulk_flow(x):
    """Map the unconstrained variable x onto the uniform prior range for bulk_flow."""
    bounds = model_params_prior['bulk_flow']
    return samplers.transform_uniform(x, bounds[0], bounds[1])

def inv_transform_bulk_flow(alpha):
    """Inverse of transform_bulk_flow: map bulk_flow back to the unconstrained space."""
    bounds = model_params_prior['bulk_flow']
    return samplers.inv_transform_uniform(alpha, bounds[0], bounds[1])
@borg.registerGravityBuilder
def build_gravity_model(state: borg.likelihood.MarkovState, box: borg.forward.BoxModel, ini_file=None) -> borg.forward.BaseForwardModel:
"""
@ -725,7 +472,6 @@ def check_model_sampling(loop):
def check_cosmo_sampling(loop):
return loop.getStepID() > begin_cosmo
@borg.registerSamplerBuilder
def build_sampler(
state: borg.likelihood.MarkovState,
@ -947,3 +693,16 @@ def build_likelihood(state: borg.likelihood.MarkovState, info: borg.likelihood.L
likelihood = VelocityBORGLikelihood(chain, fwd_param, fwd_vel, borg.getIniConfigurationFilename())
return likelihood
"""
To Do:
- Add in the true colours etc. to the model parameter sampling
- Call to the correct log-likelihoods
- Sample the variables from priors
- Check global variables are used
- Create minimal ini file
- Docstrings
- Don't want all these samplers - only gradient ones
- Save mock to file
"""

View file

@ -0,0 +1,949 @@
import numpy as np
import aquila_borg as borg
import jax.numpy as jnp
import configparser
import warnings
import aquila_borg as borg
#import symbolic_pofk.linear
import jax
from jax import lax
import jaxlib
from functools import partial
import ast
import numbers
import h5py
import re
import os
import borg_velocity.utils as utils
from borg_velocity.utils import myprint, compute_As, get_sigma_bulk
import borg_velocity.forwards as forwards
import borg_velocity.mock_maker as mock_maker
import borg_velocity.projection as projection
from borg_velocity.samplers import HMCBiasSampler, derive_prior, MVSliceBiasSampler, BlackJaxBiasSampler, TransformedBlackJaxBiasSampler
import borg_velocity.samplers as samplers
class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
    """
    HADES likelihood for distance-tracers
    """

    def __init__(self, fwd: borg.forward.BaseForwardModel, param_model: forwards.NullForward, fwd_vel: borg.forward.BaseForwardModel, ini_file: str) -> None:
        """
        Initialises the VelocityBORGLikelihood class

        Args:
            - fwd (borg.forward.BaseForwardModel): The forward model to be used in the likelihood.
            - param_model (forwards.NullForward): An empty forward model for storing model parameters.
            - fwd_vel (borg.forward.BaseForwardModel): The forward model used to compute the velocity field.
            - ini_file (str): The name of the ini file containing the model and borg parameters.
        """

        self.ini_file = ini_file

        myprint("Reading from configuration file: " + ini_file)
        config = configparser.ConfigParser()
        config.read(ini_file)

        # Grid parameters (number of cells and box lengths per axis)
        self.N = [int(config['system'][f'N{i}']) for i in range(3)]
        self.L = [float(config['system'][f'L{i}']) for i in range(3)]

        # Catalogues: per-sample tracer counts and nuisance-parameter defaults
        self.nsamp = int(config['run']['nsamp'])
        assert self.nsamp > 0, "Need at least one sample to run"
        self.Nt = [int(config[f'sample_{i}']['Nt']) for i in range(self.nsamp)]
        self.alpha = [float(config[f'sample_{i}']['alpha']) for i in range(self.nsamp)]
        self.lam = [float(config[f'sample_{i}']['lam']) for i in range(self.nsamp)]
        self.muA = [float(config[f'sample_{i}']['muA']) for i in range(self.nsamp)]
        self.frac_sig_rhMpc = [float(config[f'sample_{i}']['frac_sig_rhMpc']) for i in range(self.nsamp)]

        # What type of run we're doing ('data', 'velmass' or 'mock')
        self.run_type = config['run']['run_type']

        # Seed if creating mocks
        self.mock_seed = int(config['mock']['seed'])
        self.R_max = float(config['mock']['r_max'])

        # Model parameters
        self.ai = float(config['model']['ai'])
        self.af = float(config['model']['af'])
        self.sig_v = float(config['model']['sig_v'])
        if config['model']['R_lim'] == 'none':
            # Default integration limit: half the box length
            self.R_lim = self.L[0]/2
        else:
            self.R_lim = float(config['model']['R_lim'])
        self.Nint_points = int(config['model']['nint_points'])
        self.Nsig = float(config['model']['Nsig'])
        self.sig_v = float(config['model']['sig_v'])  # NOTE(review): duplicate of the read a few lines above — harmless but redundant
        self.smooth_R = float(config['model']['smooth_R'])
        self.bias_epsilon = float(config['model']['bias_epsilon'])
        self.interp_order = int(config['model']['interp_order'])

        # Load bulk flow (3-vector, km/s)
        self.bulk_flow = np.array(ast.literal_eval(config['model']['bulk_flow']))

        # Cap used when the log-likelihood is non-finite
        self.bignum = float(config['mcmc']['bignum'])

        myprint(f" Init {self.N}, {self.L}")
        super().__init__(fwd, self.N, self.L)

        # Define the forward models
        self.fwd = fwd
        self.fwd_param = param_model
        self.fwd_vel = fwd_vel

        # Initialise model parameters (per-sample nuisances + global velocity terms)
        self.model_params = {
            **{f'mua{i}':self.muA[i] for i in range(self.nsamp)},
            **{f'alpha{i}':self.alpha[i] for i in range(self.nsamp)},
            **{f'lam{i}':self.lam[i] for i in range(self.nsamp)},
            **{f'bulk_flow_{d}':self.bulk_flow[i] for i, d in enumerate(['x', 'y', 'z'])},
            'sig_v':self.sig_v
        }
        self.fwd_param.setModelParams(self.model_params)

        # Initialise cosmological parameters
        cpar = utils.get_cosmopar(self.ini_file)
        self.fwd.setCosmoParams(cpar)
        self.fwd_param.setCosmoParams(cpar)
        self.updateCosmology(cpar)
        myprint(f"Original cosmological parameters: {self.fwd.getCosmoParams()}")

        # Gradient of the likelihood w.r.t. (density, velocity) for the adjoint pass
        self.grad_like = jax.grad(self.dens2like, argnums=(0,1))

        # Find out what kind of run this is (only meaningful when embedded in hades)
        if borg.EMBEDDED:
            self.action = utils.get_action()
        else:
            self.action = None
def initializeLikelihood(self, state: borg.likelihood.MarkovState) -> None:
"""
Initialise the likelihood internal variables and MarkovState variables.
Args:
- state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
"""
myprint("Init likelihood")
# for i in range(self.nsamp):
# state.newArray1d(f"tracer_vr_{i}", self.Nt[i], True)
# if self.run_type != 'data':
# state.newArray1d(f"data_x_{i}", self.Nt[i], True)
# state.newArray1d(f"data_y_{i}", self.Nt[i], True)
# state.newArray1d(f"data_z_{i}", self.Nt[i], True)
# state.newArray1d(f"true_x_{i}", self.Nt[i], True)
# state.newArray1d(f"true_y_{i}", self.Nt[i], True)
# state.newArray1d(f"true_z_{i}", self.Nt[i], True)
# self.data = [state[f"tracer_vr_{i}"] for i in range(self.nsamp)]
state.newArray3d("BORG_final_density", *self.fwd.getOutputBoxModel().N, True)
if self.run_type == 'data':
self.loadObservedData(make_plot=False)
elif borg.EMBEDDED:
if self.action == 'INIT':
pass # Data will be loaded later
elif self.action == 'RESUME':
self.loadMockData(state) # load data from mock_data.h5
else:
myprint(f'Unknown action: {self.action}')
raise NotImplementedError
def updateMetaParameters(self, state: borg.likelihood.MarkovState) -> None:
"""
Update the meta parameters of the sampler (not sampled) from the MarkovState.
Args:
- state (borg.likelihood.MarkovState): The state object to be used in the likelihood.
"""
cpar = state['cosmology']
cpar.omega_q = 1. - cpar.omega_m - cpar.omega_k
self.fwd.setCosmoParams(cpar)
self.fwd_param.setCosmoParams(cpar)
def updateCosmology(self, cosmo: borg.cosmo.CosmologicalParameters) -> None:
"""
Updates the forward model's cosmological parameters with the given values.
Args:
- cosmo (borg.cosmo.CosmologicalParameters): The cosmological parameters.
"""
cpar = cosmo
# Convert sigma8 to As
cpar = compute_As(cpar)
# cpar.A_s = 1.e-9 * symbolic_pofk.linear.sigma8_to_As(
# cpar.sigma8, cpar.omega_m, cpar.omega_b, cpar.h, cpar.n_s)
myprint(f"Updating cosmology Om = {cosmo.omega_m}, sig8 = {cosmo.sigma8}, As = {cosmo.A_s}")
cpar.omega_q = 1. - cpar.omega_m - cpar.omega_k
self.fwd.setCosmoParams(cpar)
self.fwd_param.setCosmoParams(cpar)
self.sigma_bulk = get_sigma_bulk(self.L[0], self.fwd.getCosmoParams())
def generateMBData(self) -> None:
"""
Generate points along the line of sight of each tracer to enable marginalisation
over distance uncertainty. The central distance is given such that the observed
redshift equals the cosmological redshift at this distance. The range is then
+/- Nsig * sig, where
sig^2 = (sig_v/100)^2 + sig_r^2
and sig_v is the velocity uncertainty in km/s
"""
self.MB_pos = [None] * self.nsamp
self.r_integration = [None] * self.nsamp
self.RA = [None] * self.nsamp
self.DEC = [None] * self.nsamp
for i in range(self.nsamp):
myprint(f"Making MB data for sample {i}")
# Get angular coordinates of all points
r_hat = projection.get_radial_vectors(self.coord_meas[i])
# Get min and max distance to integrate over
robs = self.cz_obs[i] / 100
sigr = np.sqrt((self.sig_v / 100) ** 2 + (self.frac_sig_rhMpc[i] * robs)**2)
rmin = robs - self.Nsig * sigr
rmin = rmin.at[rmin <= 0].set(self.L[0] / self.N[0] / 100.)
rmax = robs + self.Nsig * sigr
rmax = rmax.at[rmax > self.R_lim].set(self.R_lim)
# Compute coordinates of integration points
self.r_integration[i] = np.linspace(rmin, rmax, self.Nint_points)
cartesian_pos_MB = np.expand_dims(self.r_integration[i], axis=2) * r_hat
self.r_integration[i] = self.r_integration[i].T
self.MB_pos[i] = cartesian_pos_MB
self.MB_pos[i] = jnp.transpose(self.MB_pos[i], (2, 1, 0))
    def generateMockData(self, s_hat: np.ndarray, state: borg.likelihood.MarkovState,) -> None:
        """
        Generates mock data by simulating the forward model with the given white noise,
        drawing distance tracers from the density field, computing their distance
        moduli and radial velocities, and adding Gaussian noise to the appropriate
        variables. Also calculates the initial negative log-likelihood of the data.

        Args:
            - s_hat (np.ndarray): The input (initial) density field.
            - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
        """
        # Draw the tracer catalogues according to the configured run type
        if self.run_type == 'data':
            raise NotImplementedError
        elif self.run_type == 'velmass':
            self.coord_true, self.coord_meas, self.sig_mu, self.vr_true, self.cz_obs = \
                mock_maker.velmass_mock(self.ini_file, seed=self.mock_seed)
        elif self.run_type == 'mock':
            self.coord_true, self.coord_meas, self.sig_mu, self.vr_true, self.cz_obs = \
                mock_maker.borg_mock(s_hat, state, self.fwd, self.fwd_vel, self.ini_file, seed=self.mock_seed)
        else:
            raise NotImplementedError

        # Save mock to file so a resumed run can reload identical data (see loadMockData)
        with h5py.File(f'tracer_data_{self.run_type}.h5', 'w') as h5file:
            for i in range(self.nsamp):
                sample_group = h5file.create_group(f'sample_{i}')
                sample_group.create_dataset('coord_true', data=self.coord_true[i])
                sample_group.create_dataset('coord_meas', data=self.coord_meas[i])
                sample_group.create_dataset('sig_mu', data=self.sig_mu[i])
                sample_group.create_dataset('vr_true', data=self.vr_true[i])
                sample_group.create_dataset('cz_obs', data=self.cz_obs[i])

        # Measured comoving radii of the tracers (Mpc/h)
        self.r_hMpc = [np.sqrt(np.sum(self.coord_meas[i] ** 2, axis=0)) for i in range(self.nsamp)]

        # Check that the measured coordinates all lie within the box
        for i in range(self.nsamp):
            if np.amax(self.r_hMpc[i]) > self.R_lim:
                raise ValueError('All tracers must have measured coordinates within R_lim')

        self.generateMBData()
        myprint('From mock')
        self.saved_s_hat = s_hat.copy()
        # Evaluate the initial negative log-likelihood of the freshly made mock
        self.logLikelihoodComplex(s_hat, False)
def loadMockData(self, state: borg.likelihood.MarkovState) -> None:
myprint('Loading previously made mock data')
self.coord_true = [None] * self.nsamp
self.coord_meas = [None] * self.nsamp
self.sig_mu = [None] * self.nsamp
self.vr_true = [None] * self.nsamp
self.cz_obs = [None] * self.nsamp
self.r_hMpc = [None] * self.nsamp
with h5py.File(f'tracer_data_{self.run_type}.h5', 'r') as f:
for i in range(self.nsamp):
self.coord_true[i] = jnp.array(f[f'sample_{i}/coord_true'][:])
self.coord_meas[i] = jnp.array(f[f'sample_{i}/coord_meas'][:])
self.sig_mu[i] = f[f'sample_{i}/sig_mu'][()]
self.vr_true[i] = jnp.array(f[f'sample_{i}/vr_true'][:])
self.cz_obs[i] = jnp.array(f[f'sample_{i}/cz_obs'][:])
self.r_hMpc[i] = np.sqrt(np.sum(self.coord_meas[i] ** 2, axis=0))
# Check that the measured coordinates all lie within the box
for i in range(self.nsamp):
if np.amax(self.r_hMpc[i]) > self.R_lim:
raise ValueError('All tracers must have measured coordinates within R_lim')
self.generateMBData()
    def dens2like(self, output_density: np.ndarray, output_velocity: np.ndarray):
        """
        Given stored distance tracer data, computes the negative log-likelihood of the data
        for this final density field.

        Args:
            output_density (np.ndarray): The z=0 density field.
            output_velocity (np.ndarray): The z=0 velocity field (km/s), shape (3, N, N, N).

        Return:
            lkl (float): The negative log-likelihood of the data.
        """
        lkl = 0

        sig_v = self.model_params['sig_v']

        # Compute velocity field: add the (sampled) bulk flow to the forward-modelled velocities
        self.bulk_flow = jnp.array([self.model_params['bulk_flow_x'],
                                    self.model_params['bulk_flow_y'],
                                    self.model_params['bulk_flow_z']])
        v = output_velocity + self.bulk_flow.reshape((3, 1, 1, 1))

        omega_m = self.fwd.getCosmoParams().omega_m

        # Accumulate the per-catalogue contribution with that catalogue's nuisances
        for i in range(self.nsamp):
            muA = self.model_params[f'mua{i}']
            alpha = self.model_params[f'alpha{i}']
            lam = self.model_params[f'lam{i}']
            lkl += vel2like(
                self.cz_obs[i],
                v,
                output_density,
                self.MB_pos[i],
                self.r_integration[i],
                self.r_hMpc[i],
                self.sig_mu[i],
                sig_v,
                omega_m,
                muA,
                alpha,
                lam,
                self.L[0],
                jnp.array(self.fwd.getOutputBoxModel().xmin),
                self.interp_order,
                self.bias_epsilon,
                self.R_max
            )

        # Add in bulk flow prior: isotropic Gaussian, std sigma_bulk/sqrt(3) per component
        lkl += jnp.sum(0.5 * jnp.log(2 * np.pi) + jnp.log(self.sigma_bulk / jnp.sqrt(3)) + self.bulk_flow ** 2 / 2. / (self.sigma_bulk / jnp.sqrt(3)) ** 2)

        # Guard against non-finite values in a jax-traceable way (a plain `if` would
        # not work under jit/grad tracing)
        lkl = lax.cond(
            jnp.isfinite(lkl),
            lambda _: lkl,            # If True (finite), return lkl
            lambda _: self.bignum,    # If False (not finite), return self.bignum
            operand=None              # No additional operand needed here
        )

        return lkl
def logLikelihoodComplex(self, s_hat: np.ndarray, gradientIsNext: bool, skip_density: bool=False, update_from_model: bool=True):
"""
Calculates the negative log-likelihood of the data.
Args:
- s_hat (np.ndarray): The input white noise.
- gradientIsNext (bool): If True, prepares the forward model for gradient calculations.
- skip_density (bool, default=False): If True, do not repeat the s_hat -> density, velocity computation
and use the stored result
- update_from_model (bool, default=True): If True, update self.model_params with self.fwd_param.getModelParam
Returns:
The negative log-likelihood value.
"""
N = self.fwd.getBoxModel().N[0]
L = self.fwd.getOutputBoxModel().L[0]
if update_from_model:
for k in self.model_params.keys():
self.model_params[k] = self.fwd_param.getModelParam('nullforward', k)
self.updateCosmology(self.fwd.getCosmoParams()) # for sigma8 -> As
# fname = f'{os.getcwd()}/s_hat.npy'
# myprint(f'Saving s_hat field to {fname}')
# np.save(fname, s_hat)
# myprint('Done')
if not skip_density:
myprint('Getting density field now')
# Run BORG density field
output_density = np.zeros((N,N,N))
self.fwd.forwardModel_v2(s_hat)
self.fwd.getDensityFinal(output_density)
myprint('Getting velocity field now')
# Get velocity field
output_velocity = self.fwd_vel.getVelocityField()
else:
output_density = self.delta.copy()
output_velocity = self.vel.copy()
L = self.dens2like(output_density, output_velocity)
if isinstance(L, numbers.Number) or isinstance(L, jaxlib.xla_extension.ArrayImpl):
myprint(f"var(s_hat): {np.var(s_hat)}, Call to logLike: {L}")
myprint(self.model_params)
myprint(self.fwd.getCosmoParams())
self.delta = output_density.copy()
self.vel = output_velocity.copy()
return L
def gradientLikelihoodComplex(self, s_hat: np.ndarray):
"""
Calculates the adjoint negative log-likelihood of the data.
Args:
- s_hat (np.ndarray): The input density field.
Returns:
The adjoint negative log-likelihood gradient.
"""
N = self.fwd.getBoxModel().N[0]
L = self.fwd.getOutputBoxModel().L[0]
# Run BORG density field
output_density = np.zeros((N,N,N))
self.fwd.forwardModel_v2(s_hat)
self.fwd.getDensityFinal(output_density)
# Get velocity field
output_velocity = self.fwd_vel.getVelocityField()
# getlike(dens, vel)
mygradient, velgradient = self.grad_like(output_density, output_velocity)
mygradient = np.array(mygradient, dtype=np.float64)
vgradient = np.array(velgradient, dtype=np.float64)
self.fwd_vel.computeAdjointModel(vgradient)
self.fwd.adjointModel_v2(mygradient)
mygrad_hat = np.zeros(s_hat.shape, dtype=np.complex128)
self.fwd.getAdjointModel(mygrad_hat)
self.fwd.clearAdjointGradient()
return mygrad_hat
    def commitAuxiliaryFields(self, state: borg.likelihood.MarkovState) -> None:
        """
        Commits the final density field to the Markov state.

        Args:
            - state (borg.state.State): The state object containing the final density field.
        """
        # Re-evaluate with the cached fields so auxiliary quantities (e.g. sigma_bulk)
        # are consistent with the current cosmology before committing
        self.updateCosmology(self.fwd.getCosmoParams())
        self.dens2like(self.delta, self.vel)
        state["BORG_final_density"][:] = self.delta
# @partial(jax.jit, static_argnames=['L_BOX', 'interp_order', 'bias_epsilon'])
def vel2like(cz_obs, v, MB_field, MB_pos, r, r_hMpc, sig_mu, sig_v, omega_m, muA, alpha, lam,
        L_BOX, X_MIN, interp_order, bias_epsilon, R_max):
    """
    Jitted part of dens2like: negative log-likelihood of one tracer catalogue,
    marginalising over the true comoving distance of each tracer along its
    line of sight.

    Args:
        - cz_obs: Observed redshift velocities (km/s), shape (Nt,)
        - v: Velocity field including bulk flow (km/s), shape (3, N, N, N)
        - MB_field: Over-density field used in the radial prior, shape (N, N, N)
        - MB_pos: Cartesian coordinates of the integration points (Mpc/h)
        - r: Radii of the integration points (Mpc/h), one row per tracer
        - r_hMpc: Measured comoving distances of the tracers (Mpc/h), shape (Nt,)
        - sig_mu: Uncertainty on the distance moduli
        - sig_v: Velocity uncertainty (km/s)
        - omega_m: Matter density parameter
        - muA, alpha, lam: Catalogue nuisance parameters (distance-modulus offset,
          density bias power, radial selection scale)
        - L_BOX (float): Box length (Mpc/h)
        - X_MIN: Coordinate of the box corner (Mpc/h)
        - interp_order (int): Interpolation order from grid points to the line of sight
        - bias_epsilon (float): Small number added to 1 + delta to prevent 0^alpha
        - R_max (float): Scale of the exponential radial selection

    Returns:
        - lkl: Negative log-likelihood of this catalogue (scalar).
    """
    # Velocity interpolated to the integration points, then projected radially
    tracer_vel = projection.interp_field(v,
                                         MB_pos,
                                         L_BOX,
                                         X_MIN,
                                         interp_order,
                                         use_jitted=True,
                                         )
    tracer_vr = projection.project_radial(
        tracer_vel,
        MB_pos,
        jnp.zeros(3,)
    )

    # Convert velocities to redshifts
    zco = utils.z_cos(r, omega_m)
    cz_pred = utils.speed_of_light * zco + (1 + zco) * tracer_vr
    delta_cz_sigv = (cz_pred - jnp.expand_dims(cz_obs, axis=1)) / sig_v

    # Distance-modulus residual at each trial radius
    delta_mu = 5. * jnp.log10(r / jnp.expand_dims(r_hMpc, axis=1) * muA)

    # Get los biased density
    los_density = projection.interp_field(
        MB_field,
        MB_pos,
        L_BOX,
        X_MIN,
        interp_order,
        use_jitted=True,
    )
    los_density = jax.nn.relu(1. + los_density)
    los_density = jnp.power(los_density + bias_epsilon, alpha)

    # NOTE: the original computed `best = jnp.amin(jnp.abs(d2), axis=1)` here but
    # never used it; the dead computation has been removed.
    d2 = (delta_mu / sig_mu) ** 2
    # Multiply p_r by arbitrary number for numerical stability (cancels in p_r / p_r_norm)
    d2 = d2 - jnp.expand_dims(jnp.nanmin(d2, axis=1), axis=1)
    p_r = r ** 2 * jnp.exp(-0.5 * d2) * los_density * jnp.exp(- lam * r / R_max)
    p_r_norm = jnp.expand_dims(jnp.trapezoid(p_r, r, axis=1), axis=1)

    # Integrate to get likelihood (rescale by the per-tracer minimum for stability)
    d2 = delta_cz_sigv**2
    scale = jnp.nanmin(d2, axis=1)
    d2 = d2 - jnp.expand_dims(scale, axis=1)
    exp_delta_cz = jnp.exp(-0.5 * d2)
    p_cz = jnp.trapezoid(exp_delta_cz * p_r / p_r_norm, r, axis=1)
    lkl_ind = jnp.log(p_cz) - scale / 2 - 0.5 * jnp.log(2 * np.pi * sig_v**2)
    lkl = - lkl_ind.sum()

    return lkl
@derive_prior
def transform_mua(x):
    """Map a unit-interval draw onto the uniform prior range for mua."""
    lo, hi = model_params_prior['mua']
    return samplers.transform_uniform(x, lo, hi)


def inv_transform_mua(alpha):
    """Map a mua value back to the unit interval (inverse of transform_mua)."""
    lo, hi = model_params_prior['mua']
    return samplers.inv_transform_uniform(alpha, lo, hi)
@derive_prior
def transform_alpha(x):
    """Map a unit-interval draw onto the uniform prior range for alpha."""
    lo, hi = model_params_prior['alpha']
    return samplers.transform_uniform(x, lo, hi)


def inv_transform_alpha(alpha):
    """Map an alpha value back to the unit interval (inverse of transform_alpha)."""
    lo, hi = model_params_prior['alpha']
    return samplers.inv_transform_uniform(alpha, lo, hi)
@derive_prior
def transform_lam(x):
    """Map a unit-interval draw onto the uniform prior range for lam."""
    lo, hi = model_params_prior['lam']
    return samplers.transform_uniform(x, lo, hi)


def inv_transform_lam(lam):
    """Map a lam value back to the unit interval (inverse of transform_lam)."""
    lo, hi = model_params_prior['lam']
    return samplers.inv_transform_uniform(lam, lo, hi)
@derive_prior
def transform_sig_v(x):
    """Map a unit-interval draw onto the uniform prior range for sig_v."""
    lo, hi = model_params_prior['sig_v']
    return samplers.transform_uniform(x, lo, hi)


def inv_transform_sig_v(alpha):
    """Map a sig_v value back to the unit interval (inverse of transform_sig_v)."""
    lo, hi = model_params_prior['sig_v']
    return samplers.inv_transform_uniform(alpha, lo, hi)
@derive_prior
def transform_bulk_flow(x):
    """Map a unit-interval draw onto the uniform prior range for a bulk-flow component."""
    lo, hi = model_params_prior['bulk_flow']
    return samplers.transform_uniform(x, lo, hi)


def inv_transform_bulk_flow(alpha):
    """Map a bulk-flow component back to the unit interval (inverse of transform_bulk_flow)."""
    lo, hi = model_params_prior['bulk_flow']
    return samplers.inv_transform_uniform(alpha, lo, hi)
@borg.registerGravityBuilder
def build_gravity_model(state: borg.likelihood.MarkovState, box: borg.forward.BoxModel, ini_file=None) -> borg.forward.BaseForwardModel:
    """
    Builds the gravity model and returns the forward model chain.

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
        - box (borg.forward.BoxModel): The input box model.
        - ini_file (str, default=None): The location of the ini file. If None, use borg.getIniConfigurationFilename()

    Returns:
        borg.forward.BaseForwardModel: The forward model.
    """
    global chain, fwd_param, fwd_vel
    myprint("Building gravity model")

    if ini_file is None:
        myprint("Reading from configuration file: " + borg.getIniConfigurationFilename())
        config = configparser.ConfigParser()
        config.read(borg.getIniConfigurationFilename())
    else:
        myprint("Reading from configuration file: " + ini_file)
        config = configparser.ConfigParser()
        config.read(ini_file)
    ai = float(config['model']['ai'])
    af = float(config['model']['af'])
    supersampling = int(config['model']['supersampling'])
    # forcesampling is only required by the particle-mesh gravity models
    if config['model']['gravity'] in ['pm', 'cola']:
        forcesampling = int(config['model']['forcesampling'])

    # Setup forward model
    chain = borg.forward.ChainForwardModel(box)
    chain.addModel(borg.forward.models.HermiticEnforcer(box))

    # CLASS transfer function
    chain @= borg.forward.model_lib.M_PRIMORDIAL_AS(box)
    transfer_class = borg.forward.model_lib.M_TRANSFER_CLASS(box, opts=dict(a_transfer=1.0))
    transfer_class.setModelParams({"extra_class_arguments":{'YHe':'0.24'}})
    chain @= transfer_class

    # Gravity solver selected from the ini file
    # NOTE(review): all branches pass a_initial=af (not ai) — confirm this is intentional
    if config['model']['gravity'] == 'linear':
        raise NotImplementedError(config['model']['gravity'])
    elif config['model']['gravity'] == 'lpt':
        mod = borg.forward.model_lib.M_LPT_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      lightcone=False,
                      part_factor=1.01,))
    elif config['model']['gravity'] == '2lpt':
        mod = borg.forward.model_lib.M_2LPT_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      lightcone=False,
                      part_factor=1.01,))
    elif config['model']['gravity'] == 'pm':
        mod = borg.forward.model_lib.M_PM_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      part_factor=1.01,
                      forcesampling=forcesampling,
                      pm_start_z=1/ai - 1,
                      pm_nsteps=int(config['model']['nsteps']),
                      tcola=False))
    elif config['model']['gravity'] == 'cola':
        mod = borg.forward.model_lib.M_PM_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      part_factor=1.01,
                      forcesampling=forcesampling,
                      pm_start_z=1/ai - 1,
                      pm_nsteps=int(config['model']['nsteps']),
                      tcola=True))
    else:
        raise NotImplementedError(config['model']['gravity'])

    # Accumulate adjoint gradients for the HMC
    mod.accumulateAdjoint(True)
    chain @= mod

    # Cosmological parameters
    if ini_file is None:
        cpar = utils.get_cosmopar(borg.getIniConfigurationFilename())
    else:
        cpar = utils.get_cosmopar(ini_file)
    chain.setCosmoParams(cpar)

    # This is the forward model for the model parameters
    fwd_param = borg.forward.ChainForwardModel(box)
    mod_null = forwards.NullForward(box)
    fwd_param.addModel(mod_null)
    fwd_param.setCosmoParams(cpar)

    # This is the forward model for velocity
    velmodel_name = config['model']['velocity']
    velmodel = getattr(borg.forward.velocity, velmodel_name)
    if velmodel_name == 'LinearModel':
        fwd_vel = velmodel(box, mod, af)
    elif velmodel_name == 'CICModel':
        rsmooth = float(config['model']['rsmooth'])  # Mpc/h
        fwd_vel = velmodel(box, mod, rsmooth)
    else:
        fwd_vel = velmodel(box, mod)

    return chain
# Module-level handles kept alive for the BORG sampler machinery
_glob_model = None       # model-parameter sampler instance (set in build_sampler)
_glob_cosmo = None       # cosmology sampler instance (set in build_sampler)
begin_model = None       # MCMC step after which model-parameter sampling starts
begin_cosmo = None       # MCMC step after which cosmology sampling starts
model_params_prior = {}  # prior bounds per sampled parameter name (used by the transforms)
def check_model_sampling(loop):
    """Return True once the main loop has passed the model-parameter warm-up."""
    return begin_model < loop.getStepID()
def check_cosmo_sampling(loop):
    """Return True once the main loop has passed the cosmology warm-up."""
    return begin_cosmo < loop.getStepID()
@borg.registerSamplerBuilder
def build_sampler(
    state: borg.likelihood.MarkovState,
    info: borg.likelihood.LikelihoodInfo,
    loop: borg.samplers.MainLoop
):
    """
    Builds the sampler and returns it.
    Which parameters to sample are given in the ini file.
    We assume all parameters are NOT meant to be sampled, unless we find "XX_sampler_blocked = false" in the ini file

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
        - info (borg.likelihood.LikelihoodInfo): The likelihood information.
        - loop (borg.samplers.MainLoop): The main sampling loop; used to register
          warm-up condition groups for the cosmology and model-parameter samplers.

    Returns:
        List of samplers to use.
    """
    global _glob_model, _glob_cosmo, begin_model, begin_cosmo, model_params_prior
    borg.print_msg(borg.Level.std, "Hello sampler, loop is {l}, step_id={s}", l=loop, s=loop.getStepID())

    myprint("Building sampler")
    myprint("Reading from configuration file: " + borg.getIniConfigurationFilename())
    config = configparser.ConfigParser()
    config.read(borg.getIniConfigurationFilename())
    end = '_sampler_blocked'
    # A parameter is sampled iff "<name>_sampler_blocked = false" appears in [block_loop]
    to_sample = [k[:-len(end)] for (k, v) in config['block_loop'].items() if k[-len(end):] == end and v.lower() == 'false']
    myprint(f'Parameters to sample: {to_sample}')
    nsamp = int(config['run']['nsamp'])

    all_sampler = []

    # Cosmology sampler arguments
    prefix = ""
    params = []
    initial_values = {}
    prior = {}
    for p in ["omega_m", "sigma8"]:
        if p not in to_sample:
            continue
        if p in config['prior'].keys() and p in config['cosmology'].keys():
            myprint(f'Adding {p} sampler')
            params.append(f"cosmology.{p}")
            initial_values[f"cosmology.{p}"] = float(config['cosmology'][p])
            prior[f"cosmology.{p}"] = np.array(ast.literal_eval(config['prior'][p]))
        else:
            s = f'Could not find {p} prior and/or default, so will not sample'
            warnings.warn(s, stacklevel=2)
        # Remove for later to prevent duplication
        to_sample.remove(p)
    begin_cosmo = int(config['mcmc']['warmup_cosmo'])

    if len(params) > 0:
        myprint('Adding cosmological parameter sampler')
        cosmo_sampler = borg.samplers.ModelParamsSampler(prefix, params, likelihood, chain, initial_values, prior)
        cosmo_sampler.setName("cosmo_sampler")
        _glob_cosmo = cosmo_sampler
        all_sampler.append(cosmo_sampler)
        loop.push(cosmo_sampler)
        # Only start sampling cosmology after the warm-up period
        loop.addToConditionGroup("warmup_cosmo", "cosmo_sampler")
        loop.addConditionToConditionGroup("warmup_cosmo", partial(check_cosmo_sampling, loop))

    # Model parameter sampler
    prefix = ""
    params = []
    initial_values = {}
    prior = {}
    transform_attributes = []
    inv_transform_attributes = []

    for p in to_sample:
        if p in config['prior'].keys():
            if p == 'sig_v':
                # Single global parameter
                myprint(f'Adding {p} sampler')
                params.append(p)
                initial_values[p] = float(config['model'][p])
                # Priors may contain 'inf'; parse it into np.inf
                if 'inf' in config['prior'][p]:
                    x = ast.literal_eval(config['prior'][p].replace('inf', '"inf"'))
                    prior[p] = np.array([xx if xx != 'inf' else np.inf for xx in x])
                else:
                    prior[p] = np.array(ast.literal_eval(config['prior'][p]))
                model_params_prior[p] = prior[p]
                transform_attributes.append(globals().get(f'transform_{p}'))
                inv_transform_attributes.append(globals().get(f'inv_transform_{p}'))
            elif p == 'bulk_flow':
                # One parameter per Cartesian component
                for i, d in enumerate(['_x', '_y', '_z']):
                    myprint(f'Adding {p}{d} sampler')
                    params.append(f'{p}{d}')
                    initial_values[f'{p}{d}'] = np.array(ast.literal_eval(config['model']['bulk_flow']))[i]
                    if 'inf' in config['prior'][p]:
                        x = ast.literal_eval(config['prior'][p].replace('inf', '"inf"'))
                        prior[f'{p}{d}'] = np.array([xx if xx != 'inf' else np.inf for xx in x])
                    else:
                        prior[f'{p}{d}'] = np.array(ast.literal_eval(config['prior'][p]))
                    transform_attributes.append(globals().get(f'transform_{p}'))
                    inv_transform_attributes.append(globals().get(f'inv_transform_{p}'))
                model_params_prior[p] = prior[f'{p}_x']
            else:
                # Per-catalogue nuisance: one entry per sample index
                for i in range(nsamp):
                    myprint(f'Adding {p}{i} sampler')
                    params.append(f'{p}{i}')
                    initial_values[f'{p}{i}'] = float(config[f'sample_{i}'][p])
                    if 'inf' in config['prior'][p]:
                        x = ast.literal_eval(config['prior'][p].replace('inf', '"inf"'))
                        prior[f'{p}{i}'] = np.array([xx if xx != 'inf' else np.inf for xx in x])
                    else:
                        prior[f'{p}{i}'] = np.array(ast.literal_eval(config['prior'][p]))
                    transform_attributes.append(globals().get(f'transform_{p}'))
                    inv_transform_attributes.append(globals().get(f'inv_transform_{p}'))
                model_params_prior[p] = prior[f'{p}{0}']
        else:
            s = f'Could not find {p} prior, so will not sample'
            warnings.warn(s, stacklevel=2)

    begin_model = int(config['mcmc']['warmup_model'])
    if len(params) > 0:
        myprint('Adding model parameter sampler')
        if config['sampling']['algorithm'].lower() == 'slice':
            model_sampler = borg.samplers.ModelParamsSampler(prefix, params, likelihood, fwd_param, initial_values, prior)
        elif config['sampling']['algorithm'].lower() == 'hmc':
            model_sampler = HMCBiasSampler(
                "model_params",
                likelihood,
                params,
                transform_attributes=transform_attributes,
                inv_transform_attributes=inv_transform_attributes,
                prior = [p.prior for p in transform_attributes],
                Nsteps = int(config['sampling']['nsteps']),
                epsilon = float(config['sampling']['epsilon']),
                refresh = float(config['sampling']['refresh']),
            )
            # Record the sampled parameter ordering for post-processing
            with open('model_params.txt', 'w') as file:
                for item in params:
                    file.write(f"{item}\n")
        elif config['sampling']['algorithm'].lower() == 'mvslice':
            lam = [None] * len(params)
            for i, p in enumerate(params):
                if p.startswith('bulk_flow'):
                    lam[i] = float(config['sampling'][f'mvlam_bulk_flow'])
                elif p[-1].isdigit():
                    # Search for a non-digit before end of reversed string
                    # NOTE(review): for names like 'alpha0' this strips exactly one
                    # trailing character; with multi-digit indices (nsamp > 10, e.g.
                    # 'alpha10') the slice keeps part of the index and the config
                    # lookup would use the wrong key — confirm / consider rstrip(digits)
                    match = re.search(r'\D(?=\d*$)', p[::-1])
                    lam[i] = float(config['sampling'][f'mvlam_{p[:match.start()]}'])
                else:
                    lam[i] = float(config['sampling'][f'mvlam_{p}'])
            model_sampler = MVSliceBiasSampler(
                "model_params",
                likelihood,
                params,
                lam
            )
            with open('model_params.txt', 'w') as file:
                for item in params:
                    file.write(f"{item}\n")
        elif config['sampling']['algorithm'].lower() == 'blackjax':
            # state.newArray1d("inverse_mass_matrix", len(params), True)
            # state.newScalar('step_size', 0., True)
            model_sampler = BlackJaxBiasSampler(
                "model_params",
                likelihood,
                params,
                int(config['sampling']['rng_seed']),
                int(config['sampling']['warmup_nsteps']),
                float(config['sampling']['warmup_target_acceptance_rate']),
            )
            with open('model_params.txt', 'w') as file:
                for item in params:
                    file.write(f"{item}\n")
        elif config['sampling']['algorithm'].lower() == 'transformedblackjax':
            model_sampler = TransformedBlackJaxBiasSampler(
                "model_params",
                likelihood,
                params,
                transform_attributes=transform_attributes,
                inv_transform_attributes=inv_transform_attributes,
                prior = [p.prior for p in transform_attributes],
                rng_seed = int(config['sampling']['rng_seed']),
                warmup_nsteps = int(config['sampling']['warmup_nsteps']),
                warmup_target_acceptance_rate = float(config['sampling']['warmup_target_acceptance_rate']),
            )
            with open('model_params.txt', 'w') as file:
                for item in params:
                    file.write(f"{item}\n")
        else:
            raise NotImplementedError
        model_sampler.setName("model_sampler")
        _glob_model = model_sampler
        loop.push(model_sampler)
        all_sampler.append(model_sampler)
        # Only start sampling model parameters after the warm-up period
        loop.addToConditionGroup("warmup_model", "model_sampler")
        loop.addConditionToConditionGroup("warmup_model", partial(check_model_sampling, loop))
    myprint('Done')

    return []
@borg.registerLikelihoodBuilder
def build_likelihood(state: borg.likelihood.MarkovState, info: borg.likelihood.LikelihoodInfo) -> borg.likelihood.BaseLikelihood:
    """
    Builds the likelihood object and returns it.

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
        - info (borg.likelihood.LikelihoodInfo): The likelihood information.

    Returns:
        borg.likelihood.BaseLikelihood: The likelihood object.
    """
    global likelihood, fwd_param
    myprint("Building likelihood")
    myprint(chain.getCosmoParams())
    # NOTE: removed the unused local `boxm = chain.getBoxModel()` from the original.
    likelihood = VelocityBORGLikelihood(chain, fwd_param, fwd_vel, borg.getIniConfigurationFilename())
    return likelihood

View file

@ -8,6 +8,10 @@ import jax.numpy as jnp
import scipy.integrate
import math
import re
import numpy as np
import jax.numpy as jnp
from astropy.coordinates import SkyCoord
import astropy.units as apu
# Output stream management
cons = borg.console()
@ -194,6 +198,55 @@ def parse_file_to_dict(file_path):
return config_dict
def generateMBData(RA, Dec, cz_obs, L, N, R_lim, Nsig, Nint_points, sigma_v, frac_sigma_r):
    """
    Generate points along the line of sight of each tracer to enable marginalisation
    over distance uncertainty. The central distance is given such that the observed
    redshift equals the cosmological redshift at this distance. The range is then
    +/- Nsig * sig, where
        sig^2 = (sig_v/100)^2 + sig_r^2
    and sig_v is the velocity uncertainty in km/s

    Args:
        - RA (np.ndarray): Right Ascension (degrees) of the tracers (shape = (Nt,))
        - Dec (np.ndarray): Declination (degrees) of the tracers (shape = (Nt,))
        - cz_obs (np.ndarray): Observed redshifts (km/s) of the tracers (shape = (Nt,))
        - L (float): Box length (Mpc/h)
        - N (int): Number of grid cells per side
        - R_lim (float): Maximum allowed (true) comoving distance of a tracer (Mpc/h)
        - Nsig (float): How many standard deviations away from the predicted radius to use
        - Nint_points (int): Number of radii over which to integrate the likelihood
        - sigma_v (float): Uncertainty on the velocity field (km/s)
        - frac_sigma_r (float): An estimate of the fractional uncertainty on the positions of tracers

    Returns:
        - MB_pos (jnp.ndarray): Comoving coordinates of integration points to use in likelihood (Mpc/h).
          The shape is (3, Nt, Nint_points)
    """
    myprint(f"Making MB data")

    # Convert RA, DEC to radial unit vectors, shape (Nt, 3)
    r_hat = np.array(SkyCoord(ra=RA*apu.deg, dec=Dec*apu.deg).cartesian.xyz).T

    # Get min and max distance to integrate over
    # cz = 100 h r, so sigma_v corresponds to a sigma_r of ~ sigma_v / 100
    robs = cz_obs / 100
    sigr = np.sqrt((sigma_v / 100) ** 2 + (frac_sigma_r * robs)**2)
    # Clip the limits with np.where: the original used jax's `.at[...]` indexed
    # update, which raises AttributeError on the np.ndarray inputs this function
    # documents; np.where works for both numpy and jax arrays.
    rmin = robs - Nsig * sigr
    rmin = np.where(rmin <= 0, L / N / 100., rmin)
    rmax = robs + Nsig * sigr
    rmax = np.where(rmax > R_lim, R_lim, rmax)

    # Compute coordinates of integration points, shape (Nint_points, Nt, 3)
    r_integration = np.linspace(rmin, rmax, Nint_points)
    MB_pos = np.expand_dims(r_integration, axis=2) * r_hat[None,...]
    # Reorder to (3, Nt, Nint_points)
    MB_pos = jnp.transpose(MB_pos, (2, 1, 0))

    return MB_pos
if __name__ == "__main__":
    # Smoke test: parse cosmological parameters from an example configuration file
    cpar = get_cosmopar('../conf/basic_ini.ini')

162
conf/borg_mock_ini.ini Normal file
View file

@ -0,0 +1,162 @@
[system]
console_output = borg_log
VERBOSE_LEVEL = 2
N0 = 64
N1 = 64
N2 = 64
L0 = 500.0
L1 = 500.0
L2 = 500.0
corner0 = -250.0
corner1 = -250.0
corner2 = -250.0
NUM_MODES = 100
test_mode = true
seed_cpower = true
[block_loop]
hades_sampler_blocked = false
bias_sampler_blocked = true
nmean_sampler_blocked = true
sigma8_sampler_blocked = true
omega_m_sampler_blocked = true
alpha_sampler_blocked = false
sig_v_sampler_blocked = false
bulk_flow_sampler_blocked = false
ares_heat = 1.0
[mcmc]
number_to_generate = 4
warmup_model = 200
warmup_cosmo = 800
random_ic = false
init_random_scaling = 0.1
bignum = 1e20
[hades]
algorithm = HMC
max_epsilon = 0.01
max_timesteps = 50
mixing = 1
[sampling]
algorithm = slice
epsilon = 0.005
Nsteps = 20
refresh = 0.1
rng_seed = 1
warmup_nsteps = 400
warmup_target_acceptance_rate = 0.7
[model]
gravity = lpt
forcesampling = 4
supersampling = 2
velocity = CICModel
af = 1.0
ai = 0.05
nsteps = 20
Rsmooth = 4.
bias_epsilon = 1e-7
interp_order = 1
sig_v = 150.
R_lim = 220
Nint_points = 201
Nsig = 10
bulk_flow = [0.0, 0.0, 0.0]
[prior]
omega_m = [0.1, 0.8]
sigma8 = [0.1, 1.5]
alpha = [0.0, 10.0]
sig_v = [50.0, 200.0]
bulk_flow = [-200.0, 200.0]
[cosmology]
omega_r = 0
fnl = 0
omega_k = 0
omega_m = 0.315
omega_b = 0.049
omega_q = 0.685
h100 = 0.68
sigma8 = 0.81
n_s = 0.97
w = -1
wprime = 0
beta = 1.5
z0 = 0
[run]
run_type = mock
NCAT = 0
NSAMP = 4
[mock]
seed = 12345
R_max = 100
[python]
likelihood_path = /home/bartlett/fsigma8/borg_velocity/borg_velocity/likelihood.py
[sample_0]
tracer_type = tfr
Nt = 1247
alpha = 1.4
a_TFR = -23
b_TFR = -8.2
sigma_TFR = 0.3
sigma_m = 0.042
sigma_eta = 0.01
hyper_eta_mu = 0.08
hyper_eta_sigma = 0.13
mthresh = 11.25
frac_sig_rhMpc = 0.07
[sample_1]
tracer_type = tfr
Nt = 2010
alpha = 1.4
a_TFR = -23
b_TFR = -8.2
sigma_TFR = 0.3
sigma_m = 0.042
sigma_eta = 0.01
hyper_eta_mu = 0.08
hyper_eta_sigma = 0.13
mthresh = 11.25
frac_sig_rhMpc = 0.07
[sample_2]
tracer_type = sn
Nt = 125
alpha = 1.4
a_tripp = 0.140
b_tripp = 2.78
M_SN = -18.558
sigma_SN = 0.082
sigma_m = 0.04
sigma_stretch = 0.17
sigma_c = 0.04
hyper_stretch_mu = -0.04
hyper_stretch_sigma = 1.23
hyper_c_mu = -0.02
hyper_c_sigma = 0.08
frac_sig_rhMpc = 0.07
[sample_3]
tracer_type = sn
Nt = 55
alpha = 1.4
a_tripp = 0.140
b_tripp = 2.78
M_SN = -18.558
sigma_SN = 0.082
sigma_m = 0.04
sigma_stretch = 0.17
sigma_c = 0.04
hyper_stretch_mu = -0.04
hyper_stretch_sigma = 1.23
hyper_c_mu = -0.02
hyper_c_sigma = 0.08
frac_sig_rhMpc = 0.07

View file

@ -16,7 +16,7 @@ set -e
# Path variables
BORG=/data101/bartlett/build_borg/tools/hades_python/hades_python
RUN_DIR=/data101/bartlett/fsigma8/borg_velocity/velmass_test
RUN_DIR=/data101/bartlett/fsigma8/borg_velocity/borg_mock_test
mkdir -p $RUN_DIR
cd $RUN_DIR
@ -29,7 +29,7 @@ BASH_XTRACEFD="3"
set -x
# Just ICs
INI_FILE=/home/bartlett/fsigma8/borg_velocity/conf/velmass_ini.ini
INI_FILE=/home/bartlett/fsigma8/borg_velocity/conf/borg_mock_ini.ini
cp $INI_FILE ini_file.ini
$BORG INIT ini_file.ini
# $BORG RESUME ini_file.ini