# borg_velocity/tests/tfr_inference.py
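"""
Test script for Tully-Fisher relation (TFR) inference with BORG.

Builds a BORG forward model for the density and velocity fields, generates a
mock TFR catalogue (positions, apparent magnitudes, linewidths and redshifts),
and defines a numpyro likelihood for the TFR parameters (work in progress).
"""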
import aquila_borg as borg
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as apu
import astropy.constants
from astropy.cosmology import LambdaCDM
import pandas as pd
from scipy.interpolate import interp1d
import jax.numpy as jnp
import jax.scipy.special
import numpyro
import numpyro.distributions as dist
from jax import lax
import borg_velocity.poisson_process as poisson_process
import borg_velocity.projection as projection
# Output stream management
cons = borg.console()
myprint = lambda x: cons.print_std(x) if type(x) == str else cons.print_std(repr(x))
def build_gravity_model(box, cpar, ai=0.05, af=1.0, nsteps=20, forcesampling=4, supersampling=2, rsmooth=4.0, gravity='lpt', velmodel_name='CICModel'):
"""
Builds the gravity model and returns the forward model chain.
Args:
- box (borg.forward.BoxModel): Box within which to run simulation
- cpar (borg.cosmo.CosmologicalParameters): Cosmological parameters to use
- ai (float, default=0.05): Scale factor to begin simulation
- af (float, default=1.0): Scale factor to end simulation
- nsteps (int, default=20): Number of steps to use in the simulation
- forcesampling (int, default=4): Sampling factor for force evaluations
- supersampling (int, default=2): Supersampling factor of particles
- rsmooth (float, default=4.0): Smoothing scale for velocity field (Mpc/h)
- gravity (str, default='lpt'): Which gravity model to use
- velmodel_name (str, default='CICModel'): Which velocity estimator to use
Returns:
- chain (borg.forward.BaseForwardModel): The forward model for density
- fwd_vel (borg.forward.VelocityBase): The forward model for velocity
"""
myprint(f"Building gravity model {gravity}")
# Setup forward model
chain = borg.forward.ChainForwardModel(box)
chain.addModel(borg.forward.models.HermiticEnforcer(box))
# CLASS transfer function
chain @= borg.forward.model_lib.M_PRIMORDIAL_AS(box)
transfer_class = borg.forward.model_lib.M_TRANSFER_CLASS(box, opts=dict(a_transfer=1.0))
transfer_class.setModelParams({"extra_class_arguments":{'YHe':'0.24'}})
chain @= transfer_class
if gravity == 'linear':
raise NotImplementedError(gravity)
elif gravity == 'lpt':
mod = borg.forward.model_lib.M_LPT_CIC(
box,
opts=dict(a_initial=af,
a_final=af,
do_rsd=False,
supersampling=supersampling,
lightcone=False,
part_factor=1.01,))
elif gravity == '2lpt':
mod = borg.forward.model_lib.M_2LPT_CIC(
box,
opts=dict(a_initial=af,
a_final=af,
do_rsd=False,
supersampling=supersampling,
lightcone=False,
part_factor=1.01,))
elif gravity == 'pm':
mod = borg.forward.model_lib.M_PM_CIC(
box,
opts=dict(a_initial=af,
a_final=af,
do_rsd=False,
supersampling=supersampling,
part_factor=1.01,
forcesampling=forcesampling,
pm_start_z=1/ai - 1,
pm_nsteps=nsteps,
tcola=False))
elif gravity == 'cola':
mod = borg.forward.model_lib.M_PM_CIC(
box,
opts=dict(a_initial=af,
a_final=af,
do_rsd=False,
supersampling=supersampling,
part_factor=1.01,
forcesampling=forcesampling,
pm_start_z=1/ai - 1,
pm_nsteps=nsteps,
tcola=True))
else:
raise NotImplementedError(gravity)
mod.accumulateAdjoint(True)
chain @= mod
# Cosmological parameters
chain.setCosmoParams(cpar)
# This is the forward model for velocity
velmodel = getattr(borg.forward.velocity, velmodel_name)
if velmodel_name == 'LinearModel':
fwd_vel = velmodel(box, mod, af)
elif velmodel_name == 'CICModel':
fwd_vel = velmodel(box, mod, rsmooth)
else:
fwd_vel = velmodel(box, mod)
return chain, fwd_vel
def get_fields(L, N, xmin, gravity='lpt', velmodel_name='CICModel'):
"""
Obtain a density and velocity field to use for mock
Args:
- L (float): Box length (Mpc/h)
- N (int): Number of grid cells per side
- xmin (float): Coordinate of corner of the box (Mpc/h)
- gravity (str, default='lpt'): Which gravity model to use
- velmodel_name (str, default='CICModel'): Which velocity estimator to use
Returns:
- cpar (borg.cosmo.CosmologicalParameters): Cosmological parameters to use
- output_density (np.ndarray): Over-density field
- output_velocity (np.ndarray): Velocity field
"""
# Setup box and cosmology
cpar = borg.cosmo.CosmologicalParameters()
cpar.default()
box = borg.forward.BoxModel()
box.L = (L, L, L)
box.N = (N, N, N)
box.xmin = (xmin, xmin, xmin)
# Get forward models
fwd_model, fwd_vel = build_gravity_model(box, cpar, gravity=gravity, velmodel_name=velmodel_name)
# Make some initial conditions
s_hat = np.fft.rfftn(np.random.randn(*box.N)) / box.Ntot ** (0.5)
# Obtain density and velocity fields
output_density = np.zeros(box.N)
fwd_model.forwardModel_v2(s_hat)
fwd_model.getDensityFinal(output_density)
output_velocity = fwd_vel.getVelocityField()
return cpar, output_density, output_velocity
def create_mock(Nt, L, xmin, cpar, dens, vel, Rmax, alpha, mthresh,
a_TFR, b_TFR, sigma_TFR, sigma_m, sigma_eta,
hyper_eta_mu, hyper_eta_sigma, sigma_v, interp_order=1, bias_epsilon=1e-7):
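    """
    Create mock TFR observations from a density and velocity field.
    Args:
    - Nt (int): Number of tracers to generate
    - L (float): Box length (Mpc/h)
    - xmin (float): Coordinate of corner of the box (Mpc/h)
    - cpar (borg.cosmo.CosmologicalParameters): Cosmological parameters to use
    - dens (np.ndarray): Over-density field
    - vel (np.ndarray): Velocity field (km/s)
    - Rmax (float): Maximum distance from the origin at which to place tracers (Mpc/h)
    - alpha (float): Exponent of the power-law bias model
    - mthresh (float): Apparent magnitude threshold of the selection
    - a_TFR (float): TFR zero-point
    - b_TFR (float): TFR slope
    - sigma_TFR (float): Intrinsic TFR scatter (mag)
    - sigma_m (float): Uncertainty on the observed apparent magnitudes (mag)
    - sigma_eta (float): Uncertainty on the observed linewidths
    - hyper_eta_mu (float): Mean of the linewidth (eta) prior
    - hyper_eta_sigma (float): Width of the linewidth (eta) prior
    - sigma_v (float): Noise on the radial peculiar velocities (km/s)
    - interp_order (int, default=1): Order of interpolation of the velocity field
    - bias_epsilon (float, default=1e-7): Small offset to avoid zeros in the bias model
    Returns:
    - zCMB (np.ndarray): Observed redshifts of the tracers
    - all_mobs (np.ndarray): Observed apparent magnitudes (mag)
    - all_etaobs (np.ndarray): Observed linewidths (eta)
    - all_xtrue (np.ndarray): True comoving positions of the tracers (Mpc/h)
    """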
# Initialize lists to store valid positions and corresponding sig_mu values
all_xtrue = np.empty((3, Nt))
all_mobs = np.empty(Nt)
all_etaobs = np.empty(Nt)
# Counter for accepted positions
accepted_count = 0
# Cosmology object needed for z <-> r
cosmo = LambdaCDM(H0 = cpar.h * 100, Om0 = cpar.omega_m, Ode0 = cpar.omega_q)
    # Precompute redshift-distance mapping
    z_grid = np.logspace(-4, -1, 10000)  # Covers z ~ 1e-4 to 0.1
    dc_grid = cosmo.comoving_distance(z_grid).value  # Comoving distances in Mpc
    # Create an interpolation function: comoving distance (Mpc) -> redshift
    dc_to_z = interp1d(dc_grid, z_grid, kind="cubic", fill_value="extrapolate")
# Bias model
phi = (1. + dens + bias_epsilon) ** alpha
# Only use centre of box
x = np.linspace(xmin, xmin + L, dens.shape[0]+1)
i0 = np.argmin(np.abs(x + Rmax))
i1 = np.argmin(np.abs(x - Rmax))
L_small = x[i1] - x[i0]
xmin_small = x[i0]
phi_small = phi[i0:i1, i0:i1, i0:i1]
# Loop until we have Nt valid positions
while accepted_count < Nt:
# Generate positions (comoving)
xtrue = poisson_process.sample_3d(phi_small, Nt, L_small, (xmin_small, xmin_small, xmin_small))
# Convert to RA, Dec, Distance (comoving)
rtrue = np.sqrt(np.sum(xtrue** 2, axis=0)) # Mpc/h
c = SkyCoord(x=xtrue[0], y=xtrue[1], z=xtrue[2], representation_type='cartesian')
RA = c.spherical.lon.degree
Dec = c.spherical.lat.degree
r_hat = np.array(SkyCoord(ra=RA*apu.deg, dec=Dec*apu.deg).cartesian.xyz)
        # Compute cosmological redshift from the comoving distance
        # (interpolation replaces the slower z_at_value(cosmo.comoving_distance, ...) call)
        zcosmo = dc_to_z(rtrue / cpar.h)
        # Compute luminosity distance (in Mpc, since we divide out h)
        dL = (1 + zcosmo) * rtrue / cpar.h  # Mpc
        # Compute true distance modulus
        mutrue = 5 * np.log10(dL) + 25
# Sample true linewidth (eta) from its prior
etatrue = hyper_eta_mu + hyper_eta_sigma * np.random.randn(Nt)
# Obtain muTFR from mutrue using the intrinsic scatter
muTFR = mutrue + sigma_TFR * np.random.randn(Nt)
# Obtain apparent magnitude from the TFR
mtrue = muTFR + (a_TFR + b_TFR * etatrue)
# Scatter true observed apparent magnitudes and linewidths
mobs = mtrue + sigma_m * np.random.randn(Nt)
etaobs = etatrue + sigma_eta * np.random.randn(Nt)
        # Apply apparent magnitude cut
m = mobs <= mthresh
mobs = mobs[m]
etaobs = etaobs[m]
xtrue = xtrue[:,m]
# Calculate how many valid positions we need to reach Nt
remaining_needed = Nt - accepted_count
selected_count = min(xtrue.shape[1], remaining_needed)
# Append only the needed number of valid positions
all_xtrue[:,accepted_count:accepted_count+selected_count] = xtrue[:,:selected_count]
        all_mobs[accepted_count:accepted_count+selected_count] = mobs[:selected_count]
        all_etaobs[accepted_count:accepted_count+selected_count] = etaobs[:selected_count]
# Update the accepted count
accepted_count += selected_count
myprint(f'\tMade {accepted_count} of {Nt}')
# Get the radial component of the peculiar velocity at the positions of the objects
myprint('Obtaining peculiar velocities')
tracer_vel = projection.interp_field(
vel,
np.expand_dims(all_xtrue, axis=2),
L,
np.array([xmin, xmin, xmin]),
interp_order
) # km/s
myprint('Radial projection')
vr_true = np.squeeze(projection.project_radial(
tracer_vel,
np.expand_dims(all_xtrue, axis=2),
np.zeros(3,)
)) # km/s
    # Obtain total observed redshift
    # Recompute the cosmological redshift for the accepted positions (zcosmo from the
    # loop above only refers to the final batch of sampled positions)
    zcosmo = dc_to_z(np.sqrt(np.sum(all_xtrue ** 2, axis=0)) / cpar.h)
    vr_noised = vr_true + sigma_v * np.random.randn(Nt)
    zCMB = (1 + zcosmo) * (1 + vr_noised / astropy.constants.c.to('km/s').value) - 1
return zCMB, all_mobs, all_etaobs, all_xtrue
def estimate_data_parameters():
"""
ID 2MASS XSC ID name (HHMMSSss+DDMMSSs)
RAdeg Right ascension (J2000)
DEdeg Declination (J2000)
cz2mrs Heliocentric redshift from the 2MRS (km/s)
Kmag NIR magnitudes in the K band from the 2MRS (mag)
Hmag NIR magnitudes in the H band from the 2MRS (mag)
Jmag NIR magnitudes in the J band from the 2MRS (mag)
e_Kmag Error of the NIR magnitudes in K band from the (mag)
e_Hmag Error of the NIR magnitudes in H band from the (mag)
e_Jmag Error of the NIR magnitudes in J band from the (mag)
WHIc Corrected HI width (km/s)
e_WHIc Error of corrected HI width (km/s)
"""
    columns = ['ID', 'RAdeg', 'DEdeg', 'cz2mrs', 'Kmag', 'Hmag', 'Jmag', 'e_Kmag', 'e_Hmag', 'e_Jmag', 'WHIc', 'e_WHIc']
    fname = '/data101/bartlett/fsigma8/PV_data/2MASS/table1.dat'
    df = pd.read_csv(fname, sep=r'\s+', names=columns)
sigma_m = np.median(df['e_Kmag'])
eta = np.log10(df['WHIc']) - 2.5
sigma_eta = np.median(df['e_WHIc'] / df['WHIc'] / np.log(10))
hyper_eta_mu = np.median(eta)
hyper_eta_sigma = (np.percentile(eta, 84) - np.percentile(eta, 16)) / 2
return sigma_m, sigma_eta, hyper_eta_mu, hyper_eta_sigma
def likelihood(a_TFR, b_TFR, sigma_TFR, eta_true, m_true):
    loglike = 0
    # Apparent magnitude terms: Gaussian noise truncated by the selection mobs <= mthresh
    norm = 0.5 * (1 + jax.scipy.special.erf((mthresh - m_true) / (jnp.sqrt(2) * sigma_m)))
    loglike += (
        - 0.5 * jnp.sum((mobs - m_true) ** 2 / sigma_m ** 2)
        - jnp.sum(jnp.log(norm))
        - Nt * 0.5 * jnp.log(2 * jnp.pi * sigma_m ** 2)
    )
    # Linewidth terms: Gaussian noise on the observed eta
    loglike += (
        - 0.5 * jnp.sum((etaobs - eta_true) ** 2 / sigma_eta ** 2)
        - Nt * 0.5 * jnp.log(2 * jnp.pi * sigma_eta ** 2)
    )
    # TO DO: Add the peculiar velocity terms
    return loglike
def likelihood_model():
    # TO DO: Sort out these priors (alpha_prior, mean and sigma are placeholders here)
a_TFR = numpyro.sample("a_TFR", dist.Uniform(*alpha_prior))
b_TFR = numpyro.sample("b_TFR", dist.Uniform(*alpha_prior))
sigma_TFR = numpyro.sample("sigma_TFR", dist.Uniform(*alpha_prior))
eta_true = numpyro.sample("eta_true", dist.Normal(mean, sigma), sample_shape=(Nt,))
m_true = numpyro.sample("m_true", dist.Normal(mean, sigma), sample_shape=(Nt,))
numpyro.sample("obs", TFRLikelihood(a_TFR, b_TFR, sigma_TFR, eta_true, m_true))
return
class TFRLikelihood(dist.Distribution):
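    """
    Custom numpyro distribution whose log_prob evaluates the TFR likelihood
    (see likelihood()) for the given TFR parameters and latent variables.
    """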
support = dist.constraints.real
def __init__(self, a_TFR, b_TFR, sigma_TFR, eta_true, m_true):
self.a_TFR, self.b_TFR, self.sigma_TFR, self.eta_true, self.m_true = dist.util.promote_shapes(a_TFR, b_TFR, sigma_TFR, eta_true, m_true)
batch_shape = lax.broadcast_shapes(
jnp.shape(a_TFR),
jnp.shape(b_TFR),
jnp.shape(sigma_TFR),
jnp.shape(eta_true),
jnp.shape(m_true),
)
super(TFRLikelihood, self).__init__(batch_shape = batch_shape)
def sample(self, key, sample_shape=()):
raise NotImplementedError
    def log_prob(self, value):
        return likelihood(self.a_TFR, self.b_TFR, self.sigma_TFR, self.eta_true, self.m_true)
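# --- Illustrative sketch (not part of the original pipeline) ---
# One possible way to sample the numpyro model above with NUTS. The function name
# run_mcmc_sketch is hypothetical, and it assumes the globals used by likelihood()
# and likelihood_model() (mthresh, sigma_m, sigma_eta, mobs, etaobs, Nt, alpha_prior,
# mean, sigma) have been defined, e.g. from estimate_data_parameters() and create_mock().
def run_mcmc_sketch(rng_seed=0, num_warmup=500, num_samples=500):
    rng_key = jax.random.PRNGKey(rng_seed)
    kernel = numpyro.infer.NUTS(likelihood_model)
    mcmc = numpyro.infer.MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)
    mcmc.run(rng_key)
    return mcmc.get_samples()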
def main():
# Get some parameters from the data
sigma_m, sigma_eta, hyper_eta_mu, hyper_eta_sigma = estimate_data_parameters()
# Other parameters to use
    L = 500.0        # Box length (Mpc/h)
    N = 64           # Number of grid cells per side
    xmin = -L/2      # Coordinate of the box corner (Mpc/h)
    Rmax = 100       # Maximum distance from the origin for mock tracers (Mpc/h)
    Nt = 2000        # Number of tracers
    alpha = 1.4      # Exponent of the power-law bias model
    mthresh = 11.25  # Apparent magnitude threshold of the selection
    a_TFR = -23      # TFR zero-point
    b_TFR = -8.2     # TFR slope
    sigma_TFR = 0.3  # Intrinsic TFR scatter (mag)
    sigma_v = 150    # Velocity noise (km/s)
cpar, dens, vel = get_fields(L, N, xmin)
zCMB, mobs, etaobs, xtrue = create_mock(
Nt, L, xmin, cpar, dens, vel, Rmax, alpha, mthresh,
a_TFR, b_TFR, sigma_TFR, sigma_m, sigma_eta,
hyper_eta_mu, hyper_eta_sigma, sigma_v)
if __name__ == "__main__":
main()