Minimum example to give MCMC samples
parent 85b93c539f
commit 17f9099e55
5 changed files with 168 additions and 18 deletions
@@ -3,6 +3,7 @@ import jax
import jax.numpy as jnp
from functools import partial

from borg_velocity.utils import myprint

class NullForward(borg.forward.BaseForwardModel):
    """
@@ -4,6 +4,9 @@ import configparser
import warnings
import aquila_borg as borg
import symbolic_pofk.linear
import jax
from functools import partial
import ast

import borg_velocity.utils as utils
from borg_velocity.utils import myprint
@@ -48,6 +51,8 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
        self.run_type = config['run']['run_type']

        # Model parameters
        self.ai = float(config['model']['ai'])
        self.af = float(config['model']['af'])
        self.sig_v = float(config['model']['sig_v'])
        if config['model']['R_lim'] == 'none':
            self.R_lim = self.L[0]/2
@@ -55,7 +60,16 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
            self.R_lim = float(config['model']['R_lim'])
        self.Nint_points = int(config['model']['nint_points'])
        self.Nsig = float(config['model']['Nsig'])
        self.sig_v = float(config['model']['sig_v'])
        self.smooth_R = float(config['model']['smooth_R'])
        self.bias_epsilon = float(config['model']['bias_epsilon'])
        self.interp_order = int(config['model']['interp_order'])

        # Load bulk flow
        self.bulk_flow = np.array(ast.literal_eval(config['model']['bulk_flow']))

        # For log-likelihood values
        self.bignum = float(config['mcmc']['bignum'])

        myprint(f" Init {self.N}, {self.L}")
        super().__init__(fwd, self.N, self.L)
@@ -64,6 +78,25 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
        self.fwd = fwd
        self.fwd_param = param_model

        # Initialise model parameters
        model_params = {
            **{f'mua{i}':self.muA[i] for i in range(self.nsamp)},
            **{f'alpha{i}':self.alpha[i] for i in range(self.nsamp)},
            **{f'bulk_flow_{d}':self.bulk_flow[i] for i, d in enumerate(['x', 'y', 'z'])},
            'sig_v':self.sig_v
        }
        self.fwd_param.setModelParams(model_params)

        # Initialise cosmological parameters
        cpar = utils.get_cosmopar(self.ini_file)
        self.fwd.setCosmoParams(cpar)
        self.fwd_param.setCosmoParams(cpar)
        self.updateCosmology(cpar)
        myprint(f"Original cosmological parameters: {self.fwd.getCosmoParams()}")

        # Initialise derivative
        self.grad_like = jax.grad(self.dens2like)


    def initializeLikelihood(self, state: borg.likelihood.MarkovState) -> None:
        """
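Note: for a concrete picture of the dictionary passed to setModelParams above, with nsamp = 2 it carries one mua and one alpha entry per tracer catalogue plus the three bulk-flow components and sig_v. The values below are placeholders for illustration only, not taken from any run:

# Illustrative shape of model_params for nsamp = 2 (placeholder values)
model_params = {
    'mua0': 1.0, 'mua1': 1.0,        # distance-scaling amplitude per catalogue
    'alpha0': 1.4, 'alpha1': 1.4,    # bias exponent per catalogue
    'bulk_flow_x': 0.0, 'bulk_flow_y': 0.0, 'bulk_flow_z': 0.0,
    'sig_v': 150.
}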
@@ -120,6 +153,12 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
        cpar.omega_q = 1. - cpar.omega_m - cpar.omega_k
        self.fwd.setCosmoParams(cpar)
        self.fwd_param.setCosmoParams(cpar)

        # Compute growth rate
        cosmology = borg.cosmo.Cosmology(cosmo)
        f = cosmology.gplus(self.af)  # dD / da
        f *= self.af / cosmology.d_plus(self.af)  # f = dlnD / dlna
        self.f = f


    def generateMBData(self) -> None:
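Note: the two growth-rate lines encode the standard definition, assuming (as the inline comments state) that gplus returns dD/da and d_plus returns D(a), evaluated at a = af:

    f(a) \equiv \frac{\mathrm{d}\ln D}{\mathrm{d}\ln a} = \frac{a}{D(a)}\,\frac{\mathrm{d}D}{\mathrm{d}a}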
@@ -133,7 +172,7 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
        """

        self.MB_pos = [None] * self.nsamp
        self.r = [None] * self.nsamp
        self.r_integration = [None] * self.nsamp
        self.RA = [None] * self.nsamp
        self.DEC = [None] * self.nsamp

@@ -152,10 +191,11 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
            rmax = rmax.at[rmax > self.R_lim].set(self.R_lim)

            # Compute coordinates of integration points
            self.r[i] = np.linspace(rmin, rmax, self.Nint_points)
            cartesian_pos_MB = np.expand_dims(self.r[i], axis=2) * r_hat
            self.r_integration[i] = np.linspace(rmin, rmax, self.Nint_points)
            cartesian_pos_MB = np.expand_dims(self.r_integration[i], axis=2) * r_hat
            self.r_integration[i] = self.r_integration[i].T
            self.MB_pos[i] = cartesian_pos_MB
            self.MB_pos[i] = jnp.transpose(self.MB_pos[i], (2, 0, 1))
            self.MB_pos[i] = jnp.transpose(self.MB_pos[i], (2, 1, 0))


    def generateMockData(self, s_hat: np.ndarray, state: borg.likelihood.MarkovState, make_plot: bool=True) -> None:
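Note: a standalone sketch of the geometry built in generateMBData above. The tracer count, number of integration points and radial limits are illustrative; only the broadcasting pattern matters, and the random unit vectors stand in for the SkyCoord line-of-sight vectors returned by get_radial_vectors.

import numpy as np

nt, n_int = 4, 201                                  # hypothetical tracers, integration points
rmin = np.full(nt, 1.0)                             # placeholder per-tracer lower radial limit
rmax = np.full(nt, 100.0)                           # placeholder per-tracer upper radial limit
r_hat = np.random.randn(1, nt, 3)
r_hat /= np.linalg.norm(r_hat, axis=-1, keepdims=True)           # unit vectors, shape (1, nt, 3)

r_integration = np.linspace(rmin, rmax, n_int)                   # shape (n_int, nt)
cartesian_pos = np.expand_dims(r_integration, axis=2) * r_hat    # shape (n_int, nt, 3)

r_integration = r_integration.T                                  # (nt, n_int): one radial grid per tracer
MB_pos = np.transpose(cartesian_pos, (2, 1, 0))                  # (3, nt, n_int): the layout stored above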
@@ -180,9 +220,59 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
        else:
            raise NotImplementedError

        self.r_hMpc = [np.sqrt(np.sum(self.coord_meas[i] ** 2, axis=0)) for i in range(self.nsamp)]
        self.generateMBData()
        quit()


    def dens2like(self, output_density: np.ndarray):
        """
        Given stored distance tracer data, computes the negative log-likelihood of the data
        for this final density field.

        Args:
            output_density (np.ndarray): The z=0 density field.
        Return:
            lkl (float): The negative log-likelihood of the data.
        """

        lkl = 0
        sig_v = self.fwd_param.getModelParam('nullforward', 'sig_v')

        # Compute velocity field
        bulk_flow = jnp.array([self.fwd_param.getModelParam('nullforward', 'bulk_flow_x'),
                               self.fwd_param.getModelParam('nullforward', 'bulk_flow_y'),
                               self.fwd_param.getModelParam('nullforward', 'bulk_flow_z')])
        v = forwards.dens2vel_linear(output_density, self.f,
                                     self.fwd.getOutputBoxModel().L[0], self.smooth_R)
        v = v + self.bulk_flow.reshape((3, 1, 1, 1))

        omega_m = self.fwd.getCosmoParams().omega_m

        for i in range(self.nsamp):
            muA = self.fwd_param.getModelParam('nullforward', f'mua{i}')
            alpha = self.fwd_param.getModelParam('nullforward', f'alpha{i}')
            lkl += vel2like(
                self.cz_obs[i],
                v,
                output_density,
                self.MB_pos[i],
                self.r_integration[i],
                self.r_hMpc[i],
                self.sig_mu[i],
                sig_v,
                omega_m,
                muA,
                alpha,
                self.L[0],
                jnp.array(self.fwd.getOutputBoxModel().xmin),
                self.interp_order,
                self.bias_epsilon
            )

        if not jnp.isfinite(lkl):
            lkl = self.bignum

        return lkl

    def logLikelihoodComplex(self, s_hat: np.ndarray, gradientIsNext: bool):
        """
@@ -207,8 +297,7 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):

        self.delta = output_density

        # L = self.dens2like(output_density)
        L = 1.
        L = self.dens2like(output_density)
        myprint(f"var(s_hat): {np.var(s_hat)}, Call to logLike: {L}")

        return L
@@ -234,10 +323,10 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
        self.fwd.forwardModel_v2(s_hat)
        self.fwd.getDensityFinal(output_density)

        # mygradient = self.grad_like(output_density)
        # mygradient = np.array(mygradient, dtype=np.float64)
        mygradient = self.grad_like(output_density)
        mygradient = np.array(mygradient, dtype=np.float64)

        # self.fwd.adjointModel_v2(mygradient)
        self.fwd.adjointModel_v2(mygradient)
        mygrad_hat = np.zeros(s_hat.shape, dtype=np.complex128)
        self.fwd.getAdjointModel(mygrad_hat)

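Note: reading off the calls above, the now-enabled gradient path appears to implement the chain rule for the log-likelihood with respect to the white-noise field: jax.grad(dens2like) supplies the gradient with respect to the final density, and adjointModel_v2 / getAdjointModel push it back through the forward model,

    \frac{\partial \mathcal{L}}{\partial \hat{s}} = \left(\frac{\partial \delta}{\partial \hat{s}}\right)^{\dagger} \frac{\partial \mathcal{L}}{\partial \delta}.

This is a sketch of the intended calculation only, not a statement about the BORG internals.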
@@ -253,6 +342,65 @@ class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
        self.updateCosmology(self.fwd.getCosmoParams())
        self.dens2like(self.delta)
        state["BORG_final_density"][:] = self.delta


@partial(jax.jit, static_argnames=['L_BOX', 'interp_order', 'bias_epsilon'])
def vel2like(cz_obs, v, MB_field, MB_pos, r, r_hMpc, sig_mu, sig_v, omega_m, muA, alpha, L_BOX, X_MIN, interp_order, bias_epsilon):
    """
    Jitted part of dens2like
    """

    tracer_vel = projection.interp_field(v,
                                         MB_pos,
                                         L_BOX,
                                         X_MIN,
                                         interp_order,
                                         use_jitted=True,
                                         )

    tracer_vr = projection.project_radial(
        tracer_vel,
        MB_pos,
        jnp.zeros(3,)
    )

    # Convert velocities to redshifts
    zco = utils.z_cos(r, omega_m)
    cz_pred = utils.speed_of_light * zco + (1 + zco) * tracer_vr
    delta_cz_sigv = (cz_pred - jnp.expand_dims(cz_obs, axis=1)) / sig_v

    # Get p_r
    delta_mu = 5. * jnp.log10(r / jnp.expand_dims(r_hMpc, axis=1) * muA)

    # Get los biased density
    los_density = projection.interp_field(
        MB_field,
        MB_pos,
        L_BOX,
        X_MIN,
        interp_order,
        use_jitted=True,
    )
    los_density = jnp.power(1. + los_density + bias_epsilon, alpha)

    d2 = (delta_mu / sig_mu) ** 2
    best = jnp.amin(jnp.abs(d2), axis=1)

    # Multiply p_r by arbitrary number for numerical stability (cancels in p_r / p_r_norm)
    d2 = d2 - jnp.expand_dims(jnp.nanmin(d2, axis=1), axis=1)
    p_r = r ** 2 * jnp.exp(-0.5 * d2) * los_density
    p_r_norm = jnp.expand_dims(jnp.trapz(p_r, r, axis=1), axis=1)

    # Integrate to get likelihood
    d2 = delta_cz_sigv**2
    scale = jnp.nanmin(d2, axis=1)
    d2 = d2 - jnp.expand_dims(scale, axis=1)
    exp_delta_cz = jnp.exp(-0.5 * d2)
    p_cz = jnp.trapz(exp_delta_cz * p_r / p_r_norm, r, axis=1)
    lkl_ind = jnp.log(p_cz) - scale / 2 - 0.5 * jnp.log(2 * np.pi * sig_v**2)
    lkl = - lkl_ind.sum()

    return lkl


@borg.registerGravityBuilder
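Note: reading off vel2like, the per-tracer likelihood being assembled appears to be the distance-marginalised redshift likelihood below (notation introduced here for clarity: \sigma_\mu is sig_mu, \mu_A is muA, \epsilon is bias_epsilon, \hat{r} is the measured distance r_hMpc):

    \mathcal{L}_i = \frac{1}{\sqrt{2\pi\sigma_v^2}} \int \mathrm{d}r \; \frac{p_i(r)}{\int \mathrm{d}r'\, p_i(r')} \, \exp\!\left[-\frac{\left(cz_{\rm pred}(r) - cz_{{\rm obs},i}\right)^2}{2\sigma_v^2}\right],

with

    p_i(r) \propto r^2 \left[1 + \delta(r) + \epsilon\right]^{\alpha} \exp\!\left[-\frac{\Delta\mu(r)^2}{2\sigma_\mu^2}\right], \qquad \Delta\mu(r) = 5\log_{10}\!\left(\frac{r\,\mu_A}{\hat{r}}\right),

    cz_{\rm pred}(r) = c\, z_{\rm cos}(r) + \left[1 + z_{\rm cos}(r)\right] v_r(r),

and the function returns -\sum_i \ln \mathcal{L}_i. The nanmin subtractions are the numerical-stability shifts noted in the comments: the one applied to p_r cancels in p_r / p_r_norm, and the one applied to the redshift term is restored through the scale variable.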
@@ -71,7 +71,7 @@ def borg_mock(s_hat, state, fwd_model, ini_file):
        fwd_model.getOutputBoxModel().L[0], smooth_R)

    # Add bulk flow
    bulk_flow = np.array(ast.literal_eval(config['mock']['bulk_flow']))
    bulk_flow = np.array(ast.literal_eval(config['model']['bulk_flow']))
    output_vel = output_vel + bulk_flow.reshape((3, 1, 1, 1))

    # Sample positions according to bias model
@@ -117,20 +117,20 @@ def borg_mock(s_hat, state, fwd_model, ini_file):
            np.array(fwd_model.getOutputBoxModel().xmin),
            interp_order
        )
        vr_true[i] = projection.project_radial(
        vr_true[i] = np.squeeze(projection.project_radial(
            tracer_vel,
            np.expand_dims(coord_true[i], axis=2),
            np.zeros(3,)
        )
        ))

    # Compute observed redshifts (including noise)
    sig_v = float(config['model']['sig_v'])
    cz_obs = [None] * nsamp
    for i in range(nsamp):
        rtrue = np.sqrt(np.sum(coord_true[i] ** 2, axis=1))
        rtrue = np.sqrt(np.sum(coord_true[i] ** 2, axis=0))
        zco = utils.z_cos(rtrue, cosmo.omega_m)
        cz_obs = utils.speed_of_light * zco + (1 + zco) * vr_true[i]
        cz_obs += np.random.normal(size=cz_obs.shape) * sig_v  # CHECK THIS LINE!!!!!
        cz_obs[i] = utils.speed_of_light * zco + (1 + zco) * vr_true[i]
        cz_obs[i] += np.random.normal(size=cz_obs[i].shape) * sig_v  # CHECK THIS LINE!!!!!

    # Add observational systematic due to incorrect distance estimate
    # \mu -> \mu + 5 log10(A), or equivalently d -> A d
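Note: the loop now fills cz_obs per catalogue with the same observational model used in the likelihood, i.e. (sketching the relation encoded above)

    cz_{\rm obs} = c\, z_{\rm cos}(r_{\rm true}) + \left[1 + z_{\rm cos}(r_{\rm true})\right] v_r + \mathcal{N}(0, \sigma_v^2),

which matches cz_pred in vel2like up to the Gaussian noise term.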
@@ -91,6 +91,6 @@ def get_radial_vectors(coord_meas):

    # Get unit vectors along line of sight
    r_hat = jnp.array(SkyCoord(ra=RA*apu.deg, dec=DEC*apu.deg).cartesian.xyz)
    r_hat = jnp.expand_dims(r_hat, axis=0)
    r_hat = jnp.expand_dims(r_hat.T, axis=0)

    return r_hat
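Note: a quick shape check for the transpose added above, with illustrative angles (astropy is already in use in this function; nt here is just the number of tracers in the toy arrays):

import numpy as np
import astropy.units as apu
from astropy.coordinates import SkyCoord

RA = np.array([10.0, 120.0, 250.0])    # illustrative angles in degrees
DEC = np.array([-5.0, 30.0, 60.0])

r_hat = np.array(SkyCoord(ra=RA*apu.deg, dec=DEC*apu.deg).cartesian.xyz)  # shape (3, nt)
old_layout = np.expand_dims(r_hat, axis=0)     # (1, 3, nt): previous behaviour
new_layout = np.expand_dims(r_hat.T, axis=0)   # (1, nt, 3): broadcasts against the (n_int, nt, 1) radii in generateMBData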
@@ -29,6 +29,7 @@ ares_heat = 1.0
number_to_generate = 10
random_ic = false
init_random_scaling = 0.1
bignum = 1e300

[model]
gravity = lpt
@@ -42,6 +43,7 @@ sig_v = 150.
R_lim = none
Nint_points = 201
Nsig = 10
bulk_flow = [0.0, 0.0, 0.0]

[prior]
omega_m = [0.1, 0.8]
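Note: the new [model] bulk_flow entry is parsed on the Python side with ast.literal_eval, as in the likelihood's __init__ above. A minimal, self-contained sketch (the ini contents are inlined here purely for illustration):

import ast
import configparser
import numpy as np

config = configparser.ConfigParser()
config.read_string("""
[model]
bulk_flow = [0.0, 0.0, 0.0]
""")

bulk_flow = np.array(ast.literal_eval(config['model']['bulk_flow']))
print(bulk_flow)   # -> [0. 0. 0.]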
@@ -73,7 +75,6 @@ NSAMP = 2

[mock]
R_max = 100
bulk_flow = [0.0, 0.0, 0.0]

[python]
likelihood_path = /home/bartlett/fsigma8/borg_velocity/borg_velocity/likelihood.py