942 lines
37 KiB
Python
942 lines
37 KiB
Python
import numpy as np
|
||
import aquila_borg as borg
|
||
import jax.numpy as jnp
|
||
import configparser
|
||
import warnings
|
||
import aquila_borg as borg
|
||
#import symbolic_pofk.linear
|
||
import jax
|
||
from jax import lax
|
||
import jaxlib
|
||
from functools import partial
|
||
import ast
|
||
import numbers
|
||
import h5py
|
||
import re
|
||
import os
|
||
|
||
import borg_velocity.utils as utils
|
||
from borg_velocity.utils import myprint, compute_As, get_sigma_bulk
|
||
import borg_velocity.forwards as forwards
|
||
import borg_velocity.mock_maker as mock_maker
|
||
import borg_velocity.projection as projection
|
||
from borg_velocity.samplers import HMCBiasSampler, derive_prior, MVSliceBiasSampler, BlackJaxBiasSampler, TransformedBlackJaxBiasSampler
|
||
import borg_velocity.samplers as samplers
|
||
|
||
class VelocityBORGLikelihood(borg.likelihood.BaseLikelihood):
    """
    HADES likelihood for distance-tracers.

    Couples a BORG forward model (initial white noise -> evolved density and
    velocity fields) to peculiar-velocity tracer data, marginalising over the
    distance uncertainty of each tracer along its line of sight.
    """
||
def __init__(self, fwd: borg.forward.BaseForwardModel, param_model: forwards.NullForward, fwd_vel: borg.forward.BaseForwardModel, ini_file: str) -> None:
    """
    Initialises the VelocityBORGLikelihood class

    Args:
        - fwd (borg.forward.BaseForwardModel): The forward model to be used in the likelihood.
        - param_model (forwards.NullForward): An empty forward model for storing model parameters.
        - fwd_vel (borg.forward.BaseForwardModel): The forward model from which the velocity field is obtained.
        - ini_file (str): The name of the ini file containing the model and borg parameters.
    """

    self.ini_file = ini_file

    myprint("Reading from configuration file: " + ini_file)
    config = configparser.ConfigParser()
    config.read(ini_file)

    # Grid parameters
    self.N = [int(config['system'][f'N{i}']) for i in range(3)]
    self.L = [float(config['system'][f'L{i}']) for i in range(3)]

    # Catalogues
    self.nsamp = int(config['run']['nsamp'])
    assert self.nsamp > 0, "Need at least one sample to run"
    self.Nt = [int(config[f'sample_{i}']['Nt']) for i in range(self.nsamp)]
    self.alpha = [float(config[f'sample_{i}']['alpha']) for i in range(self.nsamp)]
    self.lam = [float(config[f'sample_{i}']['lam']) for i in range(self.nsamp)]
    self.muA = [float(config[f'sample_{i}']['muA']) for i in range(self.nsamp)]
    self.frac_sig_rhMpc = [float(config[f'sample_{i}']['frac_sig_rhMpc']) for i in range(self.nsamp)]

    # What type of run we're doing
    self.run_type = config['run']['run_type']

    # Seed if creating mocks
    self.mock_seed = int(config['mock']['seed'])
    self.R_max = float(config['mock']['r_max'])

    # Model parameters
    self.ai = float(config['model']['ai'])
    self.af = float(config['model']['af'])
    self.sig_v = float(config['model']['sig_v'])
    if config['model']['R_lim'] == 'none':
        self.R_lim = self.L[0]/2
    else:
        self.R_lim = float(config['model']['R_lim'])
    self.Nint_points = int(config['model']['nint_points'])
    self.Nsig = float(config['model']['Nsig'])
    self.sig_v = float(config['model']['sig_v'])  # NOTE(review): duplicate of the sig_v read a few lines above
    self.smooth_R = float(config['model']['smooth_R'])
    self.bias_epsilon = float(config['model']['bias_epsilon'])
    self.interp_order = int(config['model']['interp_order'])

    # Load bulk flow
    self.bulk_flow = np.array(ast.literal_eval(config['model']['bulk_flow']))

    # For log-likelihood values
    self.bignum = float(config['mcmc']['bignum'])

    myprint(f" Init {self.N}, {self.L}")
    super().__init__(fwd, self.N, self.L)

    # Define the forward models
    self.fwd = fwd
    self.fwd_param = param_model
    self.fwd_vel = fwd_vel

    # Initialise model parameters (one muA/alpha/lam per tracer sample,
    # one bulk-flow component per Cartesian axis, plus the velocity noise)
    self.model_params = {
        **{f'mua{i}':self.muA[i] for i in range(self.nsamp)},
        **{f'alpha{i}':self.alpha[i] for i in range(self.nsamp)},
        **{f'lam{i}':self.lam[i] for i in range(self.nsamp)},
        **{f'bulk_flow_{d}':self.bulk_flow[i] for i, d in enumerate(['x', 'y', 'z'])},
        'sig_v':self.sig_v
    }
    self.fwd_param.setModelParams(self.model_params)

    # Initialise cosmological parameters
    cpar = utils.get_cosmopar(self.ini_file)
    self.fwd.setCosmoParams(cpar)
    self.fwd_param.setCosmoParams(cpar)
    self.updateCosmology(cpar)
    myprint(f"Original cosmological parameters: {self.fwd.getCosmoParams()}")

    # Initialise derivative of the likelihood w.r.t. density and velocity
    self.grad_like = jax.grad(self.dens2like, argnums=(0,1))

    # Find out what kind of run this is
    if borg.EMBEDDED:
        self.action = utils.get_action()
    else:
        self.action = None
||
def initializeLikelihood(self, state: borg.likelihood.MarkovState) -> None:
    """
    Initialise the likelihood internal variables and MarkovState variables.

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
    """

    myprint("Init likelihood")

    # Allocate the 3d array holding the evolved density field in the chain output
    state.newArray3d("BORG_final_density", *self.fwd.getOutputBoxModel().N, True)

    if self.run_type == 'data':
        # NOTE(review): loadObservedData is not defined in this file - confirm
        # it is provided elsewhere before running with run_type = 'data'
        self.loadObservedData(make_plot=False)
    elif borg.EMBEDDED:
        if self.action == 'INIT':
            pass # Data will be loaded later
        elif self.action == 'RESUME':
            self.loadMockData(state) # load data from tracer_data_<run_type>.h5
        else:
            myprint(f'Unknown action: {self.action}')
            raise NotImplementedError
||
def updateMetaParameters(self, state: borg.likelihood.MarkovState) -> None:
    """
    Update the meta parameters of the sampler (not sampled) from the MarkovState.

    Args:
        - state (borg.likelihood.MarkovState): The state object to be used in the likelihood.
    """
    cpar = state['cosmology']
    # Closure relation: dark-energy density from matter and curvature
    cpar.omega_q = 1. - cpar.omega_m - cpar.omega_k
    for model in (self.fwd, self.fwd_param):
        model.setCosmoParams(cpar)
||
def updateCosmology(self, cosmo: borg.cosmo.CosmologicalParameters) -> None:
    """
    Updates the forward model's cosmological parameters with the given values.

    Args:
        - cosmo (borg.cosmo.CosmologicalParameters): The cosmological parameters.
    """
    # Fill in A_s from sigma8 before pushing the parameters to the models
    cpar = compute_As(cosmo)
    myprint(f"Updating cosmology Om = {cosmo.omega_m}, sig8 = {cosmo.sigma8}, As = {cosmo.A_s}")

    # Closure relation: dark-energy density from matter and curvature
    cpar.omega_q = 1. - cpar.omega_m - cpar.omega_k
    for model in (self.fwd, self.fwd_param):
        model.setCosmoParams(cpar)
||
def generateMBData(self) -> None:
    """
    Generate points along the line of sight of each tracer to enable marginalisation
    over distance uncertainty. The central distance is given such that the observed
    redshift equals the cosmological redshift at this distance. The range is then
    +/- Nsig * sig, where
        sig^2 = (sig_v/100)^2 + sig_r^2
    and sig_v is the velocity uncertainty in km/s
    """

    self.MB_pos = [None] * self.nsamp          # Cartesian positions of the integration points
    self.r_integration = [None] * self.nsamp   # radial integration grid per tracer
    self.RA = [None] * self.nsamp              # placeholders (not filled in this method)
    self.DEC = [None] * self.nsamp

    for i in range(self.nsamp):
        myprint(f"Making MB data for sample {i}")

        # Get angular coordinates of all points
        r_hat = projection.get_radial_vectors(self.coord_meas[i])

        # Get min and max distance to integrate over
        # Central estimate: Hubble-flow distance cz/100 (Mpc/h)
        robs = self.cz_obs[i] / 100
        sigr = np.sqrt((self.sig_v / 100) ** 2 + (self.frac_sig_rhMpc[i] * robs)**2)
        rmin = robs - self.Nsig * sigr
        # Clamp non-positive lower bounds to a small positive floor
        rmin = rmin.at[rmin <= 0].set(self.L[0] / self.N[0] / 100.)
        rmax = robs + self.Nsig * sigr
        # Clamp the upper bound to the maximum radius supported by the model
        rmax = rmax.at[rmax > self.R_lim].set(self.R_lim)

        # Compute coordinates of integration points
        self.r_integration[i] = np.linspace(rmin, rmax, self.Nint_points)
        cartesian_pos_MB = np.expand_dims(self.r_integration[i], axis=2) * r_hat
        # Reorder axes so the tracer axis comes first
        # (assumes r_hat is (Nint, Ntracer, 3) broadcast-compatible - TODO confirm)
        self.r_integration[i] = self.r_integration[i].T
        self.MB_pos[i] = cartesian_pos_MB
        self.MB_pos[i] = jnp.transpose(self.MB_pos[i], (2, 1, 0))
|
||
def generateMockData(self, s_hat: np.ndarray, state: borg.likelihood.MarkovState,) -> None:
    """
    Generates mock data by simulating the forward model with the given white noise,
    drawing distance tracers from the density field, computing their distance
    moduli and radial velocities, and adding Gaussian noise to the appropriate
    variables. Also calculates the initial negative log-likelihood of the data.

    Args:
        - s_hat (np.ndarray): The input (initial) density field.
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.

    Raises:
        - NotImplementedError: for run types other than 'mock'.
        - ValueError: if any tracer lies outside R_lim.
    """

    if self.run_type == 'data':
        raise NotImplementedError
    elif self.run_type == 'velmass':
        raise NotImplementedError
    elif self.run_type == 'mock':
        # Draw tracers and their observables from the forward-modelled fields
        self.coord_true, self.coord_meas, self.sig_mu, self.vr_true, self.cz_obs = \
            mock_maker.borg_mock(s_hat, state, self.fwd, self.fwd_vel, self.ini_file, seed=self.mock_seed)
    else:
        raise NotImplementedError

    # Save mock to file
    with h5py.File(f'tracer_data_{self.run_type}.h5', 'w') as h5file:
        for i in range(self.nsamp):
            sample_group = h5file.create_group(f'sample_{i}')
            sample_group.create_dataset('coord_true', data=self.coord_true[i])
            sample_group.create_dataset('coord_meas', data=self.coord_meas[i])
            sample_group.create_dataset('sig_mu', data=self.sig_mu[i])
            sample_group.create_dataset('vr_true', data=self.vr_true[i])
            sample_group.create_dataset('cz_obs', data=self.cz_obs[i])

    # Radial distances implied by the measured Cartesian coordinates
    self.r_hMpc = [np.sqrt(np.sum(self.coord_meas[i] ** 2, axis=0)) for i in range(self.nsamp)]

    # Check that the measured coordinates all lie within the box
    for i in range(self.nsamp):
        if np.amax(self.r_hMpc[i]) > self.R_lim:
            raise ValueError('All tracers must have measured coordinates within R_lim')

    self.generateMBData()
    myprint('From mock')
    # Cache the white noise and evaluate the initial likelihood
    self.saved_s_hat = s_hat.copy()
    self.logLikelihoodComplex(s_hat, False)
||
def loadMockData(self, state: borg.likelihood.MarkovState) -> None:
    """
    Load previously generated mock tracer data from tracer_data_<run_type>.h5
    and rebuild the per-sample arrays needed by the likelihood.

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object
          (unused here, kept for interface compatibility).
    """

    myprint('Loading previously made mock data')

    nsamp = self.nsamp
    self.coord_true = [None] * nsamp
    self.coord_meas = [None] * nsamp
    self.sig_mu = [None] * nsamp
    self.vr_true = [None] * nsamp
    self.cz_obs = [None] * nsamp
    self.r_hMpc = [None] * nsamp

    with h5py.File(f'tracer_data_{self.run_type}.h5', 'r') as f:
        for i in range(nsamp):
            grp = f[f'sample_{i}']
            self.coord_true[i] = jnp.array(grp['coord_true'][:])
            self.coord_meas[i] = jnp.array(grp['coord_meas'][:])
            self.sig_mu[i] = grp['sig_mu'][()]
            self.vr_true[i] = jnp.array(grp['vr_true'][:])
            self.cz_obs[i] = jnp.array(grp['cz_obs'][:])
            # Radial distance implied by the measured Cartesian coordinates
            self.r_hMpc[i] = np.sqrt(np.sum(self.coord_meas[i] ** 2, axis=0))

    # Check that the measured coordinates all lie within the box
    for i in range(nsamp):
        if np.amax(self.r_hMpc[i]) > self.R_lim:
            raise ValueError('All tracers must have measured coordinates within R_lim')

    self.generateMBData()
|
||
def dens2like(self, output_density: np.ndarray, output_velocity: np.ndarray):
    """
    Given stored distance tracer data, computes the negative log-likelihood of the data
    for this final density field.

    Args:
        - output_density (np.ndarray): The z=0 density field.
        - output_velocity (np.ndarray): The velocity field, shape (3, N, N, N)
          (reshaped bulk flow is broadcast-added onto it).

    Return:
        lkl (float): The negative log-likelihood of the data.
    """

    lkl = 0

    sig_v = self.model_params['sig_v']

    # Compute velocity field: model velocity plus the (sampled) bulk flow
    self.bulk_flow = jnp.array([self.model_params['bulk_flow_x'],
                                self.model_params['bulk_flow_y'],
                                self.model_params['bulk_flow_z']])
    v = output_velocity + self.bulk_flow.reshape((3, 1, 1, 1))

    omega_m = self.fwd.getCosmoParams().omega_m

    # Accumulate the likelihood over tracer samples, each with its own
    # nuisance parameters muA / alpha / lam
    for i in range(self.nsamp):
        muA = self.model_params[f'mua{i}']
        alpha = self.model_params[f'alpha{i}']
        lam = self.model_params[f'lam{i}']
        lkl += vel2like(
            self.cz_obs[i],
            v,
            output_density,
            self.MB_pos[i],
            self.r_integration[i],
            self.r_hMpc[i],
            self.sig_mu[i],
            sig_v,
            omega_m,
            muA,
            alpha,
            lam,
            self.L[0],
            jnp.array(self.fwd.getOutputBoxModel().xmin),
            self.interp_order,
            self.bias_epsilon,
            self.R_max
        )

    # Guard against non-finite likelihoods with a jax-traceable branch
    lkl = lax.cond(
        jnp.isfinite(lkl),
        lambda _: lkl, # If True (finite), return lkl
        lambda _: self.bignum, # If False (not finite), return self.bignum
        operand=None # No additional operand needed here
    )

    return lkl
||
def logLikelihoodComplex(self, s_hat: np.ndarray, gradientIsNext: bool, skip_density: bool=False, update_from_model: bool=True):
    """
    Calculates the negative log-likelihood of the data.

    Args:
        - s_hat (np.ndarray): The input white noise.
        - gradientIsNext (bool): If True, prepares the forward model for gradient calculations.
        - skip_density (bool, default=False): If True, do not repeat the s_hat -> density, velocity computation
            and use the stored result
        - update_from_model (bool, default=True): If True, update self.model_params with self.fwd_param.getModelParam

    Returns:
        The negative log-likelihood value.
    """

    N = self.fwd.getBoxModel().N[0]

    if update_from_model:
        # Pull the current sampled values out of the parameter forward model
        for k in self.model_params.keys():
            self.model_params[k] = self.fwd_param.getModelParam('nullforward', k)
        self.updateCosmology(self.fwd.getCosmoParams())  # for sigma8 -> As

    if not skip_density:
        # Run BORG density field
        output_density = np.zeros((N,N,N))
        self.fwd.forwardModel_v2(s_hat)
        self.fwd.getDensityFinal(output_density)

        # Get velocity field
        output_velocity = self.fwd_vel.getVelocityField()
    else:
        # Reuse the fields cached by the previous call
        output_density = self.delta.copy()
        output_velocity = self.vel.copy()

    lkl = self.dens2like(output_density, output_velocity)
    # Only log concrete values (skip abstract jax tracers during differentiation)
    if isinstance(lkl, (numbers.Number, jaxlib.xla_extension.ArrayImpl)):
        myprint(f"var(s_hat): {np.var(s_hat)}, Call to logLike: {lkl}")
        myprint(self.model_params)
        myprint(self.fwd.getCosmoParams())

    # Cache the fields for skip_density calls and commitAuxiliaryFields
    self.delta = output_density.copy()
    self.vel = output_velocity.copy()

    return lkl
||
def gradientLikelihoodComplex(self, s_hat: np.ndarray):
    """
    Calculates the adjoint negative log-likelihood of the data.

    Args:
        - s_hat (np.ndarray): The input white noise.

    Returns:
        The adjoint negative log-likelihood gradient with respect to s_hat.
    """

    N = self.fwd.getBoxModel().N[0]

    # Run BORG density field
    output_density = np.zeros((N,N,N))
    self.fwd.forwardModel_v2(s_hat)
    self.fwd.getDensityFinal(output_density)

    # Get velocity field
    output_velocity = self.fwd_vel.getVelocityField()

    # d(-log L)/d(density) and d(-log L)/d(velocity) via jax autodiff
    mygradient, velgradient = self.grad_like(output_density, output_velocity)

    mygradient = np.array(mygradient, dtype=np.float64)
    vgradient = np.array(velgradient, dtype=np.float64)

    # Back-propagate through the velocity and density forward chains,
    # then extract the adjoint gradient w.r.t. the white noise
    self.fwd_vel.computeAdjointModel(vgradient)
    self.fwd.adjointModel_v2(mygradient)
    mygrad_hat = np.zeros(s_hat.shape, dtype=np.complex128)
    self.fwd.getAdjointModel(mygrad_hat)
    self.fwd.clearAdjointGradient()

    return mygrad_hat
||
def commitAuxiliaryFields(self, state: borg.likelihood.MarkovState) -> None:
    """
    Commits the final density field to the Markov state.

    Args:
        - state (borg.state.State): The state object containing the final density field.
    """
    # Refresh cosmology-dependent quantities, re-evaluate the likelihood on the
    # cached fields, then write the cached density into the chain output
    self.updateCosmology(self.fwd.getCosmoParams())
    self.dens2like(self.delta, self.vel)
    state["BORG_final_density"][:] = self.delta
||
# @partial(jax.jit, static_argnames=['L_BOX', 'interp_order', 'bias_epsilon'])
def vel2like(cz_obs, v, MB_field, MB_pos, r, r_hMpc, sig_mu, sig_v, omega_m, muA, alpha, lam,
             L_BOX, X_MIN, interp_order, bias_epsilon, R_max):
    """
    Jitted part of dens2like: negative log-likelihood of one tracer sample.

    Args:
        - cz_obs: observed cz of each tracer
        - v: velocity field (bulk flow already added)
        - MB_field: evolved density field
        - MB_pos: Cartesian positions of the line-of-sight integration points
        - r: radial integration grid, one row per tracer
        - r_hMpc: measured radial distance of each tracer (Mpc/h)
        - sig_mu: distance-modulus uncertainty
        - sig_v: velocity noise (km/s)
        - omega_m: matter density parameter
        - muA, alpha, lam: sample nuisance parameters (distance scaling,
          bias power-law index, radial selection decay)
        - L_BOX, X_MIN: box size and box lower corner
        - interp_order: field interpolation order
        - bias_epsilon: regulariser added before the power-law bias
        - R_max: maximum radius entering the radial selection term

    Returns:
        lkl (float): the negative log-likelihood of this sample.
    """

    # Velocity of the field interpolated at each integration point
    tracer_vel = projection.interp_field(v,
                                         MB_pos,
                                         L_BOX,
                                         X_MIN,
                                         interp_order,
                                         use_jitted=True,
                                         )

    # Project onto the line of sight
    tracer_vr = projection.project_radial(
        tracer_vel,
        MB_pos,
        jnp.zeros(3,)
    )

    # Convert velocities to redshifts
    zco = utils.z_cos(r, omega_m)
    cz_pred = utils.speed_of_light * zco + (1 + zco) * tracer_vr
    delta_cz_sigv = (cz_pred - jnp.expand_dims(cz_obs, axis=1)) / sig_v

    # Get p_r: distance-modulus residual including the muA scaling
    delta_mu = 5. * jnp.log10(r / jnp.expand_dims(r_hMpc,axis=1) * muA)

    # Get los biased density
    los_density = projection.interp_field(
        MB_field,
        MB_pos,
        L_BOX,
        X_MIN,
        interp_order,
        use_jitted=True,
    )
    # Clip 1+delta at zero, then apply the power-law bias
    los_density = jax.nn.relu(1. + los_density)
    los_density = jnp.power(los_density + bias_epsilon, alpha)

    d2 = (delta_mu / sig_mu) ** 2

    # Multiply p_r by arbitrary number for numerical stability (cancels in p_r / p_r_norm)
    d2 = d2 - jnp.expand_dims(jnp.nanmin(d2, axis=1), axis=1)
    p_r = r ** 2 * jnp.exp(-0.5 * d2) * los_density * jnp.exp(- lam * r / R_max)
    p_r_norm = jnp.expand_dims(jnp.trapezoid(p_r, r, axis=1), axis=1)

    # Integrate to get likelihood (same stabilisation trick, undone via 'scale')
    d2 = delta_cz_sigv**2
    scale = jnp.nanmin(d2, axis=1)
    d2 = d2 - jnp.expand_dims(scale, axis=1)
    exp_delta_cz = jnp.exp(-0.5 * d2)

    p_cz = jnp.trapezoid(exp_delta_cz * p_r / p_r_norm, r, axis=1)
    lkl_ind = jnp.log(p_cz) - scale / 2 - 0.5 * jnp.log(2 * np.pi * sig_v**2)
    lkl = - lkl_ind.sum()

    return lkl
||
@derive_prior
def transform_mua(x):
    """Map an unconstrained value x into the uniform prior range for mua."""
    lo, hi = model_params_prior['mua']
    return samplers.transform_uniform(x, lo, hi)

def inv_transform_mua(alpha):
    """Inverse of transform_mua: map a mua value back to unconstrained space."""
    lo, hi = model_params_prior['mua']
    return samplers.inv_transform_uniform(alpha, lo, hi)
||
@derive_prior
def transform_alpha(x):
    """Map an unconstrained value x into the uniform prior range for alpha."""
    lo, hi = model_params_prior['alpha']
    return samplers.transform_uniform(x, lo, hi)

def inv_transform_alpha(alpha):
    """Inverse of transform_alpha: map an alpha value back to unconstrained space."""
    lo, hi = model_params_prior['alpha']
    return samplers.inv_transform_uniform(alpha, lo, hi)
||
@derive_prior
def transform_lam(x):
    """Map an unconstrained value x into the uniform prior range for lam."""
    lo, hi = model_params_prior['lam']
    return samplers.transform_uniform(x, lo, hi)

def inv_transform_lam(lam):
    """Inverse of transform_lam: map a lam value back to unconstrained space."""
    lo, hi = model_params_prior['lam']
    return samplers.inv_transform_uniform(lam, lo, hi)
||
@derive_prior
def transform_sig_v(x):
    """Map an unconstrained value x into the uniform prior range for sig_v."""
    lo, hi = model_params_prior['sig_v']
    return samplers.transform_uniform(x, lo, hi)

def inv_transform_sig_v(alpha):
    """Inverse of transform_sig_v: map a sig_v value back to unconstrained space."""
    lo, hi = model_params_prior['sig_v']
    return samplers.inv_transform_uniform(alpha, lo, hi)
||
@derive_prior
def transform_bulk_flow(x):
    """Map an unconstrained value x into the uniform prior range for a bulk-flow component."""
    lo, hi = model_params_prior['bulk_flow']
    return samplers.transform_uniform(x, lo, hi)

def inv_transform_bulk_flow(alpha):
    """Inverse of transform_bulk_flow: map a bulk-flow component back to unconstrained space."""
    lo, hi = model_params_prior['bulk_flow']
    return samplers.inv_transform_uniform(alpha, lo, hi)
||
@borg.registerGravityBuilder
def build_gravity_model(state: borg.likelihood.MarkovState, box: borg.forward.BoxModel, ini_file=None) -> borg.forward.BaseForwardModel:
    """
    Builds the gravity model and returns the forward model chain.

    Also stores the chain, the parameter forward model and the velocity forward
    model in the module globals chain, fwd_param and fwd_vel for the other
    registered builders.

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
        - box (borg.forward.BoxModel): The input box model.
        - ini_file (str, default=None): The location of the ini file. If None, use borg.getIniConfigurationFilename()

    Returns:
        borg.forward.BaseForwardModel: The forward model.
    """

    global chain, fwd_param, fwd_vel
    myprint("Building gravity model")

    if ini_file is None:
        myprint("Reading from configuration file: " + borg.getIniConfigurationFilename())
        config = configparser.ConfigParser()
        config.read(borg.getIniConfigurationFilename())
    else:
        myprint("Reading from configuration file: " + ini_file)
        config = configparser.ConfigParser()
        config.read(ini_file)
    ai = float(config['model']['ai'])  # initial scale factor
    af = float(config['model']['af'])  # final scale factor
    supersampling = int(config['model']['supersampling'])

    # forcesampling is only required by the particle-mesh integrators
    if config['model']['gravity'] in ['pm', 'cola']:
        forcesampling = int(config['model']['forcesampling'])

    # Setup forward model
    chain = borg.forward.ChainForwardModel(box)
    chain.addModel(borg.forward.models.HermiticEnforcer(box))

    # CLASS transfer function
    chain @= borg.forward.model_lib.M_PRIMORDIAL_AS(box)
    transfer_class = borg.forward.model_lib.M_TRANSFER_CLASS(box, opts=dict(a_transfer=1.0))
    transfer_class.setModelParams({"extra_class_arguments":{'YHe':'0.24'}})
    chain @= transfer_class

    if config['model']['gravity'] == 'linear':
        raise NotImplementedError(config['model']['gravity'])
    elif config['model']['gravity'] == 'lpt':
        # NOTE(review): a_initial is set to af (not ai) in every branch below -
        # confirm this is intentional
        mod = borg.forward.model_lib.M_LPT_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      lightcone=False,
                      part_factor=1.01,))
    elif config['model']['gravity'] == '2lpt':
        mod = borg.forward.model_lib.M_2LPT_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      lightcone=False,
                      part_factor=1.01,))
    elif config['model']['gravity'] == 'pm':
        mod = borg.forward.model_lib.M_PM_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      part_factor=1.01,
                      forcesampling=forcesampling,
                      pm_start_z=1/ai - 1,
                      pm_nsteps=int(config['model']['nsteps']),
                      tcola=False))
    elif config['model']['gravity'] == 'cola':
        mod = borg.forward.model_lib.M_PM_CIC(
            box,
            opts=dict(a_initial=af,
                      a_final=af,
                      do_rsd=False,
                      supersampling=supersampling,
                      part_factor=1.01,
                      forcesampling=forcesampling,
                      pm_start_z=1/ai - 1,
                      pm_nsteps=int(config['model']['nsteps']),
                      tcola=True))
    else:
        raise NotImplementedError(config['model']['gravity'])

    mod.accumulateAdjoint(True)
    chain @= mod

    # Cosmological parameters
    if ini_file is None:
        cpar = utils.get_cosmopar(borg.getIniConfigurationFilename())
    else:
        cpar = utils.get_cosmopar(ini_file)
    chain.setCosmoParams(cpar)

    # This is the forward model for the model parameters
    fwd_param = borg.forward.ChainForwardModel(box)
    mod_null = forwards.NullForward(box)
    fwd_param.addModel(mod_null)
    fwd_param.setCosmoParams(cpar)

    # This is the forward model for velocity
    velmodel_name = config['model']['velocity']
    velmodel = getattr(borg.forward.velocity, velmodel_name)
    if velmodel_name == 'LinearModel':
        fwd_vel = velmodel(box, mod, af)
    elif velmodel_name == 'CICModel':
        rsmooth = float(config['model']['rsmooth']) # Mpc/h
        fwd_vel = velmodel(box, mod, rsmooth)
    else:
        fwd_vel = velmodel(box, mod)

    return chain
||
# Module-level handles shared between the registered builder callbacks
# (kept alive so the samplers are not garbage-collected)
_glob_model = None
_glob_cosmo = None
# MCMC step IDs after which model / cosmology sampling switch on
begin_model = None
begin_cosmo = None
# Prior bounds (filled by build_sampler), used by the transform_* functions
model_params_prior = {}
||
def check_model_sampling(loop):
    # Enable the model-parameter sampler only after the warmup_model step
    return loop.getStepID() > begin_model
|
||
def check_cosmo_sampling(loop):
    # Enable the cosmology sampler only after the warmup_cosmo step
    return loop.getStepID() > begin_cosmo
||
@borg.registerSamplerBuilder
def build_sampler(
    state: borg.likelihood.MarkovState,
    info: borg.likelihood.LikelihoodInfo,
    loop: borg.samplers.MainLoop
):
    """
    Builds the sampler and returns it.
    Which parameters to sample are given in the ini file.
    We assume all parameters are NOT meant to be sampled, unless we find "XX_sampler_blocked = false" in the ini file

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
        - info (borg.likelihood.LikelihoodInfo): The likelihood information.
        - loop (borg.samplers.MainLoop): The main loop; samplers are pushed onto
          it and attached to the warmup condition groups.

    Returns:
        List of samplers to use.
    """
    global _glob_model, _glob_cosmo, begin_model, begin_cosmo, model_params_prior
    borg.print_msg(borg.Level.std, "Hello sampler, loop is {l}, step_id={s}", l=loop, s=loop.getStepID())

    myprint("Building sampler")

    myprint("Reading from configuration file: " + borg.getIniConfigurationFilename())
    config = configparser.ConfigParser()
    config.read(borg.getIniConfigurationFilename())
    # A parameter is sampled iff "<name>_sampler_blocked = false" appears in [block_loop]
    end = '_sampler_blocked'
    to_sample = [k[:-len(end)] for (k, v) in config['block_loop'].items() if k[-len(end):] == end and v.lower() == 'false']
    myprint(f'Parameters to sample: {to_sample}')
    nsamp = int(config['run']['nsamp'])

    all_sampler = []

    # Cosmology sampler arguments
    prefix = ""
    params = []
    initial_values = {}
    prior = {}
    for p in ["omega_m", "sigma8"]:
        if p not in to_sample:
            continue
        if p in config['prior'].keys() and p in config['cosmology'].keys():
            myprint(f'Adding {p} sampler')
            params.append(f"cosmology.{p}")
            initial_values[f"cosmology.{p}"] = float(config['cosmology'][p])
            prior[f"cosmology.{p}"] = np.array(ast.literal_eval(config['prior'][p]))
        else:
            s = f'Could not find {p} prior and/or default, so will not sample'
            warnings.warn(s, stacklevel=2)
            # Remove for later to prevent duplication
            to_sample.remove(p)

    begin_cosmo = int(config['mcmc']['warmup_cosmo'])

    if len(params) > 0:
        myprint('Adding cosmological parameter sampler')
        # 'likelihood' and 'chain' are module globals set by the other builders
        cosmo_sampler = borg.samplers.ModelParamsSampler(prefix, params, likelihood, chain, initial_values, prior)
        cosmo_sampler.setName("cosmo_sampler")
        _glob_cosmo = cosmo_sampler
        all_sampler.append(cosmo_sampler)
        loop.push(cosmo_sampler)
        loop.addToConditionGroup("warmup_cosmo", "cosmo_sampler")
        loop.addConditionToConditionGroup("warmup_cosmo", partial(check_cosmo_sampling, loop))

    # Model parameter sampler
    prefix = ""
    params = []
    initial_values = {}
    prior = {}
    transform_attributes = []
    inv_transform_attributes = []

    for p in to_sample:
        if p in config['prior'].keys():
            if p == 'sig_v':
                myprint(f'Adding {p} sampler')
                params.append(p)
                initial_values[p] = float(config['model'][p])
                prior[p] = _parse_prior(config['prior'][p])
                model_params_prior[p] = prior[p]
                transform_attributes.append(globals().get(f'transform_{p}'))
                inv_transform_attributes.append(globals().get(f'inv_transform_{p}'))
            elif p == 'bulk_flow':
                # One sampled parameter per Cartesian component
                for i, d in enumerate(['_x', '_y', '_z']):
                    myprint(f'Adding {p}{d} sampler')
                    params.append(f'{p}{d}')
                    initial_values[f'{p}{d}'] = np.array(ast.literal_eval(config['model']['bulk_flow']))[i]
                    prior[f'{p}{d}'] = _parse_prior(config['prior'][p])
                    transform_attributes.append(globals().get(f'transform_{p}'))
                    inv_transform_attributes.append(globals().get(f'inv_transform_{p}'))
                model_params_prior[p] = prior[f'{p}_x']
            else:
                # Per-sample nuisance parameters (e.g. alpha0, alpha1, ...)
                for i in range(nsamp):
                    myprint(f'Adding {p}{i} sampler')
                    params.append(f'{p}{i}')
                    initial_values[f'{p}{i}'] = float(config[f'sample_{i}'][p])
                    prior[f'{p}{i}'] = _parse_prior(config['prior'][p])
                    transform_attributes.append(globals().get(f'transform_{p}'))
                    inv_transform_attributes.append(globals().get(f'inv_transform_{p}'))
                model_params_prior[p] = prior[f'{p}{0}']
        else:
            s = f'Could not find {p} prior, so will not sample'
            warnings.warn(s, stacklevel=2)

    begin_model = int(config['mcmc']['warmup_model'])

    if len(params) > 0:
        myprint('Adding model parameter sampler')
        algo = config['sampling']['algorithm'].lower()
        if algo == 'slice':
            model_sampler = borg.samplers.ModelParamsSampler(prefix, params, likelihood, fwd_param, initial_values, prior)
        elif algo == 'hmc':
            model_sampler = HMCBiasSampler(
                "model_params",
                likelihood,
                params,
                transform_attributes=transform_attributes,
                inv_transform_attributes=inv_transform_attributes,
                prior = [p.prior for p in transform_attributes],
                Nsteps = int(config['sampling']['nsteps']),
                epsilon = float(config['sampling']['epsilon']),
                refresh = float(config['sampling']['refresh']),
            )
        elif algo == 'mvslice':
            # Per-parameter slice widths read from [sampling] mvlam_<base name>
            lam = [None] * len(params)
            for i, p in enumerate(params):
                if p.startswith('bulk_flow'):
                    lam[i] = float(config['sampling']['mvlam_bulk_flow'])
                elif p[-1].isdigit():
                    # Strip ALL trailing digits to recover the base name.
                    # (The previous reversed-regex slice only removed one
                    # character, breaking sample indices >= 10.)
                    lam[i] = float(config['sampling'][f"mvlam_{p.rstrip('0123456789')}"])
                else:
                    lam[i] = float(config['sampling'][f'mvlam_{p}'])
            model_sampler = MVSliceBiasSampler(
                "model_params",
                likelihood,
                params,
                lam
            )
        elif algo == 'blackjax':
            model_sampler = BlackJaxBiasSampler(
                "model_params",
                likelihood,
                params,
                int(config['sampling']['rng_seed']),
                int(config['sampling']['warmup_nsteps']),
                float(config['sampling']['warmup_target_acceptance_rate']),
            )
        elif algo == 'transformedblackjax':
            model_sampler = TransformedBlackJaxBiasSampler(
                "model_params",
                likelihood,
                params,
                transform_attributes=transform_attributes,
                inv_transform_attributes=inv_transform_attributes,
                prior = [p.prior for p in transform_attributes],
                rng_seed = int(config['sampling']['rng_seed']),
                warmup_nsteps = int(config['sampling']['warmup_nsteps']),
                warmup_target_acceptance_rate = float(config['sampling']['warmup_target_acceptance_rate']),
            )
        else:
            raise NotImplementedError
        # All non-slice algorithms record the sampled parameter names
        if algo != 'slice':
            with open('model_params.txt', 'w') as file:
                for item in params:
                    file.write(f"{item}\n")
        model_sampler.setName("model_sampler")
        _glob_model = model_sampler
        loop.push(model_sampler)
        all_sampler.append(model_sampler)
        loop.addToConditionGroup("warmup_model", "model_sampler")
        loop.addConditionToConditionGroup("warmup_model", partial(check_model_sampling, loop))
    myprint('Done')

    return []


def _parse_prior(spec: str) -> np.ndarray:
    """Parse a prior bounds string from the ini file, mapping 'inf' to np.inf."""
    if 'inf' in spec:
        x = ast.literal_eval(spec.replace('inf', '"inf"'))
        return np.array([xx if xx != 'inf' else np.inf for xx in x])
    return np.array(ast.literal_eval(spec))
|
||
@borg.registerLikelihoodBuilder
def build_likelihood(state: borg.likelihood.MarkovState, info: borg.likelihood.LikelihoodInfo) -> borg.likelihood.BaseLikelihood:
    """
    Builds the likelihood object and returns it.

    Args:
        - state (borg.likelihood.MarkovState): The Markov state object to be used in the likelihood.
        - info (borg.likelihood.LikelihoodInfo): The likelihood information.

    Returns:
        borg.likelihood.BaseLikelihood: The likelihood object.
    """
    global likelihood, fwd_param
    myprint("Building likelihood")
    myprint(chain.getCosmoParams())
    # chain, fwd_param and fwd_vel are module globals set by build_gravity_model
    likelihood = VelocityBORGLikelihood(chain, fwd_param, fwd_vel, borg.getIniConfigurationFilename())
    return likelihood