Initial import

This commit is contained in:
Guilhem Lavaux 2023-05-29 10:41:03 +02:00
commit 56a50eead3
820 changed files with 192077 additions and 0 deletions

31
extra/hmclet/README.md Normal file
View file

@ -0,0 +1,31 @@
# README #
This is HMCLET: a small extra HMC framework for ARES that allows sampling a set of model parameters jointly. It provides a self-calibration step to estimate
the masses for the HMC.
### What is this repository for? ###
* Quick summary
* Version
### How do I get set up? ###
To include HMCLET please do a git clone of this repository in the `extra/` subfolder of ARES. For example:
cd $ARES/extra
git clone git@bitbucket.org:bayesian_lss_team/hmclet.git hmclet
Finally, go to your build directory and run `cmake .` to refresh the detected modules, then run `make` again to finish the build
with HMCLET included.
You can run `libLSS/tests/test_hmclet` to check that no error is triggered, and verify the content of "test_sample.h5". It must contain a chain with 2 parameters:
the first oscillates around 1 with a variance of 10, and the second oscillates around 4 with a variance of 2.
### Contribution guidelines ###
You can submit pull requests to the BLSS team admin.
### Who do I talk to? ###
Guilhem Lavaux or Jens Jasche.
Check [our website](https://aquila-consortium.org/people.html) and the [wiki](https://aquila-consortium.org/wiki/).

View file

@ -0,0 +1,358 @@
[system]
console_output=logares.txt
mask_precision=0.1
VERBOSE_LEVEL = 2
N0 = 32
N1 = 32
N2 = 32
L0 = 677.7
L1 = 677.7
L2 = 677.7
corner0 = -338.85
corner1 = -338.85
corner2 = -338.85
NUM_MODES=100
N_MC=1000
test_mode=true
# If true, the initial power spectrum of the chain is set to the cosmological one
seed_cpower=true
bias_0_sampler_generic_blocked=false
bias_1_sampler_generic_blocked=false
bias_2_sampler_generic_blocked=false
bias_3_sampler_generic_blocked=false
bias_4_sampler_generic_blocked=true
# Indicate which samplers should be blocked for testing purposes
[block_loop]
# Indicate which samplers should be blocked for testing purposes
#messenger_signal_blocked=false
power_sampler_a_blocked=true
power_sampler_b_blocked=true
power_sampler_c_blocked=true
#bias_sampler_blocked=false
hades_sampler_blocked=false
ares_heat=1.0
[gravity]
#model=HADES_PT
model=LPT_CIC
supersampling=2
forcesampling=2
pm_nsteps=30
pm_start_z=69
lightcone=false
do_rsd=false
[mcmc]
number_to_generate=100
random_ic=false
init_random_scaling=1.0
[julia]
likelihood_path=test_like.jl
likelihood_module=julia_test
bias_sampler_type=hmclet
ic_in_julia=true
#hmclet_matrix=QN_DIAGONAL
hmclet_matrix=DIAGONAL
hmclet_frozen=true
#hmclet_burnin=400
#hmclet_burnin_memory=50
hmclet_maxEpsilon=0.1
#hmclet_maxEpsilon=0.01
hmclet_maxNtime=100
hmclet_massScale = 0
#hmclet_correlationLimiter=0.5
[hades]
likelihood=GENERIC_POISSON_BROKEN_POWERLAW_BIAS
#likelihood=BORG_POISSON
algorithm=HMC
max_epsilon=0.01
max_timesteps=50
mixing=1
[run]
NCAT = 1
[cosmology]
fnl = 0
omega_r = 0
omega_k = 0
omega_m = 0.3175
omega_b = 0.049
omega_q = 0.6825
w = -1
wprime = 0
n_s = 0.9624
sigma8 = 0.8344
h100 = 0.6711
beta = 1.5
z0 = 0
# 11.5 mag cut
[catalog_0]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
#maskdata = one.fits
#maskdata = zero.fits
bias=100,1,0,0.005
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -21.50
galaxy_faint_absolute_magnitude_cut = -21.00
refbias = false
nmean=1
[catalog_1]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -22.00
galaxy_faint_absolute_magnitude_cut = -21.50
refbias = false
nmean=1
[catalog_2]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -22.50
galaxy_faint_absolute_magnitude_cut = -22.00
refbias = false
nmean=1
[catalog_3]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -23.00
galaxy_faint_absolute_magnitude_cut = -22.50
refbias = false
nmean=1
[catalog_4]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -23.50
galaxy_faint_absolute_magnitude_cut = -23.00
refbias = false
nmean=1
[catalog_5]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -24.00
galaxy_faint_absolute_magnitude_cut = -23.50
refbias = false
nmean=1
[catalog_6]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -24.50
galaxy_faint_absolute_magnitude_cut = -24.00
refbias = false
nmean=1
[catalog_7]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -25.00
galaxy_faint_absolute_magnitude_cut = -24.50
refbias = false
nmean=1
# 11.5 - 12.5 mag cut
[catalog_8]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -21.50
galaxy_faint_absolute_magnitude_cut = -21.00
refbias = false
nmean=1
[catalog_9]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -22.00
galaxy_faint_absolute_magnitude_cut = -21.50
refbias = false
nmean=1
[catalog_10]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -22.50
galaxy_faint_absolute_magnitude_cut = -22.00
refbias = false
nmean=1
[catalog_11]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -23.00
galaxy_faint_absolute_magnitude_cut = -22.50
refbias = false
nmean=1
[catalog_12]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -23.50
galaxy_faint_absolute_magnitude_cut = -23.00
refbias = false
nmean=1
[catalog_13]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -24.00
galaxy_faint_absolute_magnitude_cut = -23.50
refbias = false
nmean=1
[catalog_14]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -24.50
galaxy_faint_absolute_magnitude_cut = -24.00
refbias = false
nmean=1
[catalog_15]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -25.00
galaxy_faint_absolute_magnitude_cut = -24.50
refbias = false
nmean=1

View file

@ -0,0 +1,334 @@
[system]
console_output=logares.txt
mask_precision=0.7
VERBOSE_LEVEL = 3
N0 = 32
N1 = 32
N2 = 32
L0 = 677.7
L1 = 677.7
L2 = 677.7
corner0 = -338.85
corner1 = -338.85
corner2 = -338.85
NUM_MODES=100
N_MC=100
borg_supersampling=1
borg_forcesampling=1
borg_pm_nsteps=30
borg_pm_start_z=69
borg_lightcone=false
borg_do_rsd=false
hades_forward_model=LPT_CIC
hades_likelihood=BORG_POISSON
seed = 1234
test_mode=true
# If true, the initial power spectrum of the chain is set to the cosmological one
seed_cpower=true
# Indicate which samplers should be blocked for testing purposes
#messenger_signal_blocked=false
power_sampler_a_blocked=true
power_sampler_b_blocked=true
power_sampler_c_blocked=true
#bias_sampler_blocked=false
hades_sampler_blocked=false
hades_max_epsilon=0.05
hades_max_timesteps=20
hades_mixing=1
savePeriodicity=10
[julia]
likelihood_path=test_likelihood_TF.jl
likelihood_module=network
bias_sampler_type=hmclet
[run]
NCAT = 1
[cosmology]
omega_r = 0
omega_k = 0
omega_m = 0.3175
omega_b = 0.049
omega_q = 0.6825
w = -1
wprime = 0
n_s = 0.9624
sigma8 = 0.8344
h100 = 0.6711
beta = 1.5
z0 = 0
# 11.5 mag cut
[catalog_0]
datafile = 2MPP.txt
#maskdata = completeness_11_5.fits.gz
maskdata = one.fits
#maskdata = zero.fits
bias=1
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 15.5
galaxy_bright_absolute_magnitude_cut = -25.50
galaxy_faint_absolute_magnitude_cut = -11.00
refbias = false
nmean=1
[catalog_1]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -22.00
galaxy_faint_absolute_magnitude_cut = -21.50
refbias = false
nmean=1
[catalog_2]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -22.50
galaxy_faint_absolute_magnitude_cut = -22.00
refbias = false
nmean=1
[catalog_3]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -23.00
galaxy_faint_absolute_magnitude_cut = -22.50
refbias = false
nmean=1
[catalog_4]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -23.50
galaxy_faint_absolute_magnitude_cut = -23.00
refbias = false
nmean=1
[catalog_5]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -24.00
galaxy_faint_absolute_magnitude_cut = -23.50
refbias = false
nmean=1
[catalog_6]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -24.50
galaxy_faint_absolute_magnitude_cut = -24.00
refbias = false
nmean=1
[catalog_7]
datafile = 2MPP.txt
maskdata = completeness_11_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 9
galaxy_faint_apparent_magnitude_cut = 11.5
galaxy_bright_absolute_magnitude_cut = -25.00
galaxy_faint_absolute_magnitude_cut = -24.50
refbias = false
nmean=1
# 11.5 - 12.5 mag cut
[catalog_8]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -21.50
galaxy_faint_absolute_magnitude_cut = -21.00
refbias = false
nmean=1
[catalog_9]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -22.00
galaxy_faint_absolute_magnitude_cut = -21.50
refbias = false
nmean=1
[catalog_10]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -22.50
galaxy_faint_absolute_magnitude_cut = -22.00
refbias = false
nmean=1
[catalog_11]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -23.00
galaxy_faint_absolute_magnitude_cut = -22.50
refbias = false
nmean=1
[catalog_12]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -23.50
galaxy_faint_absolute_magnitude_cut = -23.00
refbias = false
nmean=1
[catalog_13]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -24.00
galaxy_faint_absolute_magnitude_cut = -23.50
refbias = false
nmean=1
[catalog_14]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -24.50
galaxy_faint_absolute_magnitude_cut = -24.00
refbias = false
nmean=1
[catalog_15]
datafile = 2MPP.txt
maskdata = completeness_12_5.fits.gz
radial_selection = schechter
schechter_mstar = -23.28
schechter_alpha = -0.94
schechter_sampling_rate = 1000
schechter_dmax = 700
galaxy_bright_apparent_magnitude_cut = 11.5
galaxy_faint_apparent_magnitude_cut = 12.5
galaxy_bright_absolute_magnitude_cut = -25.00
galaxy_faint_absolute_magnitude_cut = -24.50
refbias = false
nmean=1

View file

@ -0,0 +1,187 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/example/test_like.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
module julia_test
using ..libLSS
using NPZ
import ..libLSS.State
import ..libLSS.GhostPlanes, ..libLSS.get_ghost_plane
import ..libLSS.print, ..libLSS.LOG_INFO, ..libLSS.LOG_VERBOSE, ..libLSS.LOG_DEBUG
# Change of variable between the unconstrained sampled parameters
# (bias_tilde) and the physical, positive parameters: bias = exp(bias_tilde).
# The Jacobian bookkeeping for this mapping is derived in the comment block
# above likelihood_bias below.
apply_transform(bias_tilde) = exp.(bias_tilde)
apply_inv_transform(bias) = log.(bias)
# Allocate and initialize the per-catalog bias parameters.
# Each catalog gets 2 parameters: [1] = linear bias, [2] = noise variance.
# They are stored in the unconstrained (log) space seen by the sampler.
function initialize(state)
print(LOG_INFO, "Likelihood initialization in Julia")
NCAT = libLSS.get(state, "NCAT", Int64)
print(LOG_VERBOSE, "Found " *repr(NCAT) * " catalogues")
for catalog in 0:(NCAT-1)
# galaxies = libLSS.get_galaxy_descriptor(state, catalog)
# print(LOG_VERBOSE, repr(size(galaxies)))
# all_spin = getfield.(galaxies, :spin)
bias = libLSS.resize_array(state, "galaxy_bias_"*repr(catalog), 2, Float64)
bias[1] = 1
bias[2] = 0.01
# Convert the physical starting values to the unconstrained space.
bias .= apply_inv_transform(bias)
end
end
# This likelihood needs no ghost (MPI boundary) planes of the density field.
function get_required_planes(state::State)
print(LOG_INFO, "Check required planes")
return Array{UInt64,1}([])
end
# Negative log-likelihood of all catalogs given the density field `array`;
# the per-catalog work is delegated to likelihood_bias.
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_VERBOSE, "Likelihood evaluation in Julia")
N0 = libLSS.get(state, "N0", Int64)
NCAT = libLSS.get(state, "NCAT", Int64)
L = Float64(0)
for catalog in 0:(NCAT-1)
sc = repr(catalog)
b = libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)
L += likelihood_bias(state, ghosts, array, catalog, b)
end
print(LOG_VERBOSE, "Likelihood is " * repr(L))
return L
end
# Draw mock galaxy data per catalog:
#   data = S*(1 + bias*delta) + sqrt(S)*sqrt(b[2])*N(0,1)
# where S is the selection window. b[2] is the noise variance, hence the
# sqrt when used as a standard deviation here.
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Generate mock")
NCAT = libLSS.get(state, "NCAT", Int64)
for cat in 0:(NCAT-1)
sc = repr(cat)
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
b = apply_transform(libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64))
print(LOG_VERBOSE, "Bias for mock is $(b)")
S = libLSS.get_array_3d(state, "galaxy_sel_window_$(sc)", Float64)
s = size(data)
print(LOG_INFO, "Shape is " * repr(size(data)) * " and " * repr(size(array)))
print(LOG_INFO, "Number of threads " * repr(Threads.nthreads()))
N0=s[1]
N1=s[2]
N2=s[3]
noise = sqrt(b[2])
print(LOG_INFO, "Noise is $(noise)")
bias = b[1]
for i=1:N0,j=1:N1,k=1:N2
data[i,j,k] = S[i,j,k]*(1+bias*array[i,j,k]) + sqrt(S[i,j,k])*noise*libLSS.gaussian(state)
end
end
end
# Gradient of the negative log-likelihood with respect to the density field,
# accumulated into `ag` over all catalogs, restricted to voxels where the
# selection window is positive.
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
print(LOG_VERBOSE, "Adjoint gradient in Julia")
N0 = libLSS.get(state, "N0", Int64)
NCAT = libLSS.get(state, "NCAT", Int64)
L = Float64(0)
ag[:,:,:] .= 0
for catalog in 0:(NCAT-1)
sc = repr(catalog)
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
b = apply_transform(libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64))
S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
# b[2] is the noise variance (used directly here, unlike the mock above
# which needs its square root).
noise = b[2]
bias = b[1]
Smask = findall(S.>0)
ag[Smask] += -(data[Smask] .- S[Smask].*(1 .+ bias*array[Smask]))*bias/noise
end
end
# 1/2 sum( (data - S (1 + b rho))^2 / (S*n) )
# There is a change of variable to map [-infinity, infinity] to [0, infinity]
# y = exp(x) (x is the bias_tilde, y is the bias params)
# we know the function in terms of y though, but the posterior must be in terms of x
# probability conservation:
# f_tilde(x) dx = f(y) dy
#
# f_tilde(x) = f(y) dy/dx = f(y) exp(x) -> -log(f_tilde) = -log(f) - y
# -dlog(f_tilde(x))/dx = -dlog(f_tilde)/dy dy/dx = (-dlog(f)/dy - 1) * dy/dx
# dy/dx = exp(x) = y
# Per-catalog negative log-likelihood: Gaussian chi^2 term + 0.5*N*log(noise)
# normalization term - change-of-variable term for parameter 1.
# NOTE(review): only the first parameter's change-of-variable term is
# subtracted (see the inline comment); confirm the noise parameter is
# intentionally excluded -- the adjoint below is consistent with this choice.
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias_tilde)
catalog_bias = apply_transform(catalog_bias_tilde)
sc = string(catalog_id)
print(LOG_VERBOSE,"Catalog id is " * sc * " bias is " * repr(catalog_bias))
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
Smask = findall(S.>0)
noise = catalog_bias[2]
bias = catalog_bias[1]
prior_bias = catalog_bias_tilde[1] # Not the bias-tilde-2
return 0.5*sum(
(data[Smask] .- S[Smask].*(1 .+ bias.*array[Smask])).^2 ./ (S[Smask].*noise)
) + 0.5*size(Smask)[1]*log(noise) - prior_bias
end
# Initial HMC step-size hint for one bias parameter.
function get_step_hint(state, catalog_id, bias_id)
return 0.1
end
# Flat prior in the unconstrained space (the change-of-variable term is
# already accounted for inside likelihood_bias).
function log_prior_bias(state, catalog_id, bias_tilde)
# Change of variable bias = exp(bias_tilde)
return 0
end
# Gradient of likelihood_bias with respect to the unconstrained parameters:
# the gradients in physical space are chain-ruled by dy/dx = y
# (`.*= catalog_bias`), then the -1 from the change-of-variable term of
# parameter 1 is subtracted.
function adjoint_bias(state::State, ghosts::GhostPlanes,
array, catalog_id, catalog_bias_tilde, adjoint_gradient_bias)
catalog_bias = apply_transform(catalog_bias_tilde)
print(LOG_VERBOSE,"ADJOINT: Catalog id is $(catalog_id), bias is $(catalog_bias), bias_tilde is $(catalog_bias_tilde)")
sc = string(catalog_id)
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
Smask = findall(S.>0)
noise = catalog_bias[2]
bias = catalog_bias[1]
delta = (data[Smask] .- S[Smask].*(1 .+ bias*array[Smask]))
adjoint_gradient_bias[1] = -sum(delta.*array[Smask]) ./noise
adjoint_gradient_bias[2] = -0.5*sum(delta.^2 ./ (S[Smask])) /(noise^2) + 0.5 * size(Smask)[1]/noise
adjoint_gradient_bias .*= catalog_bias
adjoint_gradient_bias[1] -= 1 # Derivative of the prior
print(LOG_VERBOSE,"ADJOINT: -> $(adjoint_gradient_bias)")
end
# Diagonal mass matrix used to seed the HMC sampler.
function fill_diagonal_mass_matrix(state::State)
return [1e3,1e3]
# return [1e-5,1e-5]
# return [1e-7,1e-7]
end
# Optional custom initial condition for the chain: reset the bias of
# catalog 0 to (1, 1) in physical space.
# NOTE(review): `d1d` is defined in module libLSS but not imported here;
# this call likely needs `libLSS.d1d` -- TODO confirm.
function generate_ic(state::State)
print(LOG_INFO, "Generate special IC for the chain")
b = libLSS.get_array(state, "galaxy_bias_0", Float64, d1d)
b[1] = 1.
b[2] = 1.
# sref = npzread("velmass_ic_500Mpc_32.npz")["arr_0"]
# s = libLSS.get_array_3d(state, "s_field", Float64)
# s .*= 0.01
# startN0 = libLSS.get(state, "startN0", Int64)
# localN0,N1,N2 = size(s)
# print(LOG_INFO, "Dims = [$(startN0):$(startN0+localN0)]x$(N1)x$(N2)")
# for i=1:localN0, j=1:N1,k=1:N2
# s[i,j,k] = sref[k,j,i+startN0]
##### # 0.01*cos(2*pi*(i-1)/N)*sin(2*pi*(j-1)/N)
# end
# print(LOG_INFO, "DONE DONE")
end
end

View file

@ -0,0 +1,139 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/example/test_like_TF.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
module network
using ..libLSS
import ..libLSS.State
import ..libLSS.GhostPlanes, ..libLSS.get_ghost_plane
import ..libLSS.print, ..libLSS.LOG_INFO, ..libLSS.LOG_VERBOSE, ..libLSS.LOG_DEBUG
using TensorFlow
using PyPlot
# Global TensorFlow session and gradient nodes; adgrad/wgrad are populated
# once by setup() and then reused by every likelihood/adjoint call below.
sess = Session(allow_growth = true)
adgrad = nothing
wgrad = nothing
# Build the TF graph once for an N0 x N1 x N2 grid. All nodes are retrieved
# later by name via get_tensor_by_name, so the `name=` arguments are part of
# this module's internal contract.
# Parameters: p[1] = bias, p[2] = log of the noise variance (exp(p[2]) is
# used wherever a variance is needed).
function setup(N0, N1, N2)
global adgrad, wgrad
p = [TensorFlow.placeholder(Float64, shape = (), name = "bias"), TensorFlow.placeholder(Float64, shape = (), name = "noise")]
δ = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "density")
g = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "galaxy")
s = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "selection")
gaussian = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "gaussian_field")
mask = TensorFlow.placeholder(Bool, shape = Int64[N0, N1, N2], name = "mask")
# Flatten + mask so the loss only runs over voxels where the selection
# window is positive.
mask_ = TensorFlow.reshape(mask, N0 * N1 * N2, name = "flat_mask")
g_ = TensorFlow.identity(TensorFlow.boolean_mask(TensorFlow.reshape(g, N0 * N1 * N2), mask_), name = "flat_masked_galaxy")
s_ = TensorFlow.identity(TensorFlow.boolean_mask(TensorFlow.reshape(s, N0 * N1 * N2), mask_), name = "flat_masked_selection")
# Biased density model: 1 + bias * delta, then selection-weighted.
output = TensorFlow.add(1., TensorFlow.multiply(p[1], δ), name = "biased_density")
mock = TensorFlow.multiply(s, output, name = "selected_biased_density")
mock_ = TensorFlow.identity(TensorFlow.boolean_mask(TensorFlow.reshape(mock, N0 * N1 * N2), mask_), name = "flat_masked_selected_biased_density")
# Mock data: model + sqrt(variance * selection) * unit gaussian field.
mock_galaxy = TensorFlow.add(mock, TensorFlow.multiply(TensorFlow.multiply(TensorFlow.sqrt(TensorFlow.exp(p[2])), TensorFlow.sqrt(s)), gaussian), name = "mock_galaxy")
ms = TensorFlow.reduce_sum(TensorFlow.cast(mask, Float64), name = "number_of_voxels")
# loss = 0.5*chi^2/variance + 0.5*N_voxels*p[2] - exp(p[1]) - exp(p[2]);
# the last two terms act on the (log-space) parameters.
loss = TensorFlow.identity(TensorFlow.add(TensorFlow.multiply(0.5, TensorFlow.reduce_sum(TensorFlow.square(g_ - mock_) / TensorFlow.multiply(TensorFlow.exp(p[2]), s_))), TensorFlow.multiply(0.5, TensorFlow.multiply(ms, p[2]))) - TensorFlow.exp(p[1]) - TensorFlow.exp(p[2]), name = "loss")
# Gradients: adgrad w.r.t. the density field, wgrad w.r.t. each parameter.
adgrad = TensorFlow.gradients(loss, δ)
wgrad = [TensorFlow.gradients(loss, p[i]) for i in range(1, length = size(p)[1])]
end
# Build the graph for the configured grid and allocate the 2 bias
# parameters of catalog 0, initialized to log(1) = 0 (log space).
function initialize(state)
print(LOG_INFO, "Likelihood initialization in Julia")
setup(libLSS.get(state, "N0", Int64, synchronous=true), libLSS.get(state, "N1", Int64, synchronous=true), libLSS.get(state, "N2", Int64, synchronous=true))
bias = libLSS.resize_array(state, "galaxy_bias_0", 2, Float64)
bias[:] .= log(1.)
print(LOG_VERBOSE, "Found " *repr(libLSS.get(state, "NCAT", Int64, synchronous=true)) * " catalogues")
end
# This likelihood needs no ghost (MPI boundary) planes.
function get_required_planes(state::State)
print(LOG_INFO, "Check required planes")
return Array{UInt64,1}([])
end
# Evaluate the "loss" node once per catalog and sum the results.
# The loop is 1-based; state array names use `catalog - 1`.
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Likelihood evaluation in Julia")
L = Float64(0.)
for catalog=1:libLSS.get(state, "NCAT", Int64, synchronous=true)
L += run(sess, TensorFlow.get_tensor_by_name("loss"),
Dict(TensorFlow.get_tensor_by_name("bias")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[1],
TensorFlow.get_tensor_by_name("noise")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[2],
TensorFlow.get_tensor_by_name("density")=>array,
TensorFlow.get_tensor_by_name("galaxy")=>libLSS.get_array_3d(state, "galaxy_data_"*repr(catalog - 1), Float64),
TensorFlow.get_tensor_by_name("selection")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(catalog - 1), Float64),
TensorFlow.get_tensor_by_name("mask")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(catalog - 1), Float64).>0.))
end
print(LOG_VERBOSE, "Likelihood is " * repr(L))
return L
end
# Fill galaxy_data_* by evaluating the "mock_galaxy" node with a freshly
# drawn unit-gaussian field, then save a z-projected PNG of each mock.
# NOTE(review): the log line says generate_mock_data_*.png while savefig
# writes generated_mock_data_*.png -- the message and filename disagree.
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Generate mock")
for catalog in 1:libLSS.get(state, "NCAT", Int64, synchronous=true)
gaussian_field = Array{Float64}(undef, size(array)[1], size(array)[2], size(array)[3])
data = libLSS.get_array_3d(state, "galaxy_data_"*repr(catalog - 1), Float64)
for i=1:size(array)[1],j=1:size(array)[2],k=1:size(array)[3]
gaussian_field[i,j,k] = libLSS.gaussian(state)
end
data[:, :, :] = run(sess, TensorFlow.get_tensor_by_name("mock_galaxy"),
Dict(TensorFlow.get_tensor_by_name("bias")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[1],
TensorFlow.get_tensor_by_name("noise")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[2],
TensorFlow.get_tensor_by_name("density")=>array,
TensorFlow.get_tensor_by_name("selection")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(catalog - 1), Float64),
TensorFlow.get_tensor_by_name("gaussian_field")=>gaussian_field))
print(LOG_INFO, "Plotting generated mock from catalog "*repr(catalog - 1)*" as ./plots/generate_mock_data_"*repr(catalog - 1)*".png")
imshow(dropdims(sum(data, dims = 3), dims = 3))
colorbar()
savefig("plots/generated_mock_data_"*repr(catalog - 1)*".png")
close()
end
end
# Gradient of the loss w.r.t. the density field, accumulated into `ag`
# on the masked (selection > 0) voxels of each catalog.
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
print(LOG_VERBOSE, "Adjoint gradient in Julia")
ag[:,:,:] .= 0
for catalog=1:libLSS.get(state, "NCAT", Int64, synchronous=true)
Smask = libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(catalog - 1), Float64).>0.
ag[Smask] += run(sess, adgrad,
Dict(TensorFlow.get_tensor_by_name("bias")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[1],
TensorFlow.get_tensor_by_name("noise")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[2],
TensorFlow.get_tensor_by_name("density")=>array, TensorFlow.get_tensor_by_name("galaxy")=>libLSS.get_array_3d(state, "galaxy_data_"*repr(catalog - 1), Float64),
TensorFlow.get_tensor_by_name("selection")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(catalog - 1), Float64),
TensorFlow.get_tensor_by_name("mask")=>Smask))[Smask]
end
end
# Loss for one catalog at explicitly supplied parameter values
# (catalog_bias[1] = bias, catalog_bias[2] = log noise variance).
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias)
print(LOG_VERBOSE, "Likelihood bias in Julia")
return run(sess, TensorFlow.get_tensor_by_name("loss"),
Dict(TensorFlow.get_tensor_by_name("bias")=>catalog_bias[1],
TensorFlow.get_tensor_by_name("noise")=>catalog_bias[2],
TensorFlow.get_tensor_by_name("density")=>array,
TensorFlow.get_tensor_by_name("galaxy")=>libLSS.get_array_3d(state, "galaxy_data_"*string(catalog_id), Float64),
TensorFlow.get_tensor_by_name("selection")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*string(catalog_id), Float64),
TensorFlow.get_tensor_by_name("mask")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*string(catalog_id), Float64) .> 0.))
end
# Initial HMC step-size hint for one bias parameter.
function get_step_hint(state, catalog_id, bias_id)
return 0.1
end
# Flat prior on the sampled parameters.
function log_prior_bias(state, catalog_id, bias_tilde)
return 0.
end
# Gradient of the loss with respect to the two parameters, evaluated with
# the precomputed wgrad nodes.
function adjoint_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias, adjoint_gradient_bias)
print(LOG_VERBOSE, "Adjoint gradient of bias in Julia")
adjoint_gradient_bias .= run(sess, wgrad,
Dict(TensorFlow.get_tensor_by_name("bias")=>catalog_bias[1],
TensorFlow.get_tensor_by_name("noise")=>catalog_bias[2],
TensorFlow.get_tensor_by_name("density")=>array,
TensorFlow.get_tensor_by_name("galaxy")=>libLSS.get_array_3d(state, "galaxy_data_"*string(catalog_id), Float64),
TensorFlow.get_tensor_by_name("selection")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*string(catalog_id), Float64),
TensorFlow.get_tensor_by_name("mask")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*string(catalog_id), Float64) .> 0.))
end
end

View file

@ -0,0 +1,184 @@
#
# This is a very drafty test framework for the Julia likelihood.
# It makes it possible to test the basics of the likelihood without
# running a full BORG machine.
#
# Path of the likelihood implementation exercised by this harness; it is
# loaded by include() after the mock libLSS module is defined below.
JULIA_LIKELIHOOD="sim_run_quadratic.jl"
# Minimal mock of the libLSS interface that BORG/ARES exposes to Julia
# likelihood code. Only the pieces needed by the example likelihoods are
# implemented; the sampler state is backed by plain Dicts instead of the
# C++ machinery.
module libLSS
using HDF5

# Mirror of the C++ galaxy descriptor. Fields constructed as 0 are not
# present in the test HDF5 catalog.
struct GalaxyDescriptor
    id::Clonglong
    phi::Cdouble
    theta::Cdouble
    zo::Cdouble
    m::Cdouble
    M_abs::Cdouble
    Mgal::Cdouble
    z::Cdouble
    r::Cdouble
    w::Cdouble
    final_w::Cdouble
    radius::Cdouble
    spin::Cdouble
    posx::Cdouble
    posy::Cdouble
    posz::Cdouble
    vx::Cdouble
    vy::Cdouble
    vz::Cdouble
    # Build a descriptor from one 11-field compound record of the test
    # catalog. Record layout: 1=id, 2=Mgal, 3=radius, 4=spin,
    # 5..7=position, 8..10=velocity, 11=weight.
    function GalaxyDescriptor(c::HDF5.HDF5Compound{11})
        new(c.data[1],                       # id
            0, 0, 0, 0, 0,                   # phi, theta, zo, m, M_abs
            c.data[2], 0, 0, c.data[11],     # Mgal, z, r, w
            c.data[11],                      # final_w
            c.data[3],                       # radius
            c.data[4],                       # spin
            c.data[5], c.data[6], c.data[7], # posx, posy, posz
            # BUGFIX: vy previously reused c.data[8] (vx) and skipped
            # record field 9 entirely.
            c.data[8], c.data[9], c.data[10] # vx, vy, vz
        )
    end
end

# Phantom type used to tag the dimensionality of requested arrays.
struct DimensionSpec{N} end
d1d = DimensionSpec{1}()
d2d = DimensionSpec{2}()
d3d = DimensionSpec{3}()

# Mock sampler state: `info` maps state entry names to values,
# `descriptors` maps a catalog index to its galaxy list.
struct State
    info::Dict{String,Any}
    descriptors::Dict{Int,Array{GalaxyDescriptor,1}}
    function State()
        new(Dict{String,Any}(), Dict{Int,Array{GalaxyDescriptor,1}}())
    end
end

# Ghost-plane exchange is not modeled in the test framework.
struct GhostPlanes end
# function get_ghost_plane()
# end

# Read a scalar state entry, converted to T. The `synchronous` keyword is
# accepted for API compatibility but ignored here (no MPI in the mock).
function get(state::State, name::String, ::Type{T}; synchronous=false) where {T}
    return T(state.info[name])
end

# No-op in the mock: the real implementation sizes an array automatically.
function autosize_array(state::State, name::String, ::Bool, ::Type{T}, ::DimensionSpec{N}) where{T,N}
end

# Replace the named 1-d state array with one of length N, preserving the
# overlapping prefix. Entries beyond the old length are left uninitialized,
# mirroring a raw resize. `cppOrder` is accepted for API compatibility.
function resize_array(state::State, name::String, N::Int, ::Type{T}, cppOrder=true) where {T}
    a = Array{T,1}(undef, N)
    L = min(N, length(state.info[name]))
    a[1:L] .= state.info[name][1:L]
    state.info[name] = a
    return a
end

# Create a fresh, uninitialized 1-d state array of length N under `name`.
function new_array(state::State, name::String, N::Int, ::Type{T}, cppOrder=true) where {T}
    a = Array{T,1}(undef, N)
    state.info[name] = a
    return a
end

# Galaxy list of one catalog.
function get_galaxy_descriptor(state::State, catalog::Int)
    state.descriptors[catalog]
end

@enum LOG_LEVEL LOG_INFO=0 LOG_VERBOSE=1 LOG_DEBUG=2 LOG_ERROR=3

# Uniform random draw in [0, 1); the mock ignores the state RNG.
function uniform(state::State)
    rand(Float64)
end

# Fetch a state array; the DimensionSpec tag is only used for dispatch.
function get_array(state::State, name::String, ::Type{T}, ::DimensionSpec{N}) where {T,N}
    return state.info[name]
end

# Raised by likelihood code when an adjoint-gradient evaluation fails.
struct BadGradient <: Exception end

# Fixed-width textual tag for a log level (padded to align log output).
function prefix(id::LOG_LEVEL)
    if id == LOG_INFO
        return "INFO "
    elseif id == LOG_VERBOSE
        return "VERBOSE"
    elseif id == LOG_DEBUG
        return "DEBUG "
    elseif id == LOG_ERROR
        return "ERROR "
    end
    return "WEIRD"
end

# Logging shim matching libLSS.print(level, text) used by likelihood code.
function print(id, text)
    println("[$(prefix(id))] $(text)")
end
end
# ---------------------------------------------------------------------------
# Driver: build a fake libLSS state, load a halo catalog, then compare the
# analytic adjoint gradient of the bias likelihood against a central
# finite-difference estimate.
# ---------------------------------------------------------------------------
include(JULIA_LIKELIHOOD)
using HDF5
using NPZ
# Minimal state: one catalog on a full 32^3 slab (no MPI decomposition).
state = libLSS.State()
state.info["NCAT"] = 1
state.info["startN0"] = 0
state.info["localN0"] = 32
state.info["N0"] = 32
state.info["N1"] = 32
state.info["N2"] = 32
state.info["MCMC_STEP"] = 1
# External fixtures: random numbers and the halo catalog used as data.
state.info["halo_rnd"] = npzread("rnd.npy")
data = h5read("halo_full.h5", "data")
state.descriptors[0] = libLSS.GalaxyDescriptor.(data)
libLSS.new_array(state, "galaxy_bias_0", 1, Float64)
ghosts = libLSS.GhostPlanes()
# Let the likelihood set up its internal state (and default bias vector).
lkl_julia.initialize(state)
primary_pars = libLSS.get(state, "galaxy_bias_0", Array{Float64,1})
# Previously recorded parameter sets kept for reference/regression checks:
#primary_pars=[-0.184885, -10.9406, 0.201633, -2.0811, 26.0899, 0.425483, 0.933645]
#primary_pars=[-1.40152, -1.81401, 3.15168, 2.19131, 31.4598, 1.03612, -0.403347,-0.576611, -37.0138, 4.32735, -8.23236, -0.374622]
#primary_pars=[-1.5537, -2.00498, -10.4029, 2.09403, 30.367, 0.0492851, 0.682411, -1.19255, -7.96303, 2.81621, 31.0215, -1.71585]
#primary_pars=[-5.07158, -5.40877, -22.1815, 6.7823, 11.8081 , 6.26203, -80.481, -0.635414, -194.427, 1.07606, 116.867, -3.16294]
#primary_pars = [-3.85992, -4.13413, -16.7243, 4.96438, 19.1427, 3.14645, -0.886257, -0.843103, -151.239, 1.7652, 109.884, -2.72664]
#primary_pars = [-3.89838, -4.16932, -16.8285, 5.01234, 18.9541, 3.19879, -0.870979, -0.837335, -151.525, 1.74808, 113.346, -2.75558]
##++
# The parameter point at which the gradient is checked.
primary_pars = [-3.89838, -4.16932, -16.8285, 5.01234, 18.9541, 3.19879, -0.870979, -0.837335, -151.525, 1.74808, 111.346, -2.75558]
##++
#primary_pars = [-2.80186, -3.16606, -13.8643, 3.6514, 24.3226, 1.6907, -1.30828, -1.00266, -239.525, 2.23281, -15.442, -1.67615]
# Smooth separable test density field in [-0.512, 0.512].
density = Array{Float64, 3}(undef, 32, 32, 32)
u = sin.((0:1:31).*2pi/32)*0.8
density .= reshape(u, :, 1, 1).*reshape(u, 1, 1, :).*reshape(u, 1, :, 1)
# Reference likelihood value and analytic gradient (timed on second call
# so compilation overhead is excluded).
Lref=lkl_julia.likelihood_bias(state, ghosts, density, 0, primary_pars)
ag_bias = Array{Float64,1}(undef, size(primary_pars))
lkl_julia.adjoint_bias(state, ghosts, density, 0, primary_pars, ag_bias)
@time lkl_julia.adjoint_bias(state, ghosts, density, 0, primary_pars, ag_bias)
if true
  # Central finite-difference gradient, one parameter at a time, with a
  # relative step of 1e-5 (note: step is zero if a parameter is zero).
  ag_bias2 = Array{Float64,1}(undef, size(primary_pars))
  for i in 1:length(primary_pars)
    prim2 = deepcopy(primary_pars)
    dx = 0.00001*abs(prim2[i])
    prim2[i] += dx
    ag_bias2[i] = lkl_julia.likelihood_bias(state, ghosts, density, 0, prim2)
    prim2[i] = primary_pars[i] - dx
    ag_bias2[i] -= lkl_julia.likelihood_bias(state, ghosts, density, 0, prim2)
    ag_bias2[i] /= (2dx)
  end
  println(" Numeric is $(ag_bias2)")
end
println(" Analytic is $(ag_bias)")

View file

@ -0,0 +1,19 @@
# HMClet module: registers its sources with the ARES build. Requires the
# hades module to be available.
require_ares_module(hades)

set(EXTRA_HMCLET ${CMAKE_SOURCE_DIR}/extra/hmclet)

# Core HMClet sources (mass matrices and the samplers).
list(APPEND EXTRA_LIBLSS
  ${EXTRA_HMCLET}/libLSS/hmclet/hmclet.cpp
  ${EXTRA_HMCLET}/libLSS/hmclet/hmclet_qnhmc.cpp
  ${EXTRA_HMCLET}/libLSS/hmclet/diagonal_mass.cpp
  ${EXTRA_HMCLET}/libLSS/hmclet/mass_burnin.cpp
  ${EXTRA_HMCLET}/libLSS/hmclet/dense_mass.cpp
)

# Julia-backed samplers are only built when Julia support is enabled.
if(BUILD_JULIA)
  list(APPEND EXTRA_LIBLSS
    ${EXTRA_HMCLET}/libLSS/hmclet/julia_slice.cpp
    ${EXTRA_HMCLET}/libLSS/hmclet/julia_hmclet.cpp
  )
endif()

View file

@ -0,0 +1,130 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/dense_mass.cpp
Copyright (C) 2014-2020 2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/dense_mass.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
// Persist the full mass-matrix state into the HDF5 group: the running
// covariance and mean, the initial covariance guess, the sample count and
// the blended covariance actually used by the sampler.
void DenseMassMatrix::saveMass(CosmoTool::H5_CommonFileGroup &g) {
  boost::multi_array<double, 2> matrix_buffer(
      boost::extents[numParams][numParams]);
  boost::multi_array<double, 1> mean_buffer(boost::extents[numParams]);
  Eigen::Map<Eigen::MatrixXd> matrix_view(
      matrix_buffer.data(), numParams, numParams);

  matrix_view.noalias() = covariances;
  CosmoTool::hdf5_write_array(g, "covariance", matrix_buffer);

  Eigen::Map<Eigen::VectorXd>(mean_buffer.data(), numParams).noalias() = mean;
  CosmoTool::hdf5_write_array(g, "mean", mean_buffer);

  matrix_view.noalias() = icCovar;
  CosmoTool::hdf5_write_array(g, "icCovariance", matrix_buffer);

  hdf5_save_scalar(g, "numInMass", numInMass);

  matrix_view.noalias() = finishedCovariances;
  CosmoTool::hdf5_write_array(g, "finishedCovariances", matrix_buffer);
}
// Restore the mass-matrix state previously written by saveMass() and
// refresh the Cholesky factorization of the blended covariance.
// Fix: removed the unused local `cons` (Console::instance() reference).
void DenseMassMatrix::loadMass(CosmoTool::H5_CommonFileGroup &g) {
  boost::multi_array<double, 2> mass_buf(boost::extents[numParams][numParams]);
  boost::multi_array<double, 1> mean_buf(boost::extents[numParams]);
  Eigen::Map<Eigen::MatrixXd> map_buf(mass_buf.data(), numParams, numParams);

  CosmoTool::hdf5_read_array(g, "covariance", mass_buf, false);
  covariances = map_buf;
  CosmoTool::hdf5_read_array(g, "icCovariance", mass_buf, false);
  icCovar = map_buf;
  CosmoTool::hdf5_read_array(g, "finishedCovariances", mass_buf, false);
  finishedCovariances.noalias() = map_buf;
  CosmoTool::hdf5_read_array(g, "mean", mean_buf, false);
  numInMass = hdf5_load_scalar<size_t>(g, "numInMass");
  mean = Eigen::Map<Eigen::VectorXd>(mean_buf.data(), numParams);
  Console::instance().print<LOG_INFO>("loaded mass.");
  // The sampler relies on the factorization being in sync with
  // finishedCovariances, so recompute it right after loading.
  lltOfCovariances.compute(finishedCovariances);
}
// Accumulate one parameter vector into the online mean/covariance estimate,
// then rebuild the blended covariance. No-op once the matrix is frozen.
// Fix: removed the unused `using CosmoTool::square;` declaration.
void DenseMassMatrix::addMass(VectorType const &params) {
  if (frozen)
    return;
  auto f_params = Eigen::Map<const Eigen::VectorXd>(params.data(), numParams);
  // Recurrence weights for the online update. Note coef2 is infinite when
  // numInMass == 0, but it is only used in the numInMass >= 1 branch.
  double coef = double(numInMass) / double(numInMass + 1);
  double coef2 = 1 / double(numInMass);
  double coef3 = 1 / double(numInMass + 1);

  if (numInMass == 0)
    mean = f_params;
  else
    mean = coef * mean + coef3 * f_params;
  if (numInMass >= 1) {
    // Rank-one covariance update around the freshly updated mean.
    auto c = f_params - mean;
    covariances = coef * covariances + coef2 * c * c.adjoint();
  }
  numInMass++;
  finishMass();
}
// Blend the measured covariance with the initial guess (weight decays as
// more samples arrive), damp the off-diagonal terms with the correlation
// limiter, and refresh the Cholesky factorization.
void DenseMassMatrix::finishMass() {
  ConsoleContext<LOG_DEBUG> ctx("DenseMassMatrix::finishMass");

  double const w = initialMassWeight / double(initialMassWeight + numInMass);
  double const corrector = limiter;

  finishedCovariances = (1 - w) * covariances + w * icCovar;
  // Only off-diagonal entries are damped; the variances stay untouched.
  for (size_t i = 0; i < numParams; i++) {
    for (size_t j = 0; j < numParams; j++) {
      if (i == j)
        continue;
      finishedCovariances(i, j) /= (1 + corrector);
    }
  }

  lltOfCovariances.compute(finishedCovariances);
}
// Drop all accumulated statistics and fall back to the initial covariance
// guess with a fresh pseudo-sample weight of 10.
void DenseMassMatrix::clear() {
  ConsoleContext<LOG_DEBUG> ctx("DenseMassMatrix::clear");
  numInMass = 0;
  initialMassWeight = 10;
  covariances.fill(0);
  finishedCovariances.fill(0);
  mean.fill(0);
  finishMass();
}
// Install an externally provided covariance matrix as the initial mass
// guess; it dominates the blend until enough samples are accumulated.
// Throws ErrorBadState when the matrix shape does not match numParams.
void DenseMassMatrix::setInitialMass(
    boost::multi_array_ref<double, 2> const &massMatrix) {
  bool const shape_ok = massMatrix.shape()[0] == numParams &&
                        massMatrix.shape()[1] == numParams;
  if (!shape_ok)
    error_helper<ErrorBadState>("Invalid mass matrix size");

  for (size_t row = 0; row < numParams; row++)
    for (size_t col = 0; col < numParams; col++)
      icCovar(row, col) = massMatrix[row][col];

  initialMassWeight = 10;
  finishMass();
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2019

View file

@ -0,0 +1,106 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/dense_mass.hpp
Copyright (C) 2014-2020 2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_DENSE_MASS_HPP
# define __LIBLSS_HMCLET_DENSE_MASS_HPP
# include <memory>
# include <Eigen/Core>
# include <Eigen/Cholesky>
# include <Eigen/Eigenvalues>
# include <boost/multi_array.hpp>
# include "libLSS/samplers/core/random_number.hpp"
# include <CosmoTool/hdf5_array.hpp>
# include "libLSS/tools/errors.hpp"
# include "libLSS/hmclet/hmclet.hpp"
namespace LibLSS {
namespace HMCLet {
class DenseMassMatrix {
protected:
size_t numParams;
Eigen::MatrixXd finishedCovariances, icCovar, covariances;
Eigen::LLT<Eigen::MatrixXd> lltOfCovariances;
Eigen::VectorXd tmp_vector, mean;
size_t initialMassWeight;
size_t numInMass;
boost::multi_array<double, 1> tmp_data;
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
double limiter;
bool frozen;
public:
DenseMassMatrix(size_t numParams_)
: numParams(numParams_), finishedCovariances(numParams, numParams),
icCovar(numParams, numParams), covariances(numParams, numParams),
lltOfCovariances(numParams), tmp_vector(numParams), mean(numParams),
initialMassWeight(0), numInMass(0),
tmp_data(boost::extents[numParams]), limiter(0.5), frozen(false) {
icCovar.setIdentity();
clear();
}
void setInitialMass(boost::multi_array_ref<double, 2> const &params);
void freezeInitial() { icCovar = covariances; }
void freeze() { frozen = true; }
void setCorrelationLimiter(double limiter_) { limiter = limiter_; }
void saveMass(CosmoTool::H5_CommonFileGroup &g);
void loadMass(CosmoTool::H5_CommonFileGroup &g);
void addMass(VectorType const &params);
void clear();
template <
typename A, typename U = typename std::enable_if<
is_wrapper<A>::value, void>::type>
auto operator()(A const &q) {
auto tmpv = Eigen::Map<Eigen::VectorXd>(tmp_data.data(), numParams);
for (size_t i = 0; i < numParams; i++)
tmp_vector(i) = (*q)[i];
tmpv.noalias() = finishedCovariances * tmp_vector;
return fwrap(tmp_data);
}
auto operator()(VectorType const &q) { return operator()(fwrap(q)); }
template <typename A, typename B>
auto operator()(A const &a, B &&) {
return operator()(a);
}
auto sample(RandomNumber &rgen) -> decltype(fwrap(tmp_data)) {
boost::multi_array<double, 1> tmp_data2(boost::extents[numParams]);
auto tmpv2 = Eigen::Map<Eigen::VectorXd>(tmp_data2.data(), numParams);
auto tmpv = Eigen::Map<Eigen::VectorXd>(tmp_data.data(), numParams);
fwrap(tmp_data2) = rgen.gaussian(fwrap(b_fused_idx<double, 1>(
[](int) { return 1; }, boost::extents[numParams])));
tmpv = lltOfCovariances.matrixL().solve(tmpv2);
return fwrap(tmp_data);
}
void computeMainComponents() { es.compute(finishedCovariances); }
auto components() { return es.eigenvectors(); }
auto eigenValues() { return es.eigenvalues(); }
Eigen::VectorXd const &getMean() const { return mean; }
protected:
void finishMass();
};
} // namespace HMCLet
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2019

View file

@ -0,0 +1,103 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/diagonal_mass.cpp
Copyright (C) 2014-2020 2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/diagonal_mass.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
// Dump the diagonal-mass state to the HDF5 group: per-parameter masses,
// running mean, initial mass guess, sample count and frozen flag.
void DiagonalMassMatrix::saveMass(CosmoTool::H5_CommonFileGroup &g) {
  hdf5_save_scalar(g, "numInMass", numInMass);
  hdf5_save_scalar(g, "frozen", frozen);
  CosmoTool::hdf5_write_array(g, "mass", masses);
  CosmoTool::hdf5_write_array(g, "mean", mean);
  CosmoTool::hdf5_write_array(g, "icMass", icMass);
}
// Restore the state written by saveMass() and re-derive the cached
// 1/sqrt(mass) vector from the loaded masses.
void DiagonalMassMatrix::loadMass(CosmoTool::H5_CommonFileGroup &g) {
  numInMass = hdf5_load_scalar<size_t>(g, "numInMass");
  frozen = hdf5_load_scalar<bool>(g, "frozen");
  CosmoTool::hdf5_read_array(g, "mass", masses);
  CosmoTool::hdf5_read_array(g, "mean", mean);
  CosmoTool::hdf5_read_array(g, "icMass", icMass);
  fwrap(inv_sqrt_masses) = std::sqrt(1 / fwrap(masses));
}
// Accumulate one parameter vector into the online per-parameter
// mean/variance estimate, then rebuild the blended masses. No-op once
// the matrix is frozen.
// Fix: removed the unused `using CosmoTool::square;` declaration.
void DiagonalMassMatrix::addMass(VectorType const &params) {
  if (frozen)
    return;
  auto f_mean = fwrap(mean);
  auto f_variances = fwrap(variances);
  auto f_params = fwrap(params);
  // Recurrence weights for the online update. Note coef2 is infinite when
  // numInMass == 0, but it is only used in the numInMass >= 1 branch.
  double coef = double(numInMass) / double(numInMass + 1);
  double coef2 = 1 / double(numInMass);
  double coef3 = 1 / double(numInMass + 1);

  if (numInMass == 0)
    f_mean = f_params;
  else
    f_mean = coef * f_mean + coef3 * f_params;
  if (numInMass >= 1) {
    // Element-wise variance update around the freshly updated mean.
    auto c = f_params - f_mean;
    f_variances = coef * f_variances + coef2 * c * c;
  }
  numInMass++;
  finishMass();
}
// Blend the measured variances with the initial mass guess (the guess's
// weight decays as samples accumulate) and refresh the cached
// 1/sqrt(mass) vector used for momentum sampling.
void DiagonalMassMatrix::finishMass() {
  ConsoleContext<LOG_DEBUG> ctx("DiagonalMassMatrix::finishMass");
  auto fm = fwrap(variances);
  double w = initialMassWeight / double(initialMassWeight + numInMass);
  auto f_M = fwrap(masses);
  auto f_inv_sq = fwrap(inv_sqrt_masses);
  f_M = (1 - w) * fm + w * fwrap(icMass);
  f_inv_sq = std::sqrt(1 / f_M);
  // NOTE(review): the first message prints max(mass) * 1e5 under the label
  // "mass weight" — looks like a debugging leftover; confirm intent.
  ctx.print("mass weight = " + to_string(f_M.max() * 1e5));
  ctx.print("inv_sqrt_masses weight = " + to_string(f_inv_sq.max()));
}
// Drop all accumulated statistics and fall back to the initial mass guess
// with a fresh pseudo-sample weight of 5.
void DiagonalMassMatrix::clear() {
  numInMass = 0;
  initialMassWeight = 5;
  fwrap(variances) = 0;
  fwrap(masses) = 0;
  fwrap(mean) = 0;
  finishMass();
}
// Install externally supplied per-parameter masses as the initial guess;
// they dominate the blend until enough samples are accumulated.
void DiagonalMassMatrix::setInitialMass(
    VectorType const &diagonal_mass_matrix) {
  initialMassWeight = 5;
  fwrap(icMass) = diagonal_mass_matrix;
  finishMass();
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2019

View file

@ -0,0 +1,75 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/diagonal_mass.hpp
Copyright (C) 2014-2020 2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_DIAGONAL_MASS_HPP
# define __LIBLSS_HMCLET_DIAGONAL_MASS_HPP
# include <memory>
# include <boost/multi_array.hpp>
# include "libLSS/samplers/core/random_number.hpp"
# include <CosmoTool/hdf5_array.hpp>
# include "libLSS/tools/errors.hpp"
# include "libLSS/hmclet/hmclet.hpp"
namespace LibLSS {
namespace HMCLet {
class DiagonalMassMatrix {
protected:
size_t numParams;
boost::multi_array<double, 1> masses, inv_sqrt_masses, icMass, variances;
boost::multi_array<double, 1> mean;
size_t initialMassWeight;
size_t numInMass;
bool frozen;
public:
DiagonalMassMatrix(size_t numParams_)
: numParams(numParams_), masses(boost::extents[numParams]),
inv_sqrt_masses(boost::extents[numParams]),
icMass(boost::extents[numParams]), mean(boost::extents[numParams]),
variances(boost::extents[numParams]), numInMass(0),
initialMassWeight(0), frozen(false) {}
void setInitialMass(VectorType const &params);
void freeze() { frozen = true; }
void freezeInitial() { fwrap(icMass) = fwrap(masses); }
void saveMass(CosmoTool::H5_CommonFileGroup &g);
void loadMass(CosmoTool::H5_CommonFileGroup &g);
void addMass(VectorType const &params);
void clear();
template <typename A>
auto operator()(A const &q) const {
return q * fwrap(masses);
}
template<typename A, typename B>
auto operator()(A const& a, B&& b) const {
return operator()(a);
}
auto sample(RandomNumber &rgen) const
-> decltype(rgen.gaussian(fwrap(inv_sqrt_masses))) {
return rgen.gaussian(fwrap(inv_sqrt_masses));
}
protected:
void finishMass();
};
} // namespace HMCLet
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2019

View file

@ -0,0 +1,152 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/itertools.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
constexpr static int ROOT_RANK = 0;
// Construct a sampler over the given joint posterior; the mass matrix is
// sized from the posterior's parameter count and the persistent momentum
// (used for partial momentum refresh) starts at zero.
template <typename MassType>
SimpleSampler<MassType>::SimpleSampler(
    std::shared_ptr<JointPosterior> _posterior)
    : numParams(_posterior->getNumberOfParameters()), massMatrix(numParams),
      posterior(_posterior), momentum(boost::extents[numParams]) {
  ConsoleContext<LOG_DEBUG> ctx("hmclet constructor");
  fwrap(momentum) = 0;
}
// Out-of-line destructor kept so it is emitted with the explicit template
// instantiations at the end of this file.
template <typename MassType>
SimpleSampler<MassType>::~SimpleSampler() = default;
// Calibration phase: run `numSteps` sweeps of univariate slice sampling
// (one parameter at a time) to get a first estimate of the posterior
// width, feeding each accepted state into the mass matrix. The resulting
// estimate is then frozen in as the initial mass.
template <typename MassType>
void SimpleSampler<MassType>::calibrate(
    MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
    VectorType const &initial_params, VectorType const &initial_step) {
  ConsoleContext<LOG_DEBUG> ctx("hmcLet calibrate");
  using CosmoTool::square;
  boost::multi_array<double, 1> params(boost::extents[numParams]);
  fwrap(params) = initial_params;
  massMatrix.clear();
  // We do a few loops to have an idea of the width of the posterior.
  for (size_t i = 0; i < numSteps; i++) {
    for (size_t j = 0; j < numParams; j++) {
      // Slice-sweep parameter j with all others held at their current
      // values; the lambda returns the negative log-posterior.
      params[j] = slice_sweep_double(
          comm, rng,
          [this, j, &params](double x) -> double {
            params[j] = x;
            return -posterior->evaluate(params);
          },
          params[j], initial_step[j]);
    }
    massMatrix.addMass(params);
  }
  massMatrix.freezeInitial();
}
// One HMC transition: draw a (partially refreshed) momentum, integrate the
// Hamiltonian flow with a symplectic integrator, and Metropolis-accept the
// endpoint. Randomization (epsilon, Ntime, momentum, log_u) happens on the
// root rank only and is broadcast so all ranks follow the same trajectory.
template <typename MassType>
void SimpleSampler<MassType>::newSample(
    MPI_Communication *comm, RandomNumber &rgen, VectorType &params) {
  ConsoleContext<LOG_DEBUG> ctx("hmcLet singleSampler");
  auto paramSize = boost::extents[numParams];
  SymplecticIntegrators integrator;
  boost::multi_array<double, 1> tmp_gradient(paramSize), saveParams(paramSize),
      savedMomentum(paramSize);
  double Hstart, Hend, delta_H;
  double epsilon;
  int Ntime;
  if (comm->rank() == ROOT_RANK) {
    // (1 - uniform()) keeps epsilon strictly positive; Ntime >= 1.
    epsilon = maxEpsilon * (1 - rgen.uniform());
    Ntime = 1 + int(maxNtime * rgen.uniform());
    // Partial momentum refresh: momentumScale = 0 is a full redraw.
    fwrap(momentum) = momentumScale * fwrap(momentum) + std::sqrt(1-momentumScale*momentumScale)* massMatrix.sample(rgen);
  }
  comm->broadcast_t(&epsilon, 1, ROOT_RANK);
  comm->broadcast_t(&Ntime, 1, ROOT_RANK);
  comm->broadcast_t(momentum.data(), numParams, ROOT_RANK);
  fwrap(savedMomentum) = momentum;
  LibLSS::copy_array(saveParams, params);
  Hstart = posterior->evaluate(saveParams);
  // Do the integration
  ctx.print(boost::format("Integrate epsilon=%g ntime=%d") % epsilon % Ntime);
  try {
    integrator.integrate_dense(
        std::bind(
            &JointPosterior::adjointGradient, posterior.get(), ph::_1, ph::_2),
        massMatrix, epsilon, Ntime, saveParams, momentum, tmp_gradient);
    Hend = posterior->evaluate(saveParams);
    double delta_Ekin;
    {
      // Kinetic-energy difference via the identity
      // p^T M p - p0^T M p0 = (p - p0)^T M (p + p0) (M symmetric).
      auto p = fwrap(momentum);
      auto old_p = fwrap(savedMomentum);
      delta_Ekin = (0.5 * (p - old_p) * massMatrix(p + old_p)).sum();
    }
    delta_H = Hend - Hstart + delta_Ekin;
    double log_u;
    // log(1 - uniform()) avoids log(0); drawn on root and broadcast so all
    // ranks take the same accept/reject decision.
    if (comm->rank() == ROOT_RANK)
      log_u = std::log(1 - rgen.uniform());
    comm->broadcast_t(&log_u, 1, ROOT_RANK);
    ctx.print(
        boost::format("deltaEkin = %g, delta_L = %g, deltaH = %g, log_u = %g") %
        delta_Ekin % (Hend - Hstart) % delta_H % log_u);
    if (log_u <= -delta_H) {
      // Accept
      LibLSS::copy_array(params, saveParams);
      ctx.print("Accept");
    }
  } catch (HMCLet::ErrorBadGradient const &) {
    ctx.print2<LOG_ERROR>(
        "A bad gradient computation occured. Reject the sample");
  }
  // The current chain state (accepted or not) feeds the mass adaptation.
  massMatrix.addMass(params);
}
#include "libLSS/hmclet/diagonal_mass.hpp"
template class LibLSS::HMCLet::SimpleSampler<DiagonalMassMatrix>;
#include "libLSS/hmclet/dense_mass.hpp"
template class LibLSS::HMCLet::SimpleSampler<DenseMassMatrix>;
#include "libLSS/hmclet/mass_burnin.hpp"
template class LibLSS::HMCLet::SimpleSampler<MassMatrixWithBurnin<DiagonalMassMatrix>>;
template class LibLSS::HMCLet::SimpleSampler<MassMatrixWithBurnin<DenseMassMatrix>>;

View file

@ -0,0 +1,87 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_HMCLET_HPP
#define __LIBLSS_HMCLET_HMCLET_HPP
#include <memory>
#include <boost/multi_array.hpp>
#include "libLSS/samplers/core/random_number.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/errors.hpp"
namespace LibLSS {
namespace HMCLet {
    // Parameter vectors are plain 1-d double arrays (boost multi_array refs).
    typedef boost::multi_array_ref<double, 1> VectorType;
    // Raised when the posterior's adjoint gradient evaluation fails.
    LIBLSS_NEW_ERROR(ErrorBadGradient);
    // Raised when a proposed sample must be rejected outright.
    LIBLSS_NEW_ERROR(ErrorBadReject);
    // Interface a likelihood must implement to be driven by the hmclet
    // samplers: negative log-posterior evaluation and its adjoint gradient.
    class JointPosterior {
    public:
      JointPosterior() {}
      virtual ~JointPosterior() {}

      // Dimensionality of the parameter space.
      virtual size_t getNumberOfParameters() const = 0;
      // Value of the target (used as the potential energy in the HMC).
      virtual double evaluate(VectorType const &params) = 0;
      // Gradient of evaluate() w.r.t. params, written into params_gradient.
      // Implementations may throw ErrorBadGradient on failure.
      virtual void adjointGradient(
          VectorType const &params, VectorType &params_gradient) = 0;
    };
class AbstractSimpleSampler {
public:
AbstractSimpleSampler() : maxEpsilon(0.02), maxNtime(50), momentumScale(0.0) {}
virtual void calibrate(
MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
VectorType const &initial_params, VectorType const &initial_step) = 0;
virtual void newSample(
MPI_Communication *comm, RandomNumber &rng, VectorType &params) = 0;
void setMaxEpsilon(double epsilon_) { maxEpsilon = epsilon_; }
void setMaxNtime(size_t ntime_) { maxNtime = ntime_; }
void setMassScaling(double scale_) { momentumScale = scale_; }
virtual void reset() {}
double maxEpsilon;
double momentumScale;
size_t maxNtime;
};
    // Vanilla HMC sampler over a JointPosterior, parameterized by the mass
    // matrix implementation (diagonal, dense, or burnin-wrapped variants).
    template <typename MassType>
    class SimpleSampler : public AbstractSimpleSampler {
    public:
      typedef MassType mass_t;

      SimpleSampler(std::shared_ptr<JointPosterior> posterior);
      ~SimpleSampler();

      // Slice-sampling warm-up that seeds the mass matrix (see .cpp).
      virtual void calibrate(
          MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
          VectorType const &initial_params, VectorType const &initial_step);
      // One HMC transition updating params in place.
      virtual void
      newSample(MPI_Communication *comm, RandomNumber &rng, VectorType &params);

      mass_t &getMass() { return massMatrix; }

    protected:
      size_t numParams;
      mass_t massMatrix;
      std::shared_ptr<JointPosterior> posterior;
      // Persistent momentum for partial refresh across transitions.
      boost::multi_array<double, 1> momentum;
    };
} // namespace HMCLet
} // namespace LibLSS
#endif

View file

@ -0,0 +1,141 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet_qnhmc.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/itertools.hpp"
using namespace LibLSS;
using namespace LibLSS::QNHMCLet;
namespace ph = std::placeholders;
using boost::format;
constexpr static int ROOT_RANK = 0;
// Construct a QN-HMC sampler over the given joint posterior; the mass
// matrix and the quasi-Newton B approximation are sized from the
// posterior's parameter count.
// Fix: initializer list reordered to follow the member declaration order
// (numParams, massMatrix, B, posterior, momentum), avoiding -Wreorder.
template <typename MassType, typename BMatrixType>
Sampler<MassType,BMatrixType>::Sampler(
    std::shared_ptr<JointPosterior> _posterior)
    : numParams(_posterior->getNumberOfParameters()), massMatrix(numParams),
      B(numParams), posterior(_posterior),
      momentum(boost::extents[numParams]) {
  ConsoleContext<LOG_DEBUG> ctx("qnhmclet constructor");
  fwrap(momentum) = 0;
}
// Out-of-line destructor kept so it is emitted with the explicit template
// instantiations at the end of this file.
template <typename MassType, typename BMatrixType>
Sampler<MassType,BMatrixType>::~Sampler() = default;
// One QN-HMC transition (http://auai.org/uai2016/proceedings/papers/102.pdf):
// like plain HMC, but the gradient and the velocity are preconditioned by
// the running quasi-Newton approximation B, which is updated along the
// trajectory. On rejection B is rolled back to its pre-trajectory copy C;
// on a NaN Hamiltonian an ErrorBadReject is thrown so the caller can reset.
// Fix: the debug print of B assumed at least a 2x2 matrix and read out of
// bounds for numParams < 2; it is now guarded.
template <typename MassType, typename BMatrixType>
void Sampler<MassType,BMatrixType>::newSample(
    MPI_Communication *comm, RandomNumber &rgen, VectorType &params) {
  ConsoleContext<LOG_DEBUG> ctx("qnhmcLet singleSampler");
  auto paramSize = boost::extents[numParams];
  SymplecticIntegrators integrator;
  // Snapshot of B, used both as preconditioner during this trajectory and
  // as the rollback target on rejection.
  BMatrixType C(B);
  boost::multi_array<double, 1> tmp_gradient(paramSize), integrateParams(paramSize),
      savedMomentum(paramSize);
  double Hstart, Hend, delta_H;
  double epsilon;
  int Ntime;
  if (comm->rank() == ROOT_RANK) {
    // (1 - uniform()) keeps epsilon strictly positive; Ntime >= 1.
    epsilon = maxEpsilon * (1 - rgen.uniform());
    Ntime = 1 + int(maxNtime * rgen.uniform());
    // Partial momentum refresh: momentumScale = 0 is a full redraw.
    fwrap(momentum) = momentumScale * fwrap(momentum) + std::sqrt(1-momentumScale*momentumScale)* massMatrix.sample(rgen);
  }
  ctx.print("Momentum is " + to_string(momentum ));
  comm->broadcast_t(&epsilon, 1, ROOT_RANK);
  comm->broadcast_t(&Ntime, 1, ROOT_RANK);
  comm->broadcast_t(momentum.data(), numParams, ROOT_RANK);
  fwrap(savedMomentum) = momentum;
  LibLSS::copy_array(integrateParams, params);
  Hstart = posterior->evaluate(integrateParams);
  // Do the integration
  ctx.print(boost::format("Integrate epsilon=%g ntime=%d") % epsilon % Ntime);
  try {
    integrator.integrate_dense(
        [this,&C,&ctx](Vector const& position, Vector& gradient) {
          // Raw gradient, then feed the (theta, grad) pair to the live B
          // and precondition the gradient with the frozen copy C.
          posterior->adjointGradient(position, gradient);
          ctx.print("QN gradient " + to_string(gradient));
          B.addInfo(position, gradient);
          C(gradient);
          ctx.print("QN[2] gradient " + to_string(gradient));
        },
        [this,&C](Vector const& p, auto& tmp_p) {
          // Velocity map: mass matrix followed by the C preconditioner.
          fwrap(tmp_p) = massMatrix(p);
          C(tmp_p);
          return fwrap(tmp_p);
        }, epsilon, Ntime, integrateParams, momentum, tmp_gradient
    );
    Hend = posterior->evaluate(integrateParams);
    double delta_Ekin;
    {
      // Kinetic-energy difference via the identity
      // p^T M p - p0^T M p0 = (p - p0)^T M (p + p0) (M symmetric).
      auto p = fwrap(momentum);
      auto old_p = fwrap(savedMomentum);
      delta_Ekin = (0.5 * (p - old_p) * massMatrix(p + old_p)).sum();
    }
    delta_H = Hend - Hstart + delta_Ekin;
    double log_u;
    if (comm->rank() == ROOT_RANK)
      log_u = std::log(1 - rgen.uniform());
    comm->broadcast_t(&log_u, 1, ROOT_RANK);
    ctx.print(
        boost::format("deltaEkin = %g, delta_L = %g, deltaH = %g, log_u = %g") %
        delta_Ekin % (Hend - Hstart) % delta_H % log_u);
    if (log_u <= -delta_H) {
      // Accept
      LibLSS::copy_array(params, integrateParams);
      ctx.print("Accept");
      if (numParams >= 2) {
        // Debug view of the top-left 2x2 corner of B (guarded: B is only
        // numParams x numParams).
        auto& q = B.get();
        ctx.print(boost::format("B=[%g,%g;%g,%g]") % q[0][0] % q[0][1] % q[1][0] % q[1][1]);
      }
    } else {
      if (std::isnan(delta_H)) {
        // Try to recover by resetting completely B
        throw HMCLet::ErrorBadReject("Bad integration");
      } else {
        // Reject
        B = C; // Reset the drift matrix
      }
    }
  } catch (HMCLet::ErrorBadGradient const &) {
    ctx.print2<LOG_ERROR>(
        "A bad gradient computation occured. Reject the sample");
    throw HMCLet::ErrorBadReject("Bad gradient");
  }
}
#include "libLSS/hmclet/diagonal_mass.hpp"
template class LibLSS::QNHMCLet::Sampler<HMCLet::DiagonalMassMatrix,BDense>;
#include "libLSS/hmclet/dense_mass.hpp"
template class LibLSS::QNHMCLet::Sampler<HMCLet::DenseMassMatrix,BDense>;

View file

@ -0,0 +1,182 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet_qnhmc.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_QNHMCLET_HPP
#define __LIBLSS_HMCLET_QNHMCLET_HPP
#include <memory>
#include <boost/multi_array.hpp>
#include "libLSS/samplers/core/random_number.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/errors.hpp"
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
#include <Eigen/Core>
namespace LibLSS {
// Implement QN-HMC algorithm
// http://auai.org/uai2016/proceedings/papers/102.pdf
namespace QNHMCLet {
using HMCLet::VectorType;
using HMCLet::JointPosterior;
class BDense {
protected:
size_t numParams;
size_t store;
boost::multi_array<double, 2> B;
boost::multi_array<double, 1> prev_theta, prev_grad_f;
Eigen::VectorXd s_k, y_k;
public:
BDense(size_t numParams_)
: numParams(numParams_),store(0),
B(boost::extents[numParams][numParams]), prev_theta(boost::extents[numParams]), prev_grad_f(boost::extents[numParams]), s_k(numParams), y_k(numParams) {
reset();
}
void reset() {
store = 0;
fwrap(prev_theta) = 0;
fwrap(prev_grad_f) = 0;
fwrap(B) = 0;
for (size_t i = 0; i < numParams; i++)
B[i][i] = 1e-5;
}
BDense(BDense const& other)
: numParams(other.numParams), store(other.store),
B(boost::extents[numParams][numParams]), prev_theta(boost::extents[numParams]), prev_grad_f(boost::extents[numParams]), s_k(numParams), y_k(numParams) {
fwrap(B) = other.B;
store = other.store;
s_k = other.s_k;
y_k = other.y_k;
fwrap(prev_theta) = other.prev_theta;
fwrap(prev_grad_f) = other.prev_grad_f;
}
BDense const& operator=(BDense const& other) {
Console::instance().c_assert(numParams == other.numParams, "Invalid B matrix state");;
//B.resize(boost::extents[numParams][numParams]);
fwrap(B) = other.B;
store = other.store;
s_k = other.s_k;
y_k = other.y_k;
fwrap(prev_theta) = other.prev_theta;
fwrap(prev_grad_f) = other.prev_grad_f;
return *this;
}
template<typename Theta, typename Gradient>
void addInfo(Theta const& theta, Gradient const& grad_f) {
auto w_prev_theta = fwrap(prev_theta);
auto w_prev_grad_f = fwrap(prev_grad_f);
auto B_map = Eigen::Map<Eigen::MatrixXd>(B.data(), numParams, numParams);
store++;
if (store == 1) {
w_prev_theta = theta;
w_prev_grad_f = grad_f;
return;
}
for (size_t i = 0; i < numParams; i++) {
s_k(i) = theta[i] - prev_theta[i];
y_k(i) = (grad_f[i] - prev_grad_f[i]);
}
double const alpha_0 = s_k.dot(y_k);
double const alpha = 1/alpha_0;
if (alpha_0*alpha_0 < 1e-5 * s_k.dot(s_k) * y_k.dot(y_k) ) {
// w_prev_theta = theta;
// w_prev_grad_f = grad_f;
Console::instance().print<LOG_DEBUG>(
boost::format("SKIPPED alpha = %lg, reduced = %lg" ) % alpha %
(alpha_0/std::sqrt(s_k.dot(s_k) * y_k.dot(y_k))));
return;
}
Console::instance().print<LOG_DEBUG>(
boost::format("alpha = %lg, s_k = %lg, y_k = %lg, reduced = %lg" ) % alpha % std::sqrt(s_k.dot(s_k)) % std::sqrt(y_k.dot(y_k)) %
(alpha_0/std::sqrt(s_k.dot(s_k) * y_k.dot(y_k))));
auto I = Eigen::MatrixXd::Identity(numParams,numParams);
Eigen::MatrixXd M = I - y_k * s_k.transpose() * alpha;
Eigen::MatrixXd N = s_k * s_k.transpose() * alpha;
B_map = M.transpose() * B_map * M;
B_map += N;
w_prev_theta = theta;
w_prev_grad_f = grad_f;
}
void operator()(boost::multi_array_ref<double,1>& x)
{
Eigen::Map<Eigen::VectorXd> m_x(x.data(), numParams);
Eigen::Map<Eigen::MatrixXd> m_B(B.data(), numParams, numParams);
m_x = m_B * m_x;
}
boost::multi_array_ref<double, 2> const& get() const { return B; }
void save(H5_CommonFileGroup& g) {
CosmoTool::hdf5_write_array(g, "B", B);
CosmoTool::hdf5_write_array(g, "prev_theta", prev_theta);
CosmoTool::hdf5_write_array(g, "prev_grad_f", prev_grad_f);
hdf5_save_scalar(g, "store", store);
}
void load(H5_CommonFileGroup& g) {
CosmoTool::hdf5_read_array(g, "B", B);
CosmoTool::hdf5_read_array(g, "prev_theta", prev_theta);
CosmoTool::hdf5_read_array(g, "prev_grad_f", prev_grad_f);
store = hdf5_load_scalar<int>(g, "store");
}
};
    // Quasi-Newton HMC sampler (see the paper referenced above): plain HMC
    // preconditioned by a BFGS-style approximation B of the inverse Hessian.
    // calibrate() is a no-op here; the preconditioner adapts on the fly.
    template <typename MassType, typename BMatrixType>
    class Sampler : public HMCLet::AbstractSimpleSampler {
    public:
      typedef MassType mass_t;

      Sampler(std::shared_ptr<JointPosterior> posterior);
      ~Sampler();

      // One QN-HMC transition updating params in place (see .cpp).
      virtual void
      newSample(MPI_Communication *comm, RandomNumber &rng, VectorType &params);
      virtual void calibrate(
          MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
          VectorType const &initial_params, VectorType const &initial_step) {}

      mass_t &getMass() { return massMatrix; }
      BMatrixType& getB() { return B; }

      // Drop the quasi-Newton state and the persistent momentum (used to
      // recover after an ErrorBadReject).
      virtual void reset() {
        Console::instance().print<LOG_DEBUG>("Resetting QN-HMC"); B.reset();
        fwrap(momentum) = 0;
      }

    protected:
      size_t numParams;
      mass_t massMatrix;
      BMatrixType B;
      std::shared_ptr<JointPosterior> posterior;
      typedef VectorType Vector;
      // Persistent momentum for partial refresh across transitions.
      boost::multi_array<double, 1> momentum;
    };
} // namespace QNHMCLet
} // namespace LibLSS
#endif

View file

@ -0,0 +1,343 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_hmclet.cpp
Copyright (C) 2014-2020 2018-2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <string>
#include <boost/format.hpp>
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/julia/julia.hpp"
#include "libLSS/julia/julia_mcmc.hpp"
#include "libLSS/hmclet/julia_hmclet.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
#include "libLSS/samplers/core/types_samplers.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/samplers/julia/julia_likelihood.hpp"
#include "libLSS/julia/julia_ghosts.hpp"
#include "libLSS/julia/julia_array.hpp"
#include "libLSS/hmclet/diagonal_mass.hpp"
#include "libLSS/hmclet/dense_mass.hpp"
#include "libLSS/hmclet/mass_burnin.hpp"
#include "libLSS/tools/itertools.hpp"
#include "libLSS/hmclet/mass_saver.hpp"
using namespace LibLSS;
using namespace LibLSS::JuliaLikelihood;
using namespace LibLSS::JuliaHmclet::details;
using boost::format;
using LibLSS::Julia::helpers::_r;
static constexpr int ROOT_RANK = 0;
// ----------------------------------------------------------------------------
// JuliaHmcletMeta
// Meta-sampler constructor: records the communicator, Julia likelihood
// module name and mass-matrix configuration. Actual per-catalog sampler
// construction is deferred to restore()/initialize().
JuliaHmcletMeta::JuliaHmcletMeta(
MPI_Communication *comm_, std::shared_ptr<JuliaDensityLikelihood> likelihood_,
const std::string &likelihood_module_, MatrixType matrixType,
size_t burnin_, size_t memorySize_, double limiter_, bool frozen_)
: MarkovSampler(), comm(comm_), module_name(likelihood_module_),
likelihood(likelihood_), massMatrixType(matrixType),
burnin(burnin_), memorySize(memorySize_), limiter(limiter_), frozen(frozen_) {
ConsoleContext<LOG_INFO> ctx("JuliaHmcletMeta::JuliaHmcletMeta");
}
JuliaHmcletMeta::~JuliaHmcletMeta() {}
// Fresh initialization is identical to restoring from a saved state.
void JuliaHmcletMeta::initialize(MarkovState &state) { restore(state); }
/**
 * Seed the diagonal mass matrix (with burn-in memory) of a SimpleSampler
 * from the Julia-side "<module>.fill_diagonal_mass_matrix" routine.
 *
 * @param module_name Julia module providing the likelihood.
 * @param hmc_        sampler built by the matching DIAGONAL builder.
 * @param jl_state    packed MarkovState handed to Julia.
 * @param burnin      maximum iteration for burn-in adaptation.
 * @param memorySize  sliding-window size for the burn-in memory.
 * @param frozen      when true, freeze the mass matrix after seeding.
 */
static void julia_helper_diagonal_mass_matrix(
    std::string const &module_name,
    std::unique_ptr<AbstractSimpleSampler> &hmc_, Julia::Object &jl_state,
    size_t burnin, size_t memorySize, bool frozen) {
  auto hmc =
      dynamic_cast<SimpleSampler<MassMatrixWithBurnin<DiagonalMassMatrix>> *>(
          hmc_.get());
  // Defensive check: the builder and this initializer must agree on the
  // sampler type. Without it, a mismatched cast yields nullptr and the
  // dereference below is undefined behavior.
  if (hmc == nullptr)
    error_helper<ErrorBadState>(
        "julia_helper_diagonal_mass_matrix: unexpected sampler type");
  Julia::Object jl_mass =
      Julia::invoke(module_name + ".fill_diagonal_mass_matrix", jl_state);
  auto mass = jl_mass.unbox_array<double, 1>();
  auto &hmc_mass = hmc->getMass();
  hmc_mass.setInitialMass(mass);
  hmc_mass.clear();
  hmc_mass.setBurninMax(burnin);
  hmc_mass.setMemorySize(memorySize);
  if (frozen)
    hmc_mass.freeze();
}
/**
 * Seed the diagonal mass matrix of a QN-HMC sampler from the Julia-side
 * "<module>.fill_diagonal_mass_matrix" routine. The matrix is frozen
 * immediately: QN-HMC adapts through its B matrix, not the mass.
 */
static void julia_helper_diagonal_mass_matrix_qn(
    std::string const &module_name,
    std::unique_ptr<AbstractSimpleSampler> &hmc_, Julia::Object &jl_state) {
  Console::instance().print<LOG_DEBUG>("Initializing mass matrix QN");
  auto hmc =
      dynamic_cast<QNHMCLet::Sampler<DiagonalMassMatrix, QNHMCLet::BDense> *>(
          hmc_.get());
  // Defensive check: a mismatched builder/initializer pairing would make the
  // cast return nullptr and the dereference below undefined behavior.
  if (hmc == nullptr)
    error_helper<ErrorBadState>(
        "julia_helper_diagonal_mass_matrix_qn: unexpected sampler type");
  Julia::Object jl_mass =
      Julia::invoke(module_name + ".fill_diagonal_mass_matrix", jl_state);
  auto mass = jl_mass.unbox_array<double, 1>();
  Console::instance().print<LOG_DEBUG>("Got some mass-> " + to_string(mass));
  auto &hmc_mass = hmc->getMass();
  hmc_mass.setInitialMass(mass);
  hmc_mass.clear();
  hmc_mass.freeze();
}
/**
 * Seed the dense mass matrix (with burn-in memory) of a SimpleSampler from
 * the Julia-side "<module>.fill_dense_mass_matrix" routine.
 *
 * @param limiter correlation limiter applied to the dense matrix to avoid
 *                blow-ups from poorly estimated off-diagonal terms.
 */
static void julia_helper_dense_mass_matrix(
    std::string const &module_name,
    std::unique_ptr<AbstractSimpleSampler> &hmc_, Julia::Object &jl_state,
    size_t burnin, size_t memorySize, double limiter, bool frozen) {
  auto hmc =
      dynamic_cast<SimpleSampler<MassMatrixWithBurnin<DenseMassMatrix>> *>(
          hmc_.get());
  // Defensive check: a mismatched builder/initializer pairing would make the
  // cast return nullptr and the dereference below undefined behavior.
  if (hmc == nullptr)
    error_helper<ErrorBadState>(
        "julia_helper_dense_mass_matrix: unexpected sampler type");
  Julia::Object jl_mass =
      Julia::invoke(module_name + ".fill_dense_mass_matrix", jl_state);
  auto mass = jl_mass.unbox_array<double, 2>();
  auto &hmc_mass = hmc->getMass();
  Console::instance().print<LOG_INFO>("Setup IC mass matrix");
  hmc_mass.setInitialMass(mass);
  hmc_mass.clear();
  hmc_mass.setBurninMax(burnin);
  hmc_mass.setMemorySize(memorySize);
  hmc_mass.setCorrelationLimiter(limiter);
  if (frozen)
    hmc_mass.freeze();
}
// Select, from the configured massMatrixType, a pair of callables:
//  - a builder creating the per-catalog sampler (and registering its saver),
//  - an initializer seeding its mass matrix from Julia.
// Both callables must agree on the concrete sampler type (see the
// dynamic_cast in the julia_helper_* functions above).
std::tuple<samplerBuilder_t, massMatrixInit_t>
JuliaHmcletMeta::getAdequateSampler() {
ConsoleContext<LOG_VERBOSE> ctx("JuliaHmcletMeta::getAdequateSampler");
samplerBuilder_t f;
massMatrixInit_t f2;
if (massMatrixType == DIAGONAL) {
ctx.print("Using DIAGONAL mass matrix");
f = [](std::shared_ptr<JuliaHmcletPosterior> &posterior, MarkovState &state,
std::string const &name) {
typedef SimpleSampler<MassMatrixWithBurnin<DiagonalMassMatrix>> sampler_t;
auto sampler = std::unique_ptr<sampler_t>(new sampler_t(posterior));
add_saver(state, name, sampler);
return sampler;
};
f2 = std::bind(
&julia_helper_diagonal_mass_matrix, module_name, std::placeholders::_1,
std::placeholders::_2, burnin, memorySize, frozen);
} else if (massMatrixType == QN_DIAGONAL) {
// Quasi-Newton HMC with a frozen diagonal mass matrix.
f = [](std::shared_ptr<JuliaHmcletPosterior> &posterior, MarkovState &state,
std::string const &name) {
typedef QNHMCLet::Sampler<DiagonalMassMatrix,QNHMCLet::BDense> sampler_t;
auto sampler = std::unique_ptr<sampler_t>(new sampler_t(posterior));
add_saver(state, name, sampler);
return sampler;
};
f2 = std::bind(
&julia_helper_diagonal_mass_matrix_qn, module_name, std::placeholders::_1,
std::placeholders::_2);
} else if (massMatrixType == DENSE) {
ctx.print("Using DENSE mass matrix");
f = [](std::shared_ptr<JuliaHmcletPosterior> &posterior, MarkovState &state,
std::string const &name) {
typedef SimpleSampler<MassMatrixWithBurnin<DenseMassMatrix>> sampler_t;
auto sampler = std::unique_ptr<sampler_t>(new sampler_t(posterior));
add_saver(state, name, sampler);
return sampler;
};
f2 = std::bind(
&julia_helper_dense_mass_matrix, module_name, std::placeholders::_1,
std::placeholders::_2, burnin, memorySize, limiter, frozen);
}
// NOTE(review): if massMatrixType matches none of the cases above, both
// callables stay empty; sample() reports the missing initializer at MCMC
// step 0.
return std::make_tuple(f, f2);
}
// Rebuild all run-time structures from the MarkovState: mesh dimensions,
// ghost-plane communication, and (once the likelihood finished its own
// initialization) one posterior + hmclet sampler per catalog.
void JuliaHmcletMeta::restore(MarkovState &state) {
ConsoleContext<LOG_INFO> ctx("JuliaHmcletMeta::restore");
N0 = state.getScalar<long>("N0");
N1 = state.getScalar<long>("N1");
N2 = state.getScalar<long>("N2");
Ncatalog = state.getScalar<long>("NCAT");
// Derive the slab decomposition (local plane count/offset) from an FFTW
// manager built with the same mesh and communicator.
FFTW_Manager_3d<double> mgr(N0, N1, N2, comm);
N2real = mgr.N2real;
localN0 = mgr.localN0;
long startN0 = mgr.startN0;
// Ask the Julia likelihood which density planes it needs from other ranks.
Julia::Object plane_array =
Julia::invoke(query_planes(module_name), Julia::pack(state));
auto planes = plane_array.unbox_array<uint64_t, 1>();
std::vector<size_t> owned_planes(localN0);
for (size_t i = 0; i < localN0; i++)
owned_planes[i] = startN0 + i;
ghosts.setup(comm, planes, owned_planes, std::array<size_t, 2>{N1, N2}, N0);
ctx.print("Resize posteriors");
posteriors.resize(Ncatalog);
hmcs.resize(Ncatalog);
samplerBuilder_t samplerBuilder;
// Counter of HMC trajectories rejected due to errors (persisted in state).
state.newScalar<int>("hmclet_badreject", 0, true);
std::tie(samplerBuilder, massMatrixInit) = getAdequateSampler();
ctx.print("Register to likelihood post init");
// Per-catalog samplers need the bias vector sizes, which only exist after
// the likelihood's own deferred initialization completes.
likelihood->getPendingInit().ready([this, &state, samplerBuilder]() {
ConsoleContext<LOG_INFO> ctx2("JuliaHmcletMeta::restore::post_init");
for (size_t c = 0; c < Ncatalog; c++) {
auto &bias = *state.get<ArrayType1d>(format("galaxy_bias_%d") % c)->array;
ctx2.print("Make posterior");
posteriors[c] = std::make_shared<JuliaHmcletPosterior>(
comm, module_name, c, bias.size());
ctx2.print("Make hmclet");
hmcs[c] = samplerBuilder(
posteriors[c], state, str(format("galaxy_hmclet_%d") % c));
}
ready_hmclet.submit_ready();
});
}
// One Markov step: synchronize density ghost planes, (at step 0) seed the
// per-catalog mass matrices, then draw a new bias sample for each catalog.
void JuliaHmcletMeta::sample(MarkovState &state) {
ConsoleContext<LOG_VERBOSE> ctx("JuliaHmcletMeta::sample");
if (state.getScalar<bool>("bias_sampler_blocked"))
return;
Julia::Object jl_density;
auto &out_density = *state.get<ArrayType>("BORG_final_density")->array;
auto jl_state = Julia::pack(state);
long MCMC_STEP = state.getScalar<long>("MCMC_STEP");
RandomGen *rgen = state.get<RandomGen>("random_generator");
// Now we gather all the required planes on this node and dispatch
// our data to peers.
ghosts.synchronize(out_density);
Julia::Object jl_ghosts = Julia::newGhostManager(&ghosts, N2);
jl_density.box_array(out_density);
// Julia is 1-indexed: view over the locally-owned slab of the density.
Julia::Object v_density =
Julia::view_array<3>(jl_density, {_r(1, localN0), _r(1, N1), _r(1, N2)});
if (MCMC_STEP == 0) {
// First step: try the Julia-provided mass matrix; if the module does not
// provide one, fall back to self-calibration from step hints.
for (size_t cat_idx = 0; cat_idx < Ncatalog; cat_idx++) {
VectorType &bias =
*(state.get<ArrayType1d>(format("galaxy_bias_%d") % cat_idx)->array);
if (!massMatrixInit) {
error_helper<ErrorBadState>(
"No mass matrix initializer provided to JuliaHmclet");
}
try {
massMatrixInit(hmcs[cat_idx], jl_state);
} catch (Julia::JuliaException const &) {
ctx.print2<LOG_WARNING>("Mass matrix not provided. Auto-seeding.");
size_t Nbias = bias.size();
boost::multi_array<double, 1> initial_step(boost::extents[Nbias]);
for (size_t j = 0; j < Nbias; j++)
initial_step[j] =
Julia::invoke(
module_name + ".get_step_hint", jl_state, cat_idx, j)
.unbox<double>();
posteriors[cat_idx]->updateGhosts(jl_ghosts);
posteriors[cat_idx]->updateState(jl_state, v_density);
hmcs[cat_idx]->calibrate(comm, rgen->get(), 10, bias, initial_step);
}
}
ctx.print("Done initializing mass matrix");
}
for (size_t cat_idx = 0; cat_idx < Ncatalog; cat_idx++) {
// Refresh the posterior's raw pointers before each use (see header note).
posteriors[cat_idx]->updateGhosts(jl_ghosts);
posteriors[cat_idx]->updateState(jl_state, v_density);
try {
hmcs[cat_idx]->newSample(
comm, rgen->get(),
*state.get<ArrayType1d>(format("galaxy_bias_%d") % cat_idx)->array);
} catch (LibLSS::HMCLet::ErrorBadReject const& e) {
// Count the failure and reset the sampler instead of aborting the chain.
state.getScalar<int>("hmclet_badreject")++;
ctx.print2<LOG_ERROR>("Bad reject. Note down and reset the hmc");
hmcs[cat_idx]->reset();
}
}
// Do not use posteriors beyond this without reupdating all arrays.
}
// ----------------------------------------------------------------------------
// JuliaHmcletPosterior
// Number of bias parameters sampled for this catalog.
size_t JuliaHmcletPosterior::getNumberOfParameters() const {
return numBiasParams;
}
// Evaluate the posterior value at `params`: prior (short-circuiting on an
// infinite, i.e. forbidden, value) plus the Julia likelihood, MPI-reduced
// over all ranks. Returned value is what the sampler treats as the energy.
double JuliaHmcletPosterior::evaluate(VectorType const &params) {
ConsoleContext<LOG_DEBUG> ctx("JuliaHmcletPosterior::evaluate");
// Copy into a plain multi_array so it can be boxed for Julia.
boost::multi_array<double, 1> a = params;
Julia::Object jl_p;
jl_p.box_array(a);
double L =
Julia::invoke(param_priors_name, *state, cat_id, jl_p).unbox<double>();
// Infinite prior => parameters out of support; skip the likelihood call.
if (L == std::numeric_limits<double>::infinity())
return std::numeric_limits<double>::infinity();
L += Julia::invoke(likelihood_name, *state, *ghosts, *density, cat_id, jl_p)
.unbox<double>();
ctx.print("Reduce likelihood");
// Each rank computed its local contribution; sum them.
comm->all_reduce_t(MPI_IN_PLACE, &L, 1, MPI_SUM);
ctx.print("Returning L=" + to_string(L));
return L;
}
// Compute the gradient of the posterior w.r.t. the bias parameters via the
// Julia adjoint. Parameters are broadcast from ROOT_RANK so all ranks work
// on identical values; the per-rank gradients are summed at the end.
void JuliaHmcletPosterior::adjointGradient(
VectorType const &params, VectorType &params_gradient) {
ConsoleContext<LOG_DEBUG> ctx("JuliaHmcletPosterior::adjointGradient");
Julia::Object jl_p, jl_gradient;
boost::multi_array<double, 1> a(boost::extents[numBiasParams]);
int bad_gradient_count = 0;
fwrap(a) = params;
// Boxing wraps the arrays; the broadcast below updates `a` in place.
jl_p.box_array(a);
jl_gradient.box_array(params_gradient);
comm->broadcast_t(a.data(), numBiasParams, ROOT_RANK);
try {
Julia::invoke(
adjoint_name, *state, *ghosts, *density, cat_id, jl_p, jl_gradient);
} catch (Julia::JuliaException &e) {
// A "bad gradient" from Julia is recoverable; anything else propagates.
if (Julia::isBadGradient(e))
bad_gradient_count = 1;
else
throw;
}
// Agree across ranks whether any rank failed, so all throw consistently.
comm->all_reduce_t(MPI_IN_PLACE, &bad_gradient_count, 1, MPI_SUM);
if (bad_gradient_count > 0)
throw HMCLet::ErrorBadGradient("Bad gradient from Julia");
Console::instance().print<LOG_VERBOSE>("Got a gradient: " + to_string(params_gradient));
comm->all_reduce_t(
(double *)MPI_IN_PLACE, params_gradient.data(), numBiasParams, MPI_SUM);
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2018-2019

View file

@ -0,0 +1,130 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_hmclet.hpp
Copyright (C) 2014-2020 2018-2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_JULIA_HMCLET_HPP
# define __LIBLSS_JULIA_HMCLET_HPP
# include <memory>
# include "libLSS/samplers/core/markov.hpp"
# include "libLSS/julia/julia.hpp"
# include "libLSS/samplers/julia/julia_likelihood.hpp"
# include "libLSS/hmclet/hmclet.hpp"
# include "libLSS/tools/mpi/ghost_planes.hpp"
namespace LibLSS {
namespace JuliaHmclet {
namespace types {
typedef ArrayType1d::ArrayType bias_t;
enum MatrixType {
DIAGONAL, DENSE, QN_DIAGONAL
};
}
namespace details {
using namespace types;
using namespace HMCLet;
// Posterior over the bias parameters of one catalog, delegating the
// evaluation and adjoint-gradient computations to a Julia module.
// Holds raw pointers to Julia objects owned by the caller; they must be
// refreshed via updateGhosts()/updateState() before each use.
class JuliaHmcletPosterior : virtual public JointPosterior {
protected:
MPI_Communication *comm;
std::string likelihood_module;
std::string likelihood_name;
std::string adjoint_name;
size_t cat_id;
Julia::Object *density;
Julia::Object *state;
Julia::Object *ghosts;
size_t numBiasParams;
std::string param_priors_name;
public:
// The Julia function names are derived once from the module name.
JuliaHmcletPosterior(
MPI_Communication *comm_, const std::string likelihood_module_,
size_t cat_id_, size_t numBiasParams_)
: comm(comm_), likelihood_module(likelihood_module_),
likelihood_name(
JuliaLikelihood::likelihood_evaluate_bias(likelihood_module)),
adjoint_name(
JuliaLikelihood::likelihood_adjoint_bias(likelihood_module)),
cat_id(cat_id_), numBiasParams(numBiasParams_),
param_priors_name(likelihood_module + ".log_prior_bias") {}
virtual ~JuliaHmcletPosterior() {}
// We try to save a bit of julia stack protection.
void updateGhosts(Julia::Object &ghosts_) { ghosts = &ghosts_; }
void updateState(Julia::Object &state_, Julia::Object &density_) {
state = &state_;
density = &density_;
}
virtual size_t getNumberOfParameters() const;
virtual double evaluate(VectorType const &params);
virtual void
adjointGradient(VectorType const &params, VectorType &params_gradient);
};
typedef std::function<std::unique_ptr<AbstractSimpleSampler>(
std::shared_ptr<JuliaHmcletPosterior> &, MarkovState &,
std::string const &)>
samplerBuilder_t;
typedef std::function<void(
std::unique_ptr<AbstractSimpleSampler> &, Julia::Object &)>
massMatrixInit_t;
// Markov meta-sampler that owns one hmclet (HMC-like sampler) per galaxy
// catalog, with mass matrices chosen by MatrixType and seeded from Julia.
class JuliaHmcletMeta : virtual public MarkovSampler {
protected:
MPI_Communication *comm;
std::string module_name;
typedef HMCLet::AbstractSimpleSampler sampler_t;
typedef std::unique_ptr<sampler_t> SimpleSampler_p;
typedef std::vector<SimpleSampler_p> SimpleSampler_pv;
// One posterior + sampler per catalog.
std::vector<std::shared_ptr<details::JuliaHmcletPosterior>> posteriors;
SimpleSampler_pv hmcs;
size_t Ncatalog, N0, N1, N2, N2real, localN0;
GhostPlanes<double, 2> ghosts;
std::shared_ptr<JuliaDensityLikelihood> likelihood;
// Fires once the per-catalog samplers are built (after likelihood init).
Defer ready_hmclet;
MatrixType massMatrixType;
size_t burnin;
size_t memorySize;
double limiter;
bool frozen;
massMatrixInit_t massMatrixInit;
std::tuple<samplerBuilder_t, massMatrixInit_t> getAdequateSampler();
public:
JuliaHmcletMeta(
MPI_Communication *comm, std::shared_ptr<JuliaDensityLikelihood> likelihood_,
const std::string &likelihood_module, MatrixType massMatrixType_,
size_t burnin, size_t memorySize, double limiter, bool frozen);
~JuliaHmcletMeta();
Defer &postinit() { return ready_hmclet; }
SimpleSampler_pv &hmclets() { return hmcs; }
virtual void initialize(MarkovState &state);
virtual void restore(MarkovState &state);
virtual void sample(MarkovState &state);
};
} // namespace details
} // namespace JuliaHmclet
using JuliaHmclet::details::JuliaHmcletMeta;
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2018-2019

View file

@ -0,0 +1,168 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_slice.cpp
Copyright (C) 2014-2020 2018-2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <string>
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/julia/julia.hpp"
#include "libLSS/julia/julia_mcmc.hpp"
#include "libLSS/hmclet/julia_slice.hpp"
#include "libLSS/samplers/core/types_samplers.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/samplers/julia/julia_likelihood.hpp"
#include "libLSS/julia/julia_ghosts.hpp"
#include "libLSS/julia/julia_array.hpp"
#include "libLSS/hmclet/mass_saver.hpp"
using namespace LibLSS;
using namespace LibLSS::JuliaLikelihood;
using LibLSS::Julia::helpers::_r;
// Slice-sweep bias sampler delegating to a Julia likelihood module; the
// covariance matrices and ghost planes are built in restore().
JuliaMetaSlice::JuliaMetaSlice(
MPI_Communication *comm_, const std::string &likelihood_module_,
std::shared_ptr<JuliaDensityLikelihood> likelihood_, size_t burnin_, size_t memorySize_)
: MarkovSampler(), module_name(likelihood_module_), comm(comm_),
likelihood(likelihood_), burnin(burnin_), memorySize(memorySize_) {}
JuliaMetaSlice::~JuliaMetaSlice() {}
// Fresh initialization is identical to restoring from a saved state.
void JuliaMetaSlice::initialize(MarkovState &state) { restore(state); }
// Rebuild run-time structures from the MarkovState: mesh scalars, the
// per-catalog proposal covariance matrices (owned here, saved via a
// MassSaver state element), and the ghost-plane communication layout.
void JuliaMetaSlice::restore(MarkovState &state) {
  N0 = state.getScalar<long>("N0");
  N1 = state.getScalar<long>("N1");
  N2 = state.getScalar<long>("N2");
  N2real = state.getScalar<long>("N2real");
  localN0 = state.getScalar<long>("localN0");
  // BUGFIX: startN0 was read below (owned_planes) but never initialized,
  // leaving an indeterminate member value. Fetch it from the state like the
  // other slab-decomposition scalars.
  // NOTE(review): assumes "startN0" is published in MarkovState alongside
  // "localN0"/"N2real" — confirm against the mesh setup code.
  startN0 = state.getScalar<long>("startN0");
  Ncatalog = state.getScalar<long>("NCAT");
  // Ask the Julia likelihood which density planes it needs from other ranks.
  Julia::Object plane_array =
      Julia::invoke(query_planes(module_name), Julia::pack(state));
  auto planes = plane_array.unbox_array<uint64_t, 1>();
  std::vector<size_t> owned_planes(localN0);
  for (size_t i = 0; i < localN0; i++)
    owned_planes[i] = startN0 + i;
  // Create and introduce the covariance matrix in the state.
  // However this matrix is fully owned by JuliaMetaSlice. Only the saver
  // is introduced as a mechanism to automatically save/restore the matrix.
  //
  likelihood->getPendingInit().ready([this, &state]() {
    covariances.clear();
    for (size_t i = 0; i < Ncatalog; i++) {
      auto &bias =
          *state.get<ArrayType1d>(boost::format("galaxy_bias_%d") % i)->array;
      size_t numParams = bias.size();
      auto covar = std::shared_ptr<mass_t>(new mass_t(numParams));
      auto obj = new ObjectStateElement<HMCLet::MassSaver<mass_t>, true>();
      obj->obj = new HMCLet::MassSaver<mass_t>(*covar.get());
      state.newElement(boost::str(boost::format("galaxy_slice_%d") % i), obj, true);
      covariances.push_back(covar);
      // Seed the proposal covariance from the Julia-provided mass matrix.
      Julia::Object jl_mass =
          Julia::invoke(module_name + ".fill_dense_mass_matrix", Julia::pack(state));
      auto mass = jl_mass.unbox_array<double, 2>();
      Console::instance().print<LOG_INFO>("Setup IC mass matrix");
      covar->setInitialMass(mass);
      covar->clear();
      covar->setBurninMax(burnin);
      covar->setMemorySize(memorySize);
      covar->setCorrelationLimiter(0.001); // The minimum to avoid blow up
    }
  });
  ghosts.setup(
      comm, planes, owned_planes, std::array<size_t, 2>{N1, N2real}, N0);
}
// One Markov step: for each catalog, slice-sweep each bias parameter in the
// decorrelated basis given by the principal components of the adaptive
// covariance, then fold the accepted point back into the covariance.
void JuliaMetaSlice::sample(MarkovState &state) {
using namespace Eigen;
ConsoleContext<LOG_VERBOSE> ctx("JuliaMetaSlice::sample");
Julia::Object jl_density;
if (state.getScalar<bool>("bias_sampler_blocked"))
return;
auto &out_density = *state.get<ArrayType>("BORG_final_density")->array;
auto jl_state = Julia::pack(state);
// Now we gather all the required planes on this node and dispatch
// our data to peers.
ghosts.synchronize(out_density);
RandomGen *rgen = state.get<RandomGen>("random_generator");
auto jl_ghosts = Julia::newGhostManager(&ghosts, N2);
jl_density.box_array(out_density);
std::string likelihood_name = likelihood_evaluate_bias(module_name);
std::string param_priors_name = module_name + ".log_prior_bias";
// Julia is 1-indexed: view over the locally-owned slab of the density.
auto v_density =
Julia::view_array<3>(jl_density, {_r(1, localN0), _r(1, N1), _r(1, N2)});
for (int cat_idx = 0; cat_idx < Ncatalog; cat_idx++) {
auto &bias = *state.get<ArrayType1d>(galaxy_bias_name(cat_idx))->array;
size_t Nbiases = bias.size();
Map<VectorXd> current_bias(bias.data(), Nbiases);
VectorXd transformed_bias(Nbiases);
VectorXd new_transformed_bias(Nbiases);
// new_bias aliases new_transformed_bias's storage: writing the Eigen
// vector updates the array boxed into Julia below.
boost::multi_array_ref<double, 1> new_bias(
&new_transformed_bias(0), boost::extents[Nbiases]);
Julia::Object jl_bias;
jl_bias.box_array(new_bias);
covariances[cat_idx]->computeMainComponents();
auto mean = covariances[cat_idx]->getMean();
auto components = covariances[cat_idx]->components();
// Work in the whitened coordinates: components^H (bias - mean).
transformed_bias.noalias() = components.adjoint() * (current_bias - mean);
for (int j = 0; j < Nbiases; j++) {
ctx.print(boost::format("catalog %d / bias %d") % cat_idx % j);
// Log-probability of coordinate j at value x, all others held fixed;
// negated because slice_sweep expects that sign convention here.
auto likelihood = [&, this, j, cat_idx](double x) -> double {
new_transformed_bias = transformed_bias;
new_transformed_bias(j) = x;
// Map back to the original parameter space before calling Julia.
new_transformed_bias = components * new_transformed_bias + mean;
double L = Julia::invoke(
likelihood_name, jl_state, jl_ghosts, v_density, cat_idx,
jl_bias)
.unbox<double>();
ctx.print("Reduce likelihood");
comm->all_reduce_t(MPI_IN_PLACE, &L, 1, MPI_SUM);
ctx.print("Returning L=" + to_string(L));
L += Julia::invoke(param_priors_name, jl_state, cat_idx, jl_bias)
.unbox<double>();
return -L;
};
double step =
Julia::invoke(module_name + ".get_step_hint", jl_state, cat_idx, j)
.unbox<double>();
ctx.print("Advised step is " + to_string(step));
transformed_bias(j) = slice_sweep(
comm, rgen->get(), likelihood, transformed_bias(j), step, 0);
}
// Map the accepted whitened point back and store it into the state.
new_transformed_bias = components * transformed_bias + mean;
current_bias = new_transformed_bias;
covariances[cat_idx]->addMass(bias);
}
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2018-2019

View file

@ -0,0 +1,53 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_slice.hpp
Copyright (C) 2014-2020 2018-2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __JULIA_META_SLICE_HPP
# define __JULIA_META_SLICE_HPP
# include <vector>
# include "libLSS/samplers/core/markov.hpp"
# include "libLSS/tools/mpi/ghost_planes.hpp"
# include "libLSS/hmclet/mass_burnin.hpp"
# include "libLSS/hmclet/dense_mass.hpp"
# include "libLSS/samplers/julia/julia_likelihood.hpp"
namespace LibLSS {
// Slice-sweep sampler for per-catalog bias parameters, with an adaptive
// dense covariance (burn-in windowed) used to decorrelate the sweeps.
class JuliaMetaSlice : public MarkovSampler {
protected:
GhostPlanes<double, 2> ghosts;
std::string module_name;
size_t N0, N1, N2, N2real, localN0, startN0, Ncatalog;
MPI_Communication *comm;
typedef HMCLet::MassMatrixWithBurnin<HMCLet::DenseMassMatrix> mass_t;
// One adaptive proposal covariance per catalog (owned here; persisted
// through a MassSaver state element registered in restore()).
std::vector<std::shared_ptr<mass_t>> covariances;
std::shared_ptr<JuliaDensityLikelihood> likelihood;
size_t burnin, memorySize;
public:
JuliaMetaSlice(
MPI_Communication *comm, const std::string &likelihood_module,
std::shared_ptr<JuliaDensityLikelihood> likelihood_, size_t burnin_,
size_t memorySize_);
~JuliaMetaSlice();
virtual void initialize(MarkovState &state);
virtual void restore(MarkovState &state);
virtual void sample(MarkovState &state);
};
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2018-2019

View file

@ -0,0 +1,102 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/mass_burnin.cpp
Copyright (C) 2014-2020 2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/mass_burnin.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
#include "libLSS/tools/itertools.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
// Persist the underlying mass matrix, the step counter, and each memorized
// parameter snapshot as "memory_<i>" datasets.
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::saveMass(CosmoTool::H5_CommonFileGroup &g) {
super_t::saveMass(g);
hdf5_save_scalar(g, "stepID", stepID);
Console::instance().print<LOG_VERBOSE>("Handling memory");
for (auto m : itertools::enumerate(memory)) {
int id = m.template get<0>();
auto const &a = m.template get<1>();
std::string s = str(boost::format("memory_%d") % id);
Console::instance().print<LOG_VERBOSE>(
boost::format("Saving memory %d / %s") % id % s);
CosmoTool::hdf5_write_array(g, s, a);
}
}
// Restore the state written by saveMass(): the underlying matrix, the step
// counter, and (only while still in burn-in) the memorized snapshots.
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::loadMass(CosmoTool::H5_CommonFileGroup &g) {
  super_t::loadMass(g);
  stepID = hdf5_load_scalar<size_t>(g, "stepID");
  // Past burn-in the memory is no longer used; skip reloading it.
  if (stepID > burninMaxIteration)
    return;
  memory.clear();
  for (auto r : itertools::range(0, memorySize)) {
    boost::multi_array<double, 1> m;
    try {
      CosmoTool::hdf5_read_array(g, str(boost::format("memory_%d") % r), m);
    } catch (H5::Exception const &) {
      // Fewer snapshots saved than memorySize: stop at the first gap.
      // (Catch by const reference — catching by value slices the
      //  exception object.)
      break;
    }
    memory.push_back(m);
  }
}
// Reset both the underlying matrix and the burn-in memory window.
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::clear() {
super_t::clear();
memory.clear();
}
// Accumulate one parameter sample. During burn-in a sliding window of the
// last memorySize samples is kept; once the window is full, the underlying
// matrix is rebuilt from scratch over the window (so stale early samples
// drop out). After burn-in the matrix is frozen: new samples are ignored.
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::addMass(VectorType const &params) {
stepID++;
// If burnin is done, just proceed normally.
if (stepID > burninMaxIteration) {
// memory.clear();
// super_t::addMass(params);
return;
}
memory.push_back(params);
if (memory.size() > memorySize) {
memory.pop_front();
super_t::clear();
// Very dumb algorithm
for (auto &old_params : memory)
super_t::addMass(old_params);
} else {
super_t::addMass(params);
}
}
#include "libLSS/hmclet/diagonal_mass.hpp"
template class LibLSS::HMCLet::MassMatrixWithBurnin<DiagonalMassMatrix>;
#include "libLSS/hmclet/dense_mass.hpp"
template class LibLSS::HMCLet::MassMatrixWithBurnin<DenseMassMatrix>;
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2019

View file

@ -0,0 +1,55 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/mass_burnin.hpp
Copyright (C) 2014-2020 2019 <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_DIAGONAL_MASS_BURNIN_HPP
# define __LIBLSS_HMCLET_DIAGONAL_MASS_BURNIN_HPP
# include <memory>
# include <boost/multi_array.hpp>
# include "libLSS/samplers/core/random_number.hpp"
# include <CosmoTool/hdf5_array.hpp>
# include "libLSS/tools/errors.hpp"
# include "libLSS/hmclet/hmclet.hpp"
namespace LibLSS {
namespace HMCLet {
// Decorator over a mass-matrix type (diagonal or dense) that adds a
// burn-in phase: samples are accumulated over a sliding window of size
// memorySize until burninMaxIteration steps, after which the matrix is
// frozen (see mass_burnin.cpp for the implementation).
template <typename Matrix>
class MassMatrixWithBurnin : public Matrix {
protected:
typedef Matrix super_t;
size_t memorySize;
size_t burninMaxIteration;
size_t stepID;
std::list<boost::multi_array<double, 1>> memory;
public:
// Defaults: 50-sample window, 300 burn-in iterations.
MassMatrixWithBurnin(size_t numParams_)
: super_t(numParams_), memorySize(50), burninMaxIteration(300),
stepID(0) {}
void setMemorySize(size_t sz) { memorySize = sz; }
void setBurninMax(size_t maxIteration) {
burninMaxIteration = maxIteration;
}
void saveMass(CosmoTool::H5_CommonFileGroup &g);
void loadMass(CosmoTool::H5_CommonFileGroup &g);
void addMass(VectorType const &params);
void clear();
};
} // namespace HMCLet
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(0) = 2019

View file

@ -0,0 +1,62 @@
#ifndef __LIBLSS_HMCLET_MASS_SAVER_HPP
#define __LIBLSS_HMCLET_MASS_SAVER_HPP
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
namespace LibLSS {
namespace HMCLet {
// Thin adapter exposing a mass matrix's saveMass/loadMass through the
// save/restore interface expected by MarkovState object elements.
template <typename Mass_t>
struct MassSaver {
Mass_t &mass;
MassSaver(Mass_t &mass_) : mass(mass_) {}
void save(CosmoTool::H5_CommonFileGroup &fg) { mass.saveMass(fg); }
void restore(CosmoTool::H5_CommonFileGroup &fg) { mass.loadMass(fg); }
};
// Like MassSaver, but also persists the QN-HMC B matrix into the same
// HDF5 group.
template <typename Mass_t, typename BMass_t>
struct QNMassSaver {
Mass_t &mass;
BMass_t &B;
QNMassSaver(Mass_t &mass_, BMass_t& b_) : mass(mass_), B(b_) {}
void save(CosmoTool::H5_CommonFileGroup &fg) { mass.saveMass(fg); B.save(fg); }
void restore(CosmoTool::H5_CommonFileGroup &fg) { mass.loadMass(fg); B.load(fg); }
};
// Register a MassSaver state element named `name` so the sampler's mass
// matrix is automatically saved/restored with the MarkovState.
template <typename Mass_t>
static void add_saver(
MarkovState &state, std::string const &name,
std::unique_ptr<SimpleSampler<Mass_t>> &sampler) {
Console::instance().print<LOG_DEBUG>(
"Creating a saver for the mass matrix in " + name);
auto obj_elt = new ObjectStateElement<MassSaver<Mass_t>, true>();
obj_elt->obj = new MassSaver<Mass_t>(sampler->getMass());
state.newElement(name, obj_elt, true);
}
// Overload for QN-HMC samplers: persists both the mass matrix and the
// quasi-Newton B matrix.
template <typename Mass_t>
static void add_saver(
MarkovState &state, std::string const &name,
std::unique_ptr<QNHMCLet::Sampler<Mass_t,QNHMCLet::BDense>> &sampler) {
Console::instance().print<LOG_DEBUG>(
"Creating a saver for the QN mass matrix in " + name);
auto obj_elt = new ObjectStateElement<QNMassSaver<Mass_t,QNHMCLet::BDense>, true>();
obj_elt->obj = new QNMassSaver<Mass_t,QNHMCLet::BDense>(sampler->getMass(), sampler->getB());
state.newElement(name, obj_elt, true);
}
} // namespace HMCLet
} // namespace LibLSS
#endif

View file

@ -0,0 +1,118 @@
module convHMC
using TensorFlow
sess = 0; p = 0; δ = 0; g = 0; s = 0; n = 0; sel = 0; loss = 0; error = 0; ag = 0; output = 0;
# Build a 3x3x3 convolution kernel from 4 scalars, symmetric under rotations:
# presumably params[1]=center, params[2]=face, params[3]=edge, params[4]=corner
# weight (by distance from center) — TODO confirm against the training code.
# Returned reshaped to (C0, C1, C2, 1, 1) for nn.conv3d.
function isotropic_weights(params, C0, C1, C2)
out_edge = stack([params[4], params[3], params[4]])
out_face = stack([params[3], params[2], params[3]])
inner = stack([params[2], params[1], params[2]])
face = stack([out_edge, out_face, out_edge])
middle = stack([out_face, inner, out_face])
return reshape(stack([face, middle, face]), (C0, C1, C2, 1, 1))
end
# Slice the global parameter placeholder `p` into per-layer isotropic kernels
# and biases: each layer consumes 5 entries (4 kernel weights + 1 bias).
function get_isotropic_weights(num_layers, kernel)
w = Array{Any}(num_layers)
b = Array{Any}(num_layers)
for i = 1:num_layers
w[i] = isotropic_weights(p[(i - 1) * 5 + 1: (i - 1) * 5 + 4], kernel[1], kernel[2], kernel[3])
b[i] = p[i * 5]
end
return w, b
end
# Slice `p` into unconstrained 3x3x3 kernels and biases: each layer consumes
# 28 entries (27 kernel weights + 1 bias).
function get_3d_conv(num_layers, kernel)
w = Array{Any}(num_layers)
b = Array{Any}(num_layers)
for i = 1:num_layers
w[i] = reshape(p[(i - 1) * 28 + 1: (i - 1) * 28 + 27], (kernel[1], kernel[2], kernel[3], 1, 1))
b[i] = p[i * 28]
end
return w, b
end
# Residual 3D convolutional network: each layer is relu(conv3d(x)+b) + x
# (skip connection), followed by a final relu; output reshaped to the box.
function convolutional_network(x, w, b, num_layers, N0, N1, N2)
for i = 1:num_layers
x = nn.relu(nn.conv3d(x, w[i], strides = [1, 1, 1, 1, 1], padding = "SAME") + b[i]) + x
end
x = nn.relu(x)
return reshape(x, (N0, N1, N2))
end
# Gaussian negative log-likelihood over the selected (masked) voxels:
# 0.5 (s*x - g)^2 / (n*s) + 0.5 log(n). `sel_` flattens and masks the field.
function mse(x, g_, s_, n_, sel_, loss_params)
N0 = loss_params[1]
N1 = loss_params[2]
N2 = loss_params[3]
x = boolean_mask(reshape(x, N0 * N1 * N2), sel_)
return reduce_sum(0.5 * (multiply(x, s_) - g_)^2. / multiply(n_, s_) + 0.5 * log(n_))
end
# Placeholder variable getter for the Poisson-bias model: no network weights
# are used, so sentinel values are returned (ignored by no_network).
function get_poisson_bias(_, __)
return -99, -99
end
# Identity "network": passes the density field through unchanged.
function no_network(x, _, __, ___, ____, _____, ______)
return x
end
# Biased-Poisson-style chi^2 loss over the selected voxels, with a single
# bias parameter p[1] applied to the field x.
function poisson_bias(x, g_, s_, n_, sel_, loss_params)
N0 = loss_params[1]
N1 = loss_params[2]
N2 = loss_params[3]
x = boolean_mask(reshape(x, N0 * N1 * N2), sel_)
return reduce_sum((g_ .- s_ .* ( .- p[1] .* x)).^2. / (s_ .* n_))
end
# Build the TensorFlow graph once: placeholders for parameters (p), density
# field (δ), galaxy counts (g), selection (s), noise (n) and mask (sel);
# the network output; the loss Λ; and the adjoint gradient d(loss)/dδ.
# All handles are stored in module-level globals used by evaluate/adjointGradient.
function setup(num_layers, N0, N1, N2, num_params, extras, loss_params, network, get_variables, Λ)
global sess, p, δ, g, s, n, sel, output, loss, ag, error
sess = Session();
p = placeholder(Float64, shape = [num_params])
δ = placeholder(Float64, shape = [N0, N1, N2])
δ_ = reshape(δ, (1, N0, N1, N2, 1))
sel = placeholder(Bool, shape = [N0, N1, N2])
sel_ = reshape(sel, N0 * N1 * N2)
g = placeholder(Float64, shape = [N0, N1, N2])
g_ = boolean_mask(reshape(g, N0 * N1 * N2), sel_)
s = placeholder(Float64, shape = [N0, N1, N2])
s_ = boolean_mask(reshape(s, N0 * N1 * N2), sel_)
n = placeholder(Float64, shape = [1])
n_ = n[1]
w, b = get_variables(num_layers, extras)
output = network(δ_, w, b, num_layers, N0, N1, N2)
loss = Λ(output, g_, s_, n_, sel_, loss_params)
# Adjoint gradient of the loss with respect to the input density field.
ag = gradients(loss, δ)
#error = gradients(loss, p)
run(sess, global_variables_initializer())
end
# Evaluate the loss for the given parameters/field/data (one session run).
function evaluate(params, field, galaxy, selection, noise, mask)
return run(sess, loss, Dict(p => params, δ => field, g => galaxy, s => selection, n => [noise], sel => mask))
end
# Gradient of the loss with respect to the input density field.
function adjointGradient(params, field, galaxy, selection, noise, mask)
return run(sess, ag, Dict(p => params, δ => field, g => galaxy, s => selection, n => [noise], sel => mask))
end
#function adjointNetworkGradient(params, field, galaxy, selection, noise, mask)
# gradient = run(sess, error, Dict(p => params, δ => field, g => galaxy, s => selection, n => [noise], sel => mask))
# params_gradient = gradient.values[gradient.indices]
# #println(params_gradient)
# #params_gradient = Array{Float64}(tot_num_conv * 5);
# #for i = 1:tot_num_conv
# # for j = 1:4
# # ind = find(x -> x == j, gradient[(i - 1) * 2 + 1].indices);
# # params_gradient[(i - 1) * 5 + j] = sum(gradient[(i - 1) * 2 + 1].values[ind]);
# # end
# # params_gradient[i * 5] = gradient[i * 2];
# #end
# return params_gradient
#end
# Run only the network output node: no galaxy/selection/noise feeds needed.
function get_field(params, field)
    return run(sess, output, Dict(p => params, δ => field))
end
end

View file

@ -0,0 +1,43 @@
# Standalone experiment: a small fully-connected leaky-ReLU network meant to
# regress -2 ln L as a function of a 3-d parameter vector θ.
using TensorFlow
using Distributions
sess = Session(Graph());
inputs = 3;   # dimensionality of θ
θ = placeholder(Float32, shape = [nothing, inputs])
m2lnL = placeholder(Float32, shape = [nothing])   # regression target: -2 ln L
layers = 2;
neurons_per_layer = 50;
α = 0.1;      # leaky-ReLU negative slope
# Build `layers` dense layers with He-style Normal initialisation.
# NOTE(review): weights/biases are allocated with layers + 1 slots but only
# `layers` are filled, and with layers == 2 the `else` (hidden-layer) branch
# is dead — presumably leftovers from a deeper configuration; confirm.
function network(θ, layers, neurons_per_layer, α)
    x = θ
    weights = Array{Any}(layers + 1)
    biases = Array{Any}(layers + 1)
    for i=1:layers
        if i == 1
            weights[i] = get_variable("layer_" * string(i) * "_weights", [3, neurons_per_layer], Float32, initializer=Normal(0., sqrt(2./3.)))
            biases[i] = get_variable("layer_" * string(i) * "_biases", [neurons_per_layer], Float32)
        elseif i == layers
            weights[i] = get_variable("layer_" * string(i) * "_weights", [neurons_per_layer, 1], Float32, initializer=Normal(0., sqrt(2./neurons_per_layer)))
            biases[i] = get_variable("layer_" * string(i) * "_biases", [1], Float32)
        else
            weights[i] = get_variable("layer_" * string(i) * "_weights", [neurons_per_layer, neurons_per_layer], Float32, initializer=Normal(0., sqrt(2./neurons_per_layer)))
            biases[i] = get_variable("layer_" * string(i) * "_biases", [neurons_per_layer], Float32)
        end
        x = x * weights[i] + biases[i]
        # Leaky ReLU; note it is also applied to the final (output) layer.
        x = max(α * x, x)
    end
    x = reshape(x, (-1))
    return x, weights, biases
end
output, weights, biases = network(θ, layers, neurons_per_layer, α)
# Relative squared-error loss against the target.
loss = mean(0.5 * (output / m2lnL - 1)^2)
gradient = gradients(loss, θ);
weight_gradients = [gradients(loss, weights[i]) for i=1:layers];
bias_gradients = [gradients(loss, biases[i]) for i=1:layers];

View file

@ -0,0 +1,171 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/network/TF_conv.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
# TensorFlow-based residual convolutional bias model for HADES/BORG.
# setup() builds the whole graph once; the remaining functions are the entry
# points invoked from the C++ Julia bridge (initialize/likelihood/adjoint/...).
module network
using libLSS
import libLSS.State
import libLSS.GhostPlanes, libLSS.get_ghost_plane
import libLSS.print, libLSS.LOG_INFO, libLSS.LOG_VERBOSE, libLSS.LOG_DEBUG
using TensorFlow
# Module-level session and graph nodes; all populated by setup().
sess = Session(Graph())
p = nothing        # per-parameter scalar placeholders (network weights)
#new_p = nothing
#assign_p = nothing
δ = nothing        # density contrast placeholder, shape (N0, N0, N0)
g = nothing        # galaxy data placeholder
s = nothing        # selection window placeholder
mask = nothing     # Bool placeholder selecting voxels entering the loss
output = nothing   # network prediction for δ
mock = nothing     # prediction modulated by the selection window
loss = nothing     # scalar negative log-likelihood
adgrad = nothing   # ∂loss/∂δ
wgrad = nothing    # ∂loss/∂p[i], one graph node per parameter
# Build the complete TF graph for an N0^3 box with `number_of_parameters`
# scalar weights. Must be called before any evaluation.
function setup(N0, number_of_parameters)
    global p, new_p, assign_p, δ, g, s, mask, output, mock, loss, adgrad, wgrad
    p = Array{Any}(number_of_parameters)
    #new_p = Array{Any}(number_of_parameters)
    #assign_p = Array{Any}(number_of_parameters)
    for i=1:number_of_parameters
        p[i] = placeholder(Float64, shape = [])
        #p[i] = Variable(zeros(Float64, 1))
        #new_p[i] = placeholder(Float64, shape = [])
        #assign_p[i] = assign(p[i], expand_dims(new_p[i], 1))
    end
    δ = placeholder(Float64, shape = [N0, N0, N0])
    g = placeholder(Float64, shape = [N0, N0, N0])
    s = placeholder(Float64, shape = [N0, N0, N0])
    mask = placeholder(Bool, shape = [N0, N0, N0])
    output = build_network(δ, p)
    mock = output .* s
    # Gaussian chi^2 over masked voxels with variance ∝ selection, plus a
    # constant normalisation term per selected voxel.
    loss = 0.5 * sum((boolean_mask(reshape(g, N0^3), reshape(mask, N0^3)) .- boolean_mask(reshape(s, N0^3), reshape(mask, N0^3)) .* boolean_mask(reshape(output, N0^3), reshape(mask, N0^3))).^2. ./(boolean_mask(reshape(s, N0^3), reshape(mask, N0^3)))) + 0.5 * sum(cast(mask, Float64))
    adgrad = gradients(loss, δ)
    wgrad = Array{Any}(number_of_parameters)
    for i=1:number_of_parameters
        wgrad[i]= gradients(loss, p[i])
    end
    run(sess, global_variables_initializer())
end
# Two-stage residual network of 1x1x1 3-d convolutions with leaky-ReLU
# activations (slope α); each stage adds back its input (skip connection).
# The many expand_dims lift scalar weights to 5-d conv kernels.
function build_network(input_tensor, weights)
    α = Float64(0.01)
    x = nn.conv3d(expand_dims(expand_dims(input_tensor, 4), 5), expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[1], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
    x = x .+ weights[2]
    x = max(α .* x, x)
    x = nn.conv3d(x, expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[3], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
    x = x .+ weights[4]
    x = x + expand_dims(expand_dims(input_tensor, 4), 5)
    x = max(α .* x, x)
    x_ = nn.conv3d(x, expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[5], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
    x_ = x_ .+ weights[6]
    x_ = max(α .* x_, x_)
    x_ = nn.conv3d(x_, expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[7], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
    x_ = x_ .+ weights[8]
    x_ = x_ + x
    x_ = max(α .* x_, x_)
    return squeeze(x_)
end
# Manual smoke-test driver kept for reference.
#number_of_parameters = 8
#N0 = 32
#setup(N0, number_of_parameters)
#using Distributions
#δ_ = reshape(rand(Normal(0., 1.), 32 * 32 * 32), (32, 32, 32));
#g_ = reshape(rand(Normal(0., 1.), 32 * 32 * 32), (32, 32, 32));
#p_ = zeros(number_of_parameters);
#s_ = reshape(rand(0:1, 32 * 32 * 32), (32, 32, 32));
#s_mask = s_.>0;
#using PyPlot
#imshow(squeeze(sum(δ_, 3), 3))
#imshow(squeeze(sum(g_, 3), 3))
#imshow(squeeze(sum(run(sess, output, Dict(δ=>δ_, p=>p_)), 3), (3)))
#imshow(squeeze(sum(run(sess, mock, Dict(δ=>δ_, p=>p_, s=>s_)), 3), (3)))
#loss_ = run(sess, loss, Dict(δ=>δ_, p=>p_, s=>s_, g=>g_, mask=>s_mask))
#adgrad_ = run(sess, adgrad, Dict(δ=>δ_, p=>p_, s=>s_, g=>g_, mask=>s_mask))
#wgrad_ = run(sess, wgrad, Dict(δ=>δ_, p=>p_, s=>s_, g=>g_, mask=>s_mask))
# Called once by the C++ side: size the graph from the Markov state and
# zero-initialise the 8 network weights of catalogue 0.
function initialize(state::State)
    print(LOG_INFO, "Likelihood initialization in Julia")
    number_of_parameters = 8
    N0 = libLSS.get(state, "N0", Int64, synchronous=true)
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    setup(N0, number_of_parameters)
    print(LOG_VERBOSE, "Found " *repr(NCAT) * " catalogues")
    bias = libLSS.resize_array(state, "galaxy_bias_0", number_of_parameters, Float64)
    bias[:] = 0
end
# No ghost planes are needed by this likelihood.
function get_required_planes(state::State)
    print(LOG_INFO, "Check required planes")
    return Array{UInt64,1}([])
end
# Total loss summed over catalogues for the current density field `array`.
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
    print(LOG_INFO, "Likelihood evaluation in Julia")
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    L = Float64(0.)
    for catalog=1:NCAT
        sc = repr(catalog - 1)
        L += run(sess, loss, Dict(p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64), δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
    end
    print(LOG_VERBOSE, "Likelihood is " * repr(L))
    return L
end
# Generate mock data for catalogue 0.
# NOTE(review): the run() result is immediately rebound to the state array on
# the next line, so the generated mock is never stored back — presumably this
# should write into the state array (data[:] = ...); confirm.
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
    print(LOG_INFO, "Generate mock")
    sc = "0"
    data = run(sess, mock, Dict(p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64), δ=>array, s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)))
    data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
    print(LOG_INFO, "Shape is " * repr(size(data)) * " and " * repr(size(array)))
    print(LOG_INFO, "Number of threads " * repr(Threads.nthreads()))
    print(LOG_INFO, "Noise is not included")
    print(LOG_INFO, "Max val is " * repr(maximum(array)) * " and data " * repr(maximum(data)))
end
# Accumulate ∂loss/∂δ over catalogues into `ag`, only on selected voxels.
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
    print(LOG_VERBOSE, "Adjoint gradient in Julia")
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    ag[:,:,:] = 0
    for catalog=1:NCAT
        sc = repr(catalog - 1)
        Smask = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.
        ag[Smask] += run(sess, adgrad, Dict(p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64), δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>Smask))[Smask]
    end
end
# Loss for a single catalogue with an explicit bias-parameter vector.
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias)
    sc = repr(catalog_id)
    return run(sess, loss, Dict(p=>catalog_bias, δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
# Fixed step-size hint for the bias sampler.
function get_step_hint(state, catalog_id)
    return 0.1
end
# Reject negative second parameter; flat otherwise.
function log_prior_bias(state, catalog_id, bias)
    if bias[2] < 0
        return Inf
    end
    return 0
end
# Gradient of the loss w.r.t. the bias parameters.
# NOTE(review): this rebinds the local name `adjoint_gradient_bias` instead of
# filling the caller-provided array (adjoint_gradient_bias[:] = ...), so the
# result may never reach the C++ side — confirm against the bridge contract.
function adjoint_bias(state::State, ghosts::GhostPlanes,
    array, catalog_id, catalog_bias, adjoint_gradient_bias)
    sc = repr(catalog_id)
    adjoint_gradient_bias = run(sess, wgrad, Dict(p=>catalog_bias, δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
end

View file

@ -0,0 +1,140 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/network/TF_likelihood.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
# Linear-bias Gaussian likelihood implemented as a TensorFlow graph.
# The two bias parameters are held in a TF Variable `p` that is overwritten
# through an assign op before each evaluation.
module network
using libLSS
import libLSS.State
import libLSS.GhostPlanes, libLSS.get_ghost_plane
import libLSS.print, libLSS.LOG_INFO, libLSS.LOG_VERBOSE, libLSS.LOG_DEBUG
using TensorFlow
# Module-level session and graph nodes; populated by setup().
sess = Session(Graph())
p = nothing        # bias parameters as a TF Variable
new_p = nothing    # placeholder used to overwrite p
assign_p = nothing # assign op: new_p -> p
δ = nothing
g = nothing
s = nothing
mask = nothing
loss = nothing
adgrad = nothing   # ∂loss/∂δ
wgrad = nothing    # [values, indices] of the sparse gradient ∂loss/∂p
# Build the graph for an N0^3 box with `number_of_parameters` parameters.
function setup(N0, number_of_parameters)
    global p, new_p, assign_p, δ, g, s, mask, loss, adgrad, wgrad
    p = Variable(zeros(number_of_parameters))
    new_p = placeholder(Float64, shape = [number_of_parameters])
    assign_p = assign(p, new_p)
    δ = placeholder(Float64, shape = [N0, N0, N0])
    g = placeholder(Float64, shape = [N0, N0, N0])
    s = placeholder(Float64, shape = [N0, N0, N0])
    mask = placeholder(Bool, shape = [N0, N0, N0])
    # Gaussian negative log-likelihood over masked voxels, model
    # s * (1 - p[1]*δ) with variance s * p[2], plus sum(mask) * log(p[2]).
    # NOTE(review): generate_mock_data below uses mean (1 + b[2]*δ) and noise
    # b[1] — the parameter ordering AND the sign of the bias term differ from
    # this loss; confirm which convention is intended.
    loss = 0.5 * sum((boolean_mask(reshape(g, N0^3), reshape(mask, N0^3)) .- boolean_mask(reshape(s, N0^3), reshape(mask, N0^3)) .* (1. .- p[1] .* boolean_mask(reshape(δ, N0^3), reshape(mask, N0^3)))).^2. ./(boolean_mask(reshape(s, N0^3), reshape(mask, N0^3)) .* p[2])) + 0.5 * sum(cast(mask, Float64)) .* log(p[2])
    adgrad = gradients(loss, δ)
    wgrad_slice = gradients(loss, p)
    wgrad = [wgrad_slice.values, wgrad_slice.indices]
end
# Called once by the C++ side: size the graph and set both parameters to 1.
function initialize(state::State)
    print(LOG_INFO, "Likelihood initialization in Julia")
    number_of_parameters = 2
    N0 = libLSS.get(state, "N0", Int64, synchronous=true)
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    setup(N0, number_of_parameters)
    run(sess, global_variables_initializer())
    print(LOG_VERBOSE, "Found " *repr(NCAT) * " catalogues")
    bias = libLSS.resize_array(state, "galaxy_bias_0", number_of_parameters, Float64)
    bias[:] = 1
    run(sess, assign_p, Dict(new_p=>bias))
end
# No ghost planes are needed by this likelihood.
function get_required_planes(state::State)
    print(LOG_INFO, "Check required planes")
    return Array{UInt64,1}([])
end
# Total loss summed over catalogues; per-catalogue bias is pushed into the
# TF Variable before evaluating the loss node.
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
    print(LOG_INFO, "Likelihood evaluation in Julia")
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    L = Float64(0.)
    for catalog=1:NCAT
        sc = repr(catalog - 1)
        run(sess, assign_p, Dict(new_p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)))
        L += run(sess, loss, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
    end
    print(LOG_VERBOSE, "Likelihood is " * repr(L))
    return L
end
# Fill catalogue 0's data array with a Gaussian mock:
# data = S*(1 + b[2]*δ) + sqrt(S)*sqrt(b[1])*N(0,1), written in place.
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
    print(LOG_INFO, "Generate mock")
    sc = "0"
    data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
    b = libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)
    S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
    s = size(data)
    print(LOG_INFO, "Shape is " * repr(size(data)) * " and " * repr(size(array)))
    print(LOG_INFO, "Number of threads " * repr(Threads.nthreads()))
    N0=s[1]
    N1=s[2]
    N2=s[3]
    noise = sqrt(b[1])
    print(LOG_INFO, "Noise is " * repr(noise))
    bias = b[2]
    for i=1:N0,j=1:N1,k=1:N2
        data[i,j,k] = S[i,j,k]*(1+bias*array[i,j,k]) + sqrt(S[i,j,k])*noise*libLSS.gaussian(state)
    end
    print(LOG_INFO, "Max val is " * repr(maximum(array)) * " and data " * repr(maximum(data)))
end
# Accumulate ∂loss/∂δ over catalogues into `ag`, only on selected voxels.
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
    print(LOG_VERBOSE, "Adjoint gradient in Julia")
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    ag[:,:,:] = 0
    for catalog=1:NCAT
        sc = repr(catalog - 1)
        run(sess, assign_p, Dict(new_p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)))
        Smask = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.
        ag[Smask] += run(sess, adgrad, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>Smask))[Smask]
    end
end
# Loss for one catalogue with an explicit bias vector.
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias)
    sc = repr(catalog_id)
    run(sess, assign_p, Dict(new_p=>catalog_bias))
    return run(sess, loss, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
# Fixed step-size hint for the bias sampler.
function get_step_hint(state, catalog_id)
    return 0.1
end
# Reject negative noise parameter; flat otherwise.
function log_prior_bias(state, catalog_id, bias)
    if bias[2] < 0
        return Inf
    end
    return 0
end
# Reduce the sparse parameter gradient into the output array.
# NOTE(review): `number_of_parameters` is not defined in this function's
# scope (it is a local of initialize()), so this loop will fail at runtime
# unless a global of that name exists — confirm.
function adjoint_bias(state::State, ghosts::GhostPlanes,
    array, catalog_id, catalog_bias, adjoint_gradient_bias)
    sc = repr(catalog_id)
    run(sess, assign_p, Dict(new_p=>catalog_bias))
    error = run(sess, wgrad, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
    for i=1:number_of_parameters
        adjoint_gradient_bias[i] = sum(error[1][error[2] .== i])
    end
end
end

View file

@ -0,0 +1,103 @@
# Test likelihood wiring the generic convHMC module (convHMC.jl) into
# ARES/HADES: one isotropic convolution layer over a 32^3 box.
module test_conv_like
include("convHMC.jl")
using libLSS
import libLSS.State
import libLSS.GhostPlanes, libLSS.get_ghost_plane
import libLSS.print, libLSS.LOG_INFO, libLSS.LOG_VERBOSE, libLSS.LOG_DEBUG
#import test_conv_like.convHMC.initialise
# Size the network from the Markov state and seed the bias vector:
# 5 weights per layer + 1 trailing noise parameter, stored in galaxy_bias_0.
function initialize(state::State)
    print(LOG_INFO, "Likelihood initialization in Julia")
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    print(LOG_VERBOSE, "Found " *repr(NCAT) * " catalogues")
    # NOTE(review): sibling modules read "N0" here; "localN0" is the MPI-local
    # slab size — confirm which is intended for the network input shape.
    N0 = libLSS.get(state, "localN0", Int64, synchronous=true)
    N1 = 32
    N2 = 32
    num_layers = 1
    C0 = 3
    C1 = 3
    C2 = 3
    bias = libLSS.resize_array(state, "galaxy_bias_0", num_layers * 5 + 1, Float64)
    #bias = libLSS.resize_array(state, "galaxy_bias_0", 29, Float64)
    bias[:] = 0
    bias[1] = 1     # initial weight amplitude
    bias[6] = 100   # initial noise parameter (last slot)
    #bias[28] = 1
    #bias[29] = 100
    #bias[11] = 1
    #bias[16] = 1
    #bias[21] = 1
    #bias[26] = 100
    test_conv_like.convHMC.setup(num_layers, N0, N1, N2, 5 * num_layers, [C0, C1, C2], [N0, N1, N2], test_conv_like.convHMC.convolutional_network, test_conv_like.convHMC.get_isotropic_weights, test_conv_like.convHMC.mse)
    #test_conv_like.convHMC.setup(num_layers, N0, N1, N2, 28, [C0, C1, C2], [N0, N1, N2], test_conv_like.convHMC.convolutional_network, test_conv_like.convHMC.get_3d_conv, test_conv_like.convHMC.mse)
    #bias = libLSS.resize_array(state, "galaxy_bias_0", 2, Float64)
    #bias[1] = 100
    #bias[2] = 1
    #test_conv_like.convHMC.setup(num_layers, N0, N1, N2, 1, -99, [N0, N1, N2], test_conv_like.convHMC.no_network, test_conv_like.convHMC.get_poisson_bias, test_conv_like.convHMC.poisson_bias)
end
# No ghost planes required.
function get_required_planes(state::State)
    print(LOG_INFO, "Check required planes")
    return []
end
# Total loss over catalogues; the last bias slot is the noise parameter.
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
    print(LOG_INFO, "Likelihood evaluation in Julia")
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    L = Float64(0)
    for catalog in 0:(NCAT-1)
        sc = repr(catalog)
        data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
        params = libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)
        S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
        Smask = S.>0
        L += test_conv_like.convHMC.evaluate(params[1:end-1], array, data, S, params[end], Smask)
    end
    print(LOG_VERBOSE, "Likelihood is " * repr(L))
    return L
end
# Compute a mock field through the network (selection-modulated).
# NOTE(review): `data` is a fresh array and is never written back into the
# state's galaxy_data_0, so the mock is not persisted — confirm intent.
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
    #sc = "0"
    #data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
    #b = libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)
    #S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
    #s = size(data)
    #print(LOG_INFO, "Shape is " * repr(size(data)) * " and " * repr(size(array)))
    #print(LOG_INFO, "Number of threads " * repr(Threads.nthreads()))
    #N0=s[1]
    #N1=s[2]
    #N2=s[3]
    #noise = sqrt(b[1])
    #bias = b[2]
    #for i=1:N0,j=1:N1,k=1:N2
    # data[i,j,k] = S[i,j,k]*(1+bias*array[i,j,k] + noise*libLSS.gaussian(state))
    #end
    print(LOG_INFO, "Generate mock")
    params = libLSS.get_array_1d(state, "galaxy_bias_0", Float64)
    S = libLSS.get_array_3d(state, "galaxy_sel_window_0", Float64)
    data = test_conv_like.convHMC.get_field(params[1:end-1], array) .* S
    print(LOG_INFO, "Max val is " * repr(maximum(array)) * " and data " * repr(maximum(data)))
end
# Accumulate the density-field gradient over catalogues.
# NOTE(review): the params lookup inside the loop always reads catalogue 0
# ("galaxy_bias_0") even for sc != 0 — presumably should use *sc; confirm.
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
    print(LOG_VERBOSE, "Adjoint gradient in Julia")
    N0 = libLSS.get(state, "N0", Int64, synchronous=true)
    NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
    L = Float64(0)
    ag[:, :, :] = 0
    for catalog in 0:(NCAT-1)
        sc = repr(catalog)
        data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
        params = libLSS.get_array_1d(state, "galaxy_bias_0", Float64)
        S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
        Smask = S.>0
        ag += test_conv_like.convHMC.adjointGradient(params[1:end-1], array, data, S, params[end], Smask)
    end
end
end

View file

@ -0,0 +1,74 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_dense_mass.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE mass_matrix
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include <CosmoTool/algo.hpp>
#include <memory>
#include <H5Cpp.h>
#include "libLSS/hmclet/dense_mass.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
namespace utf = boost::unit_test;
using namespace LibLSS;
BOOST_AUTO_TEST_CASE(dense_mass) {
  MPI_Communication *comm = MPI_Communication::instance();
  RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);
  // 3x3 dense mass matrix under test.
  HMCLet::DenseMassMatrix M(3);
  boost::multi_array<double, 1> numbers(boost::extents[3]);
  auto numbers_w = fwrap(numbers); // NOTE(review): wrapper is unused below
  double a[3];
  auto& cons = Console::instance();
  // Feed 20 correlated Gaussian samples: components 0/1 are a 45-degree
  // rotation of (a0, a2); component 2 is independent. After every sample,
  // recompute the principal components and log them for manual inspection.
  for (int i = 0; i < 20; i++) {
    a[0] = rgen.gaussian();
    a[1] = rgen.gaussian();
    a[2] = rgen.gaussian();
    numbers[0] = (a[0]+a[2])/std::sqrt(2.0);
    numbers[1] = (a[0]-a[2])/std::sqrt(2.0);
    numbers[2] = a[1];
    M.addMass(numbers);
    M.computeMainComponents();
    auto C = M.components();
    auto mean = M.getMean();
    cons.format<LOG_DEBUG>("c00 = %g, c01 = %g, c02 = %g", C(0,0), C(0,1), C(0,2));
    cons.format<LOG_DEBUG>("c10 = %g, c11 = %g, c12 = %g", C(1,0), C(1,1), C(1,2));
    cons.format<LOG_DEBUG>("c20 = %g, c21 = %g, c22 = %g", C(2,0), C(2,1), C(2,2));
    cons.format<LOG_DEBUG>("mean = %g,%g,%g", mean(0), mean(1), mean(2));
  }
  // NOTE(review): no assertions — this test only exercises the API and relies
  // on inspection of the debug log.
}
// Boot MPI and the libLSS static-init machinery, then hand over to Boost.Test.
int main(int argc, char *argv[]) {
  setupMPI(argc, argv);
  StaticInit::execute();
  int const result = utf::unit_test_main(&init_unit_test, argc, argv);
  StaticInit::finalize();
  doneMPI();
  return result;
}

View file

@ -0,0 +1,146 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_hmclet.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE julia_bind
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include <CosmoTool/algo.hpp>
#include <memory>
#include <H5Cpp.h>
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
#include "libLSS/hmclet/diagonal_mass.hpp"
namespace utf = boost::unit_test;
using CosmoTool::square;
using namespace LibLSS;
using namespace LibLSS::HMCLet;
static const double C[2][2] = { { 9. , 1.}, {1., 4.}};
static const double inv_C[2][2] = { { 0.11428571428571427 , -0.028571428571428574}, {-0.028571428571428574, 0.2571428571428572}};
// 2-d Gaussian test posterior with mean (1, 4) and covariance C (see the
// file-level constants C / inv_C): -log P = 0.5 * u^T C^{-1} u, u = x - mean.
class TestPosterior : virtual public JointPosterior {
public:
  TestPosterior() : JointPosterior() {}
  virtual ~TestPosterior() {}

  virtual size_t getNumberOfParameters() const { return 2; }

  // Negative log-posterior (up to an additive constant).
  virtual double evaluate(VectorType const &params) {
    double const u0 = params[0] - 1;
    double const u1 = params[1] - 4;
    return 0.5 * (u0*u0 * inv_C[0][0] + 2*u0*u1*inv_C[1][0] + u1*u1*inv_C[1][1]);
  }

  // Gradient of evaluate(): grad = C^{-1} u.
  // FIX: the off-diagonal terms previously carried a spurious factor of 2
  // (d/du0 of 0.5*2*u0*u1*B is u1*B, not 2*u1*B), making the gradient
  // inconsistent with evaluate() and biasing the HMC trajectories.
  virtual void
  adjointGradient(VectorType const &params, VectorType &params_gradient) {
    double const u0 = params[0] - 1;
    double const u1 = params[1] - 4;
    params_gradient[0] = u0 * inv_C[0][0] + u1 * inv_C[0][1];
    params_gradient[1] = u1 * inv_C[1][1] + u0 * inv_C[0][1];
  }
};
// Draw 10000 samples from the correlated 2-d Gaussian of TestPosterior with
// a frozen unit diagonal mass, and dump the chain to HDF5 for inspection.
BOOST_AUTO_TEST_CASE(hmclet_launch) {
  auto posterior = std::make_shared<TestPosterior>();
  SimpleSampler<DiagonalMassMatrix> sampler(posterior);
  MPI_Communication *comm = MPI_Communication::instance();
  RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);

  // Start far from the mean to exercise burn-in.
  boost::multi_array<double, 1> params(boost::extents[2]);
  boost::multi_array<double, 1> step(boost::extents[2]);
  params[0] = 100;
  params[1] = 100;
  step[0] = 1;
  step[1] = 1;

  // Unit mass, frozen for the whole run.
  boost::multi_array<double, 1> mass(boost::extents[2]);
  mass[0] = 1;
  mass[1] = 1;
  sampler.getMass().setInitialMass(mass);
  sampler.getMass().freeze();
  // sampler.calibrate(comm, rgen, 2, params, step);

  boost::multi_array<double, 2> chain(boost::extents[10000][2]);
  for (size_t i = 0; i < chain.size(); i++) {
    sampler.newSample(comm, rgen, params);
    chain[i][0] = params[0];
    chain[i][1] = params[1];
  }

  H5::H5File ff("test_sample.h5", H5F_ACC_TRUNC);
  CosmoTool::hdf5_write_array(ff, "hmclet", chain);
}
// Same target as hmclet_launch, but sampled with the quasi-Newton HMC
// variant (dense B approximation); chain written to test_sample_qn.h5.
BOOST_AUTO_TEST_CASE(qnhmclet_launch) {
  auto posterior = std::make_shared<TestPosterior>();
  QNHMCLet::Sampler<DiagonalMassMatrix,QNHMCLet::BDense> sampler(posterior);
  MPI_Communication *comm = MPI_Communication::instance();
  RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);

  boost::multi_array<double, 1> params(boost::extents[2]);
  boost::multi_array<double, 1> step(boost::extents[2]);

  // Frozen unit diagonal mass.
  boost::multi_array<double, 1> mass(boost::extents[2]);
  mass[0] = 1;
  mass[1] = 1;
  sampler.getMass().setInitialMass(mass);
  sampler.getMass().freeze();

  // Start far from the mean to exercise burn-in.
  params[0] = 100;
  params[1] = 100;
  step[0] = 1;
  step[1] = 1;

  boost::multi_array<double, 2> chain(boost::extents[10000][2]);
  H5::H5File ff("test_sample_qn.h5", H5F_ACC_TRUNC);
  for (size_t i = 0; i < chain.size(); i++) {
    sampler.newSample(comm, rgen, params);
    chain[i][0] = params[0];
    chain[i][1] = params[1];
    // auto gg = ff.createGroup(boost::str(boost::format("B_%d") % i));
    // sampler.getB().save(gg);
  }
  CosmoTool::hdf5_write_array(ff, "qn_hmclet", chain);
}
// Boot MPI and the libLSS static-init machinery, then hand over to Boost.Test.
int main(int argc, char *argv[]) {
  setupMPI(argc, argv);
  StaticInit::execute();
  int const result = utf::unit_test_main(&init_unit_test, argc, argv);
  StaticInit::finalize();
  doneMPI();
  return result;
}

View file

@ -0,0 +1,60 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_julia.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
# Minimal stub likelihood used by test_julia_hmclet to exercise the Julia
# bridge — in particular the BadGradient failure path of adjoint_bias.
module TestLikelihood
using ..libLSS
import ..libLSS.State, ..libLSS.GhostPlanes, ..libLSS.get_ghost_plane
import ..libLSS.print, ..libLSS.LOG_INFO, ..libLSS.LOG_VERBOSE, ..libLSS.LOG_DEBUG
import ..libLSS.BadGradient
# Nothing to set up; the bias-resize call is kept for reference.
function initialize(state::State)
    print(LOG_VERBOSE, "Likelihood initialization in Julia")
#    bias = libLSS.resize_array(state, "galaxy_bias_0", 1, Float64)
#    bias[1] = 1
end
# No ghost planes required.
function get_required_planes(state::State)
    return Array{UInt64,1}([])
end
# Flat (constant) likelihood.
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
    print(LOG_DEBUG, "my likelihood")
    return 0
end
# Fixed step-size hint for the bias sampler.
function get_step_hint(state, catalog_id, bias_id)
    print(LOG_DEBUG, "get_step_hint")
    return 0.1
end
# Quadratic term in the transformed variable (bias = exp(bias_tilde)).
# NOTE(review): returned as +sum(tilde^2) — confirm the caller expects a
# chi^2-like (negative log) convention here.
function log_prior_bias(state, catalog_id, bias_tilde)
    print(LOG_DEBUG, "log_prior_bias")
    # Change of variable bias = exp(bias_tilde)
    return sum(bias_tilde.^2)
end
# Mock generation is a no-op for this stub.
function generate_mock_data(state::State, ghosts::GhostPlanes, array)
end
# Bias likelihood is flat as well.
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias_tilde)
    return 0
end
# No density-gradient contribution.
function adjoint_gradient(state::State, array, ghosts, ag)
end
# Deliberately throws so the C++ side's BadGradient handling is exercised.
function adjoint_bias(state::State, ghosts::GhostPlanes,
    array, catalog_id, catalog_bias_tilde, adjoint_gradient_bias)
    print(LOG_DEBUG,"Entering ag bias")
    throw(BadGradient())
end
end

View file

@ -0,0 +1,96 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_julia_hmclet.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE julia_hmclet
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/julia/julia.hpp"
#include "libLSS/julia/julia_mcmc.hpp"
#include "libLSS/mcmc/global_state.hpp"
#include "libLSS/mcmc/state_element.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/tools/console.hpp"
#include "libLSS/tests/setup_hades_test_run.hpp"
#include "libLSS/samplers/julia/julia_likelihood.hpp"
#include "libLSS/physics/forwards/borg_lpt.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/hmclet/julia_hmclet.hpp"
namespace utf = boost::unit_test;
using namespace LibLSS;
using namespace LibLSS_test;
// Global test fixture: builds a MarkovState with a standard HADES test
// configuration and a BORG LPT forward model, shared by all test cases.
struct JuliaFixture {
  static MPI_Communication *comm;
  static MarkovState *state;
  static BoxModel box;

  JuliaFixture() {
    LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
    state = new MarkovState();
    // 32^3 grid in a 600 (Mpc/h) box.
    setup_hades_test_run(comm, 32, 600., *state);
    setup_box(*state, box);
    ObjectStateElement<BORGForwardModel, true> *model_elt =
        new ObjectStateElement<BORGForwardModel, true>();
    state->newScalar<bool>("bias_sampler_blocked", false);
    state->newScalar<long>("MCMC_STEP", 0);
    double ai = state->getScalar<double>("borg_a_initial");
    // LPT forward model; constructor argument meanings (1, 2.0, 1.0, ...)
    // follow BorgLptModel's signature -- TODO confirm against its header.
    model_elt->obj =
        new BorgLptModel<>(comm, box, box, false, 1, 2.0, ai, 1.0, false);
    // Ownership of model_elt transfers to the state element container.
    state->newElement("BORG_model", model_elt);
  }
  ~JuliaFixture() { Console::instance().print<LOG_DEBUG>("Destroying state."); delete state; }
};
// Storage for the fixture's static members (comm is assigned in main()).
MPI_Communication *JuliaFixture::comm = 0;
MarkovState *JuliaFixture::state;
BoxModel JuliaFixture::box;

// Instantiate the fixture once for the whole test module.
BOOST_GLOBAL_FIXTURE(JuliaFixture);
BOOST_AUTO_TEST_CASE(julia_hmclet_fail) {
  LikelihoodInfo info;
  LibLSS_test::setup_likelihood_info(
      *JuliaFixture::state, info);
  Console::instance().print<LOG_DEBUG>(boost::format("Comm is %p") % JuliaFixture::comm);
  // Load the Julia likelihood from TEST_JULIA_LIKELIHOOD_CODE (test_julia.jl).
  auto density = std::make_shared<JuliaDensityLikelihood>(
      JuliaFixture::comm, info, TEST_JULIA_LIKELIHOOD_CODE, "TestLikelihood");
  // NOTE(review): this early return makes everything below unreachable, so
  // the test only checks that the likelihood object constructs. Presumably
  // the meta-sampler part is deliberately disabled — confirm before removing.
  return;
  JuliaHmcletMeta meta(JuliaFixture::comm, density, "TestLikelihood", JuliaHmclet::types::DIAGONAL, 10, 10, 0.5, true);
  density->initializeLikelihood(*JuliaFixture::state);
  meta.init_markov(*JuliaFixture::state);
  meta.sample(*JuliaFixture::state);
}
// Boot MPI (also wiring the fixture's communicator), redirect console output
// to a per-rank file, then hand over to Boost.Test.
int main(int argc, char *argv[]) {
  JuliaFixture::comm = setupMPI(argc, argv);
  StaticInit::execute();
  Console::instance().outputToFile(
      "test_julia_hmclet.txt_" +
      to_string(MPI_Communication::instance()->rank()));
  int const result = utf::unit_test_main(&init_unit_test, argc, argv);
  StaticInit::finalize();
  doneMPI();
  return result;
}

View file

@ -0,0 +1,88 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_network.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE julia_bind
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include <CosmoTool/algo.hpp>
#include <memory>
#include <H5Cpp.h>
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/hmclet/hmclet.hpp"
namespace utf = boost::unit_test;
using CosmoTool::square;
using namespace LibLSS;
using namespace LibLSS::HMCLet;
// Two independent Gaussian parameters: mean 1 (variance 10) and mean 4
// (variance 2). evaluate() is the negative log-posterior up to a constant.
class TestPosterior : virtual public JointPosterior {
public:
  TestPosterior() : JointPosterior() {}
  virtual ~TestPosterior() {}

  virtual size_t getNumberOfParameters() const { return 2; }

  virtual double evaluate(VectorType const &params) {
    double const d0 = params[0] - 1;
    double const d1 = params[1] - 4;
    return 0.5 * d0 * d0 / 10. + 0.5 * d1 * d1 / 2.;
  }

  // Analytic gradient of evaluate().
  virtual void
  adjointGradient(VectorType const &params, VectorType &params_gradient) {
    params_gradient[0] = (params[0] - 1) / 10.;
    params_gradient[1] = (params[1] - 4) / 2.;
  }
};
// Calibrate the sampler, then draw 1000 samples from TestPosterior and
// store the chain in test_sample.h5 (dataset "hmclet").
BOOST_AUTO_TEST_CASE(hmclet_launch) {
  auto posterior = std::make_shared<TestPosterior>();
  SimpleSampler sampler(posterior);
  MPI_Communication *comm = MPI_Communication::instance();
  RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);

  // Start far from the posterior mean to exercise burn-in.
  boost::multi_array<double, 1> params(boost::extents[2]);
  boost::multi_array<double, 1> step(boost::extents[2]);
  params[0] = 100;
  params[1] = 100;
  step[0] = 1;
  step[1] = 1;

  sampler.calibrate(comm, rgen, 10, params, step);

  boost::multi_array<double, 2> chain(boost::extents[1000][2]);
  for (size_t i = 0; i < chain.size(); i++) {
    sampler.newSample(comm, rgen, params);
    chain[i][0] = params[0];
    chain[i][1] = params[1];
  }

  H5::H5File ff("test_sample.h5", H5F_ACC_TRUNC);
  CosmoTool::hdf5_write_array(ff, "hmclet", chain);
}
// Boot MPI and the libLSS static-init machinery, then hand over to Boost.Test.
int main(int argc, char *argv[]) {
  setupMPI(argc, argv);
  StaticInit::execute();
  int const result = utf::unit_test_main(&init_unit_test, argc, argv);
  StaticInit::finalize();
  doneMPI();
  return result;
}

View file

@ -0,0 +1,27 @@
# Test registration for the hmclet module; picked up by the parent ARES
# test harness (one test_<name>.cpp per entry in TEST_hmclet_LIST).
set(EXTRA_HMCLET ${CMAKE_SOURCE_DIR}/extra/hmclet/libLSS/tests)

set(TEST_hmclet_LIST
    hmclet
    dense_mass
    #conv_hmc
    #weights
    #conv_hmc_julia
)
#SET(TEST_weights_LIBS ${JULIA_LIBRARY})
#SET(TEST_conv_hmc_julia_LIBS ${JULIA_LIBRARY})

if(BUILD_JULIA)
  list(APPEND TEST_hmclet_LIST julia_hmclet)
  # Embed the path of the Julia likelihood file into the test binary.
  set_property(
    SOURCE ${EXTRA_HMCLET}/test_julia_hmclet.cpp
    APPEND PROPERTY COMPILE_DEFINITIONS
    TEST_JULIA_LIKELIHOOD_CODE="${EXTRA_HMCLET}/test_julia.jl"
  )
  set(TEST_julia_hmclet_LIBS ${JULIA_LIBRARY})
  # FIX: previously used ${CURRENT_CMAKE_BINARY_DIR}, which is not a CMake
  # variable (it expanded empty, giving the bogus command "/test_julia_hmclet").
  # The builtin is CMAKE_CURRENT_BINARY_DIR.
  add_test(NAME julia_hmclet COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_julia_hmclet)
endif()

View file

@ -0,0 +1,56 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/src/hades_julia3.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define SAMPLER_DATA_INIT "ares_init.hpp"
#define SAMPLER_BUNDLE "julia_bundle.hpp"
#define SAMPLER_BUNDLE_INIT "julia_bundle_init.hpp"
#define SAMPLER_NAME "HADES3"
#define SAMPLER_MOCK_GENERATOR "julia_mock_gen.hpp"
#include "common/sampler_base.cpp"
#include "libLSS/tools/color_mod.hpp"
using namespace LibLSS::Color;
namespace {
  // Print the HADES+JULIA3 start-up banner. Adjacent string literals on
  // consecutive lines are concatenated by the compiler into single rows.
  // NOTE(review): runs of blanks inside the ASCII art may have been collapsed
  // during the import of this file — compare against the other hades splash
  // screens if the art looks wrong at runtime.
  void init_splash() {
    static string splash_str[] = {
        " ",
        " /\\_/\\____, "
        "____________________________ ",
        " ,___/\\_/\\ \\ ~ / " +
            fg(RED, "HADES+JULIA3", BRIGHT) + " ",
        " \\ ~ \\ ) XXX ",
        " XXX / /\\_/\\___, (c) Jens Jasche 2012 - 2019",
        " |---| \\o-o/-o-o/ ~ / Guilhem Lavaux 2014 - 2019",
        " | ) / \\ XXX "
        "____________________________ ",
        " \\ / _| / \\ \\_/ ",
        " --- ,-/ _ \\_/ \\ ",
        " / ( /____,__| ) ",
        " ( |_ ( ) \\) _| ",
        " _/ _) \\ \\__/ (_ ",
        " (,-(,(,(,/ \\,),),) "
        "",
        "Please acknowledge XXXX",
    };
    // Number of banner rows, computed at compile time from the array itself.
    static const int numSplashStr = sizeof(splash_str) / sizeof(splash_str[0]);
    for (int i = 0; i < numSplashStr; i++)
      Console::instance().print<LOG_STD>(splash_str[i]);
  }

  // Nothing to tear down for the splash screen.
  void close_splash() {}

  // Register the banner with the static-init framework (priority 12).
  RegisterStaticInit reg_splash(init_splash, close_splash, 12);
} // namespace

View file

@ -0,0 +1,119 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/src/julia_bundle.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef _HADES_JULIA_BUNDLE_HPP
#define _HADES_JULIA_BUNDLE_HPP
#include "libLSS/samplers/hades/hades_linear_likelihood.hpp"
#include "libLSS/samplers/core/powerspec_tools.hpp"
#include "libLSS/samplers/ares/synthetic_selection.hpp"
#include "libLSS/samplers/rgen/density_sampler.hpp"
#include "libLSS/physics/forward_model.hpp"
#include "libLSS/physics/hades_log.hpp"
#include "libLSS/physics/hades_pt.hpp"
#include "hades_option.hpp"
#include "libLSS/borg_version.hpp"
#include "libLSS/physics/modified_ngp.hpp"
#include "libLSS/physics/modified_ngp_smooth.hpp"
#include "libLSS/physics/forwards/borg_lpt.hpp"
#include "libLSS/physics/forwards/borg_2lpt.hpp"
#include "libLSS/physics/forwards/borg_multi_pm.hpp"
#include "libLSS/samplers/generic/generic_sigma8.hpp"
#include "libLSS/samplers/julia/julia_likelihood.hpp"
#include <boost/algorithm/string.hpp>
namespace LibLSS {
namespace {
  // Translate a configuration string (case-insensitive) into the matching
  // HMC integrator scheme. "LEAP_FROG" is accepted as an alias for SI_2A.
  // Unknown names abort through error_helper<ErrorBadState>.
  HMCOption::IntegratorScheme get_Scheme(const std::string &s) {
    using namespace HMCOption;

    // Name -> scheme lookup table; replaces a long if/else ladder.
    struct SchemeEntry {
      const char *name;
      IntegratorScheme scheme;
    };
    static const SchemeEntry known_schemes[] = {
        {"SI_2A", SI_2A}, {"LEAP_FROG", SI_2A}, {"SI_2B", SI_2B},
        {"SI_2C", SI_2C}, {"SI_3A", SI_3A},     {"SI_4B", SI_4B},
        {"SI_4C", SI_4C}, {"SI_4D", SI_4D},     {"SI_6A", SI_6A}};

    std::string const scheme = boost::to_upper_copy<std::string>(s);
    for (auto const &entry : known_schemes) {
      if (scheme == entry.name)
        return entry.scheme;
    }
    error_helper<ErrorBadState>(
        boost::format("Invalid integration scheme %s") % scheme);
  }
} // namespace
// Power-spectrum sampler stub: satisfies the PowerSpectrumSampler_Base
// interface but never updates the spectrum, for runs where the spectrum is
// kept fixed or handled by another component.
class DummyPowerSpectrum : public PowerSpectrumSampler_Base {
public:
  DummyPowerSpectrum(MPI_Communication *comm)
      : PowerSpectrumSampler_Base(comm) {}
  // Delegate state setup/restoration to the base-class helpers.
  virtual void initialize(MarkovState &state) { initialize_base(state); }
  virtual void restore(MarkovState &state) { restore_base(state); }
  // Intentionally a no-op: this sampler never changes the power spectrum.
  virtual void sample(MarkovState &state) {}
};
struct SamplerBundle {
//BlockLoop foreground_block;
typedef std::list<MarkovSampler *> SamplerList;
std::function<MarkovSampler *(int, int)> foreground_sampler_generator;
DummyPowerSpectrum dummy_ps;
SamplerList foreground_samplers;
MPI_Communication *comm;
std::shared_ptr<GenericDensitySampler> density_mc;
std::shared_ptr<MarkovSampler> bias;
std::shared_ptr<JuliaDensityLikelihood> julia_likelihood;
bool delegate_ic_to_julia;
BlockLoop foreground_block;
SyntheticSelectionUpdater sel_updater;
SamplerBundle(MPI_Communication *comm)
: comm(comm), dummy_ps(comm), delegate_ic_to_julia(false) {}
void newForeground(int catalog, int fgmap) {
Console::instance().print<LOG_VERBOSE>("Adding new foreground sampler");
MarkovSampler *fgsample = foreground_sampler_generator(catalog, fgmap);
if (fgsample != 0) {
foreground_samplers.push_back(fgsample);
foreground_block << (*fgsample);
}
}
~SamplerBundle() {
Console::instance().print<LOG_VERBOSE>("Begin destroying the bundle");
for (SamplerList::iterator i = foreground_samplers.begin();
i != foreground_samplers.end(); ++i) {
delete (*i);
}
Console::instance().print<LOG_VERBOSE>("Done destroying the bundle");
}
};
} // namespace LibLSS
#endif

View file

@ -0,0 +1,256 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/src/julia_bundle_init.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __HADES_JULIA_BUNDLE_INIT_HPP
#define __HADES_JULIA_BUNDLE_INIT_HPP
#include "julia_bundle.hpp"
#include "libLSS/hmclet/julia_slice.hpp"
#include "libLSS/hmclet/julia_hmclet.hpp"
#include "likelihood_info.hpp"
#include "libLSS/samplers/rgen/qnhmc/qnhmc_density_sampler.hpp"
#include "libLSS/samplers/core/generate_random_field.hpp"
#include "setup_models.hpp"
namespace LibLSS {
// Build the full sampler graph for a hades_julia3 run from the ini-style
// parameter tree: forward model, Julia likelihood, optional bias sampler
// (hmclet / slice / none), density MC (HMC or QN-HMC), foreground samplers,
// and the main-loop block structure. `resuming` is forwarded to the
// likelihood-info setup so restored runs do not re-initialize everything.
template <typename ptree>
void sampler_bundle_init(
    MPI_Communication *mpi_world, ptree &params, SamplerBundle &bundle,
    MainLoop &loop, bool resuming) {
  using boost::format;
  using CosmoTool::square;
  using std::string;

  // --- configuration sections ---
  ptree system_params = params.get_child("system");
  ptree julia_params = params.get_child("julia");
  auto block_loop_params = params.get_child_optional("block_loop");
  auto borg_params = params.get_child("gravity");
  int hades_mixing = params.template get<int>("hades.mixing", 20);
  std::string lh_type =
      params.template get<std::string>("hades.likelihood", "LINEAR");
  std::shared_ptr<MarkovSampler> nmean, bias;
  typedef GridDensityLikelihoodBase<3> grid_t;
  std::shared_ptr<grid_t> likelihood;
  MarkovState &state = loop.get_state();
  auto &cons = Console::instance();

  // --- forward model container, filled below by buildModel ---
  BorgModelElement *model = new BorgModelElement();
  model->obj = 0;
  loop.get_state().newElement("BORG_model", model);
  loop.get_state().newScalar("BORG_version", BORG_GIT_VERSION);

  // Simulation box geometry taken from the already-populated Markov state.
  BoxModel box;
  box.xmin0 = state.getScalar<double>("corner0");
  box.xmin1 = state.getScalar<double>("corner1");
  box.xmin2 = state.getScalar<double>("corner2");
  box.L0 = state.getScalar<double>("L0");
  box.L1 = state.getScalar<double>("L1");
  box.L2 = state.getScalar<double>("L2");
  box.N0 = state.getScalar<long>("N0");
  box.N1 = state.getScalar<long>("N1");
  box.N2 = state.getScalar<long>("N2");
  model->obj = buildModel(
      MPI_Communication::instance(), state, box, params, borg_params);

  // --- Julia likelihood: external code file + module name ---
  string code_path = julia_params.template get<string>("likelihood_path");
  string module_name = julia_params.template get<string>("likelihood_module");
  string bias_sampler_type =
      julia_params.template get<string>("bias_sampler_type");
  //    string bias_sampler = julia_params.template get<string>("bias_sampler");
  LikelihoodInfo like_info;
  LibLSS_prepare::setupLikelihoodInfo(
      mpi_world, loop.get_state(), like_info, params, resuming);
  likelihood = bundle.julia_likelihood =
      std::make_shared<JuliaDensityLikelihood>(
          bundle.comm, like_info, code_path, module_name);
  bundle.delegate_ic_to_julia =
      julia_params.template get<bool>("ic_in_julia", false);

  // Mass-matrix calibration window shared by the bias samplers.
  auto burnin = julia_params.template get<size_t>("mass_burnin", 300);
  auto memory = julia_params.template get<size_t>("mass_burnin_memory", 50);

  // --- bias sampler selection ---
  if (bias_sampler_type == "hmclet") {
    auto hmclet_maxEpsilon =
        julia_params.template get_optional<double>("hmclet_maxEpsilon");
    auto hmclet_maxNtime =
        julia_params.template get_optional<int>("hmclet_maxNtime");
    auto hmcMatrix =
        julia_params.template get<std::string>("hmclet_matrix", "DIAGONAL");
    auto massScaling =
        julia_params.template get<double>("hmclet_massScale", 0.);
    auto limiter =
        julia_params.template get<double>("hmclet_correlationLimiter", 0.5);
    auto frozen = julia_params.template get<bool>("hmclet_frozen", false);

    // Map the configuration string onto the mass-matrix representation.
    JuliaHmclet::types::MatrixType matrixType = JuliaHmclet::types::DIAGONAL;
    if (hmcMatrix == "DIAGONAL")
      matrixType = JuliaHmclet::types::DIAGONAL;
    else if (hmcMatrix == "DENSE")
      matrixType = JuliaHmclet::types::DENSE;
    else if (hmcMatrix == "QN_DIAGONAL")
      matrixType = JuliaHmclet::types::QN_DIAGONAL;
    else {
      error_helper<ErrorBadState>(
          "Invalid matrix type for HMC: " + hmcMatrix);
    }

    Console::instance().print<LOG_INFO>("Build hmclet");
    auto julia_hmclet = std::make_shared<JuliaHmcletMeta>(
        bundle.comm, bundle.julia_likelihood, module_name, matrixType, burnin,
        memory, limiter, frozen);

    // The individual hmclets only exist after the Julia side has been
    // initialized, so tuning parameters are applied in this ready callback.
    julia_hmclet->postinit().ready(
        [julia_hmclet, hmclet_maxEpsilon, hmclet_maxNtime,
         massScaling]() -> void {
          Console &cons = Console::instance();
          cons.print<LOG_VERBOSE>(
              format("Number of hmclets = %d") %
              julia_hmclet->hmclets().size());
          for (auto &hmc : julia_hmclet->hmclets()) {
            if (hmclet_maxEpsilon) {
              cons.print<LOG_VERBOSE>(
                  format("Setup hmclet epsilon=%g") % *hmclet_maxEpsilon);
              hmc->setMaxEpsilon(*hmclet_maxEpsilon);
            }
            if (hmclet_maxNtime) {
              cons.print<LOG_VERBOSE>(
                  format("Setup hmclet ntime=%d") % *hmclet_maxNtime);
              hmc->setMaxNtime(*hmclet_maxNtime);
            }
            hmc->setMassScaling(massScaling);
          }
        });
    bias = bundle.bias = julia_hmclet;
  } else if (bias_sampler_type == "slice") {
    bias = bundle.bias = std::make_shared<JuliaMetaSlice>(
        bundle.comm, module_name, bundle.julia_likelihood, burnin, memory);
  } else if (bias_sampler_type == "none") {
    // Explicitly no bias sampler; `bias` stays null and the main loop below
    // is assembled without a bias block.
  } else {
    error_helper<ErrorParams>("Unknown bias sampler type");
  }

  // Initialize foregrounds
  LibLSS_prepare::initForegrounds(
      mpi_world, loop.get_state(),
      [&bundle](int a, int b) { bundle.newForeground(a, b); }, params);

  /*    if (!system_params.template get<bool>("block_sigma8_sampler", true))
      bundle.sigma8_sampler = new GenericSigma8Sampler(bundle.comm);
    else
      bundle.sigma8_sampler = 0;
*/

  // --- density sampling algorithm selection ---
  std::string algorithm_name =
      params.template get<std::string>("hades.algorithm", "HMC");
  if (algorithm_name == "HMC") {
    // -----------------------------------
    // HMC algorithm initialization
    double maxEpsilon =
        params.template get<double>("hades.max_epsilon", 0.02);
    int maxTimeSteps = params.template get<int>("hades.max_timesteps", 100);
    std::string I_scheme_s =
        params.template get<std::string>("hades.scheme", "SI_2A");
    HMCOption::IntegratorScheme I_scheme = get_Scheme(I_scheme_s);
    auto density_mc =
        std::make_unique<HMCDensitySampler>(mpi_world, likelihood);
    density_mc->setIntegratorScheme(I_scheme);
    density_mc->setMaxEpsilon(maxEpsilon);
    density_mc->setMaxTimeSteps(maxTimeSteps);
    // HMC algorithm initialization - end
    // -----------------------------------
    bundle.density_mc = std::move(density_mc);
  } else if (algorithm_name == "QN-HMC") {
    // Quasi-Newton flavor of the HMC density sampler; same tuning knobs.
    double maxEpsilon =
        params.template get<double>("hades.max_epsilon", 0.02);
    int maxTimeSteps = params.template get<int>("hades.max_timesteps", 100);
    std::string I_scheme_s =
        params.template get<std::string>("hades.scheme", "SI_2A");
    HMCOption::IntegratorScheme I_scheme = get_Scheme(I_scheme_s);
    auto density_mc =
        std::make_unique<QNHMCDensitySampler>(mpi_world, likelihood);
    density_mc->setIntegratorScheme(I_scheme);
    density_mc->setMaxEpsilon(maxEpsilon);
    density_mc->setMaxTimeSteps(maxTimeSteps);
    bundle.density_mc = std::move(density_mc);
  } else {
    error_helper<ErrorBadState>(
        "Invalid algorithm name: " + algorithm_name +
        " (choice is HMC or QN-HMC)");
  }

  // Per-sampler blocking flags from the [block_loop] section (not restored
  // from snapshots, so they can be changed between runs).
  bool hblock = adapt_optional<bool>(
      loop.get_state(), block_loop_params, "hades_sampler_blocked", false,
      DO_NOT_RESTORE);
  adapt_optional<bool>(
      loop.get_state(), block_loop_params, "bias_sampler_blocked", false,
      DO_NOT_RESTORE);
  adapt_optional<bool>(
      loop.get_state(), block_loop_params, "nmean_sampler_blocked", false,
      DO_NOT_RESTORE);

  Console::instance().print<LOG_INFO_SINGLE>(
      format("Hades mixing per mcmc step is %d") % hades_mixing);
  Console::instance().print<LOG_INFO_SINGLE>(
      format("Hades density is blocked: %s") % (hblock ? "YES" : "NO"));

  loop << bundle.dummy_ps << bundle.sel_updater;

  // ==================
  // MAIN LOOP PROGRAM
  if (bias != 0) {
    auto bias_loop = new BlockLoop(1);
    if (bias != 0)
      *bias_loop << *bias;

    loop
        << (BlockLoop(hades_mixing)
            << *bundle.density_mc << *bias_loop
            << (BlockLoop(10) << bundle.foreground_block));
    // NOTE(review): bias_loop is deleted right after insertion, which is only
    // safe if BlockLoop's operator<< stores a copy of the nested loop rather
    // than a reference — confirm against BlockLoop's implementation.
    delete bias_loop;
  } else {
    loop << (BlockLoop(hades_mixing) << *bundle.density_mc)
         << (BlockLoop(10) << bundle.foreground_block);
  }

  // If active, sample sigma8
  //    if (bundle.sigma8_sampler != 0)
  //      loop << *bundle.sigma8_sampler;
}
// Prepare the initial density field: either delegate initial-condition
// generation to the Julia likelihood module, or draw a random Gaussian field
// and damp it by the mcmc.init_random_scaling factor (default 0.1).
template <typename ptree>
void
sampler_setup_ic(SamplerBundle &bundle, MainLoop &loop, ptree const &params) {
  MarkovState &state = loop.get_state();

  if (bundle.delegate_ic_to_julia) {
    // The Julia module provides its own initial conditions.
    bundle.julia_likelihood->generateInitialConditions(state);
    return;
  }

  // Default path: random field, scaled down so the chain starts near zero.
  generateRandomField(bundle.comm, state);
  double const scaling =
      params.template get<double>("mcmc.init_random_scaling", 0.1);
  state.get<CArrayType>("s_hat_field")->eigen() *= scaling;
  state.get<ArrayType>("s_field")->eigen() *= scaling;
}
// No global resources to release; SamplerBundle's destructor does the work.
void sampler_bundle_cleanup() {}
} // namespace LibLSS
#endif

View file

@ -0,0 +1,45 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/src/julia_mock_gen.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __HADES_JULIA_MOCK_GEN_HPP
#define __HADES_JULIA_MOCK_GEN_HPP
#include <CosmoTool/algo.hpp>
#include <cmath>
namespace LibLSS {
// Generate mock observations through the configured density sampler and dump
// the full Markov state to "mock_data.h5". Only rank 0 opens the file; the
// other ranks pass a null handle to the collective mpiSaveState call.
template <typename PTree>
void prepareMockData(
    PTree &ptree, MPI_Communication *comm, MarkovState &state,
    CosmologicalParameters &cosmo_params, SamplerBundle &bundle) {
  ConsoleContext<LOG_INFO_SINGLE> ctx("prepareMockData");
  using boost::format;
  using CosmoTool::square;

  // NOTE(review): Rsmooth is read but never used below — confirm whether the
  // smoothing step was dropped intentionally or is still pending.
  double Rsmooth = ptree.template get<double>("system.hades_smoothing", 1.0);

  createCosmologicalPowerSpectrum(state, cosmo_params);

  // Apply the synthetic selection, then let the density sampler fabricate
  // the mock data set from the forward-modeled field.
  bundle.sel_updater.sample(state);
  bundle.density_mc->generateMockData(state);
  {
    std::shared_ptr<H5::H5File> f;

    if (comm->rank() == 0)
      f = std::make_shared<H5::H5File>("mock_data.h5", H5F_ACC_TRUNC);

    state.mpiSaveState(f, comm, false);
  }

  //    bundle.hmc->generateRandomField(state);
  //    state.get<CArrayType>("s_hat_field")->eigen() *= 0.02;
  //    state.get<ArrayType>("s_field")->eigen() *= 0.02;
}
} // namespace LibLSS
#endif

View file

@ -0,0 +1,23 @@
# Build rules for the hades_julia3 front-end; the executable is only produced
# when Julia support was enabled at configure time.
check_ares_module(BORG_PRESENT borg)

set(extra_hmclet ${CMAKE_SOURCE_DIR}/extra/hmclet/src)
# Reset HADES_OPTION in this scope before any options are accumulated.
SET(HADES_OPTION)

include_directories(${extra_hmclet})

IF (BUILD_JULIA)
  cmessage(STATUS "Activate Hades_Julia core")
  add_executable(hades_julia3 ${extra_hmclet}/hades_julia3.cpp ${extra_hmclet}/julia_mock_gen.hpp)
  target_link_libraries(hades_julia3 hades borg_models LSS ${DEP_LIBS} ${JULIA_LIBRARY})
  add_dependencies(hades_julia3 ${ares_DEPS})

  # hades_julia3.cpp pulls these headers in through the SAMPLER_* macro
  # indirection of common/sampler_base.cpp, so list them explicitly to make
  # the object file rebuild when any of them changes.
  set_property(SOURCE ${extra_hmclet}/hades_julia3.cpp APPEND PROPERTY OBJECT_DEPENDS
    ${extra_hmclet}/julia_mock_gen.hpp
    ${extra_hmclet}/julia_bundle.hpp
    ${extra_hmclet}/julia_bundle_init.hpp
    ${CMAKE_SOURCE_DIR}/src/ares_init.hpp)
ELSE()
  cmessage(CWARNING "Julia missing, Hades_Julia disabled")
ENDIF()