Initial import

Guilhem Lavaux 2023-05-29 10:41:03 +02:00
commit 56a50eead3
820 changed files with 192077 additions and 0 deletions

@@ -0,0 +1,19 @@
require_ares_module(hades)
SET(EXTRA_HMCLET ${CMAKE_SOURCE_DIR}/extra/hmclet)
SET(EXTRA_LIBLSS
${EXTRA_LIBLSS}
${EXTRA_HMCLET}/libLSS/hmclet/hmclet.cpp
${EXTRA_HMCLET}/libLSS/hmclet/hmclet_qnhmc.cpp
${EXTRA_HMCLET}/libLSS/hmclet/diagonal_mass.cpp
${EXTRA_HMCLET}/libLSS/hmclet/mass_burnin.cpp
${EXTRA_HMCLET}/libLSS/hmclet/dense_mass.cpp
)
IF(BUILD_JULIA)
SET(EXTRA_LIBLSS
${EXTRA_LIBLSS}
${EXTRA_HMCLET}/libLSS/hmclet/julia_slice.cpp
${EXTRA_HMCLET}/libLSS/hmclet/julia_hmclet.cpp
)
ENDIF()

@@ -0,0 +1,130 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/dense_mass.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/dense_mass.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
void DenseMassMatrix::saveMass(CosmoTool::H5_CommonFileGroup &g) {
boost::multi_array<double, 2> mass_buf(boost::extents[numParams][numParams]);
boost::multi_array<double, 1> mean_buf(boost::extents[numParams]);
Eigen::Map<Eigen::MatrixXd> map_buf(mass_buf.data(), numParams, numParams);
map_buf.noalias() = covariances;
Eigen::Map<Eigen::VectorXd>(mean_buf.data(), numParams).noalias() = mean;
CosmoTool::hdf5_write_array(g, "covariance", mass_buf);
CosmoTool::hdf5_write_array(g, "mean", mean_buf);
map_buf.noalias() = icCovar;
CosmoTool::hdf5_write_array(g, "icCovariance", mass_buf);
hdf5_save_scalar(g, "numInMass", numInMass);
map_buf.noalias() = finishedCovariances;
CosmoTool::hdf5_write_array(g, "finishedCovariances", mass_buf);
}
void DenseMassMatrix::loadMass(CosmoTool::H5_CommonFileGroup &g) {
boost::multi_array<double, 2> mass_buf(boost::extents[numParams][numParams]);
boost::multi_array<double, 1> mean_buf(boost::extents[numParams]);
Eigen::Map<Eigen::MatrixXd> map_buf(mass_buf.data(), numParams, numParams);
auto& cons = Console::instance();
CosmoTool::hdf5_read_array(g, "covariance", mass_buf, false);
covariances = map_buf;
CosmoTool::hdf5_read_array(g, "icCovariance", mass_buf, false);
icCovar = map_buf;
CosmoTool::hdf5_read_array(g, "finishedCovariances", mass_buf, false);
finishedCovariances.noalias() = map_buf;
CosmoTool::hdf5_read_array(g, "mean", mean_buf, false);
numInMass = hdf5_load_scalar<size_t>(g, "numInMass");
mean = Eigen::Map<Eigen::VectorXd>(mean_buf.data(), numParams);
cons.print<LOG_INFO>("loaded mass.");
lltOfCovariances.compute(finishedCovariances);
}
void DenseMassMatrix::addMass(VectorType const &params) {
if (frozen)
return;
using CosmoTool::square;
auto f_params = Eigen::Map<const Eigen::VectorXd>(params.data(), numParams);
double coef = double(numInMass) / double(numInMass + 1);
double coef2 = 1 / double(numInMass);
double coef3 = 1 / double(numInMass + 1);
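// Online update of the running mean and covariance: with n = numInMass
// samples seen so far, coef = n/(n+1), coef2 = 1/n and coef3 = 1/(n+1),
//   mean <- coef * mean + coef3 * x
//   cov  <- coef * cov  + coef2 * (x - mean)(x - mean)^T
// where the residual is taken against the freshly updated mean.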
if (numInMass == 0)
mean = f_params;
else
mean = coef * mean + coef3 * f_params;
if (numInMass >= 1) {
auto c = f_params - mean;
covariances = coef * covariances + coef2 * c * c.adjoint();
}
numInMass++;
finishMass();
}
void DenseMassMatrix::finishMass() {
ConsoleContext<LOG_DEBUG> ctx("DenseMassMatrix::finishMass");
double w = initialMassWeight / double(initialMassWeight + numInMass);
double const corrector = limiter;
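// Shrink the sample covariance towards the initial guess icCovar; the
// weight w decays as more samples accumulate. Off-diagonal entries are
// further damped by 1/(1 + corrector) to keep the matrix well
// conditioned while the estimate is still noisy.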
finishedCovariances = (1-w)*covariances + w*icCovar;
for (int i = 0; i < numParams; i++) {
for (int j = 0; j < numParams; j++) {
if (i!=j)
finishedCovariances(i,j) /= (1+corrector);
}
}
lltOfCovariances.compute(finishedCovariances);
}
void DenseMassMatrix::clear() {
ConsoleContext<LOG_DEBUG> ctx("DenseMassMatrix::clear");
covariances.fill(0);
finishedCovariances.fill(0);
mean.fill(0);
numInMass = 0;
initialMassWeight = 10;
finishMass();
}
void DenseMassMatrix::setInitialMass(
boost::multi_array_ref<double, 2> const &massMatrix) {
if (massMatrix.shape()[0] != numParams || massMatrix.shape()[1] != numParams)
error_helper<ErrorBadState>("Invalid mass matrix size");
for (size_t i = 0; i < numParams; i++) {
for (size_t j = 0; j < numParams; j++) {
icCovar(i, j) = massMatrix[i][j];
}
}
initialMassWeight = 10;
finishMass();
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019

@@ -0,0 +1,106 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/dense_mass.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_DENSE_MASS_HPP
# define __LIBLSS_HMCLET_DENSE_MASS_HPP
# include <memory>
# include <Eigen/Core>
# include <Eigen/Cholesky>
# include <Eigen/Eigenvalues>
# include <boost/multi_array.hpp>
# include "libLSS/samplers/core/random_number.hpp"
# include <CosmoTool/hdf5_array.hpp>
# include "libLSS/tools/errors.hpp"
# include "libLSS/hmclet/hmclet.hpp"
namespace LibLSS {
namespace HMCLet {
class DenseMassMatrix {
protected:
size_t numParams;
Eigen::MatrixXd finishedCovariances, icCovar, covariances;
Eigen::LLT<Eigen::MatrixXd> lltOfCovariances;
Eigen::VectorXd tmp_vector, mean;
size_t initialMassWeight;
size_t numInMass;
boost::multi_array<double, 1> tmp_data;
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
double limiter;
bool frozen;
public:
DenseMassMatrix(size_t numParams_)
: numParams(numParams_), finishedCovariances(numParams, numParams),
icCovar(numParams, numParams), covariances(numParams, numParams),
lltOfCovariances(numParams), tmp_vector(numParams), mean(numParams),
initialMassWeight(0), numInMass(0),
tmp_data(boost::extents[numParams]), limiter(0.5), frozen(false) {
icCovar.setIdentity();
clear();
}
void setInitialMass(boost::multi_array_ref<double, 2> const &params);
void freezeInitial() { icCovar = covariances; }
void freeze() { frozen = true; }
void setCorrelationLimiter(double limiter_) { limiter = limiter_; }
void saveMass(CosmoTool::H5_CommonFileGroup &g);
void loadMass(CosmoTool::H5_CommonFileGroup &g);
void addMass(VectorType const &params);
void clear();
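// Applying the "mass matrix" to a vector multiplies by the smoothed
// covariance; in effect this is the inverse-mass application used by the
// integrator to turn momenta into velocities.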
template <
typename A, typename U = typename std::enable_if<
is_wrapper<A>::value, void>::type>
auto operator()(A const &q) {
auto tmpv = Eigen::Map<Eigen::VectorXd>(tmp_data.data(), numParams);
for (size_t i = 0; i < numParams; i++)
tmp_vector(i) = (*q)[i];
tmpv.noalias() = finishedCovariances * tmp_vector;
return fwrap(tmp_data);
}
auto operator()(VectorType const &q) { return operator()(fwrap(q)); }
template <typename A, typename B>
auto operator()(A const &a, B &&) {
return operator()(a);
}
auto sample(RandomNumber &rgen) -> decltype(fwrap(tmp_data)) {
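// Draw z ~ N(0, I) and solve L x = z, where L is the lower Cholesky
// factor of finishedCovariances, to produce a new momentum vector.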
boost::multi_array<double, 1> tmp_data2(boost::extents[numParams]);
auto tmpv2 = Eigen::Map<Eigen::VectorXd>(tmp_data2.data(), numParams);
auto tmpv = Eigen::Map<Eigen::VectorXd>(tmp_data.data(), numParams);
fwrap(tmp_data2) = rgen.gaussian(fwrap(b_fused_idx<double, 1>(
[](int) { return 1; }, boost::extents[numParams])));
tmpv = lltOfCovariances.matrixL().solve(tmpv2);
return fwrap(tmp_data);
}
void computeMainComponents() { es.compute(finishedCovariances); }
auto components() { return es.eigenvectors(); }
auto eigenValues() { return es.eigenvalues(); }
Eigen::VectorXd const &getMean() const { return mean; }
protected:
void finishMass();
};
} // namespace HMCLet
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019

@@ -0,0 +1,103 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/diagonal_mass.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/diagonal_mass.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
void DiagonalMassMatrix::saveMass(CosmoTool::H5_CommonFileGroup &g) {
CosmoTool::hdf5_write_array(g, "mass", masses);
CosmoTool::hdf5_write_array(g, "mean", mean);
CosmoTool::hdf5_write_array(g, "icMass", icMass);
hdf5_save_scalar(g, "numInMass", numInMass);
hdf5_save_scalar(g, "frozen", frozen);
}
void DiagonalMassMatrix::loadMass(CosmoTool::H5_CommonFileGroup &g) {
CosmoTool::hdf5_read_array(g, "mass", masses);
CosmoTool::hdf5_read_array(g, "mean", mean);
CosmoTool::hdf5_read_array(g, "icMass", icMass);
numInMass = hdf5_load_scalar<size_t>(g, "numInMass");
frozen = hdf5_load_scalar<bool>(g, "frozen");
fwrap(inv_sqrt_masses) = std::sqrt(1 / fwrap(masses));
}
void DiagonalMassMatrix::addMass(VectorType const &params) {
if (frozen)
return;
using CosmoTool::square;
auto f_mean = fwrap(mean);
auto f_variances = fwrap(variances);
auto f_params = fwrap(params);
double coef = double(numInMass) / double(numInMass + 1);
double coef2 = 1 / double(numInMass);
double coef3 = 1 / double(numInMass + 1);
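// Component-wise version of the recursive mean/variance update used in
// DenseMassMatrix::addMass.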
if (numInMass == 0)
f_mean = f_params;
else
f_mean = coef * f_mean + coef3 * f_params;
if (numInMass >= 1) {
auto c = f_params - f_mean;
f_variances = coef * f_variances + coef2 * c * c;
}
numInMass++;
finishMass();
}
void DiagonalMassMatrix::finishMass() {
ConsoleContext<LOG_DEBUG> ctx("DiagonalMassMatrix::finishMass");
auto fm = fwrap(variances);
double w = initialMassWeight / double(initialMassWeight + numInMass);
auto f_M = fwrap(masses);
auto f_inv_sq = fwrap(inv_sqrt_masses);
f_M = (1 - w) * fm + w * fwrap(icMass);
f_inv_sq = std::sqrt(1 / f_M);
ctx.print("mass weight = " + to_string(f_M.max() * 1e5));
ctx.print("inv_sqrt_masses weight = " + to_string(f_inv_sq.max()));
}
void DiagonalMassMatrix::clear() {
fwrap(variances) = 0;
fwrap(masses) = 0;
fwrap(mean) = 0;
numInMass = 0;
initialMassWeight = 5;
finishMass();
}
void DiagonalMassMatrix::setInitialMass(
VectorType const &diagonal_mass_matrix) {
fwrap(icMass) = diagonal_mass_matrix;
initialMassWeight = 5;
finishMass();
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019

@@ -0,0 +1,75 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/diagonal_mass.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_DIAGONAL_MASS_HPP
# define __LIBLSS_HMCLET_DIAGONAL_MASS_HPP
# include <memory>
# include <boost/multi_array.hpp>
# include "libLSS/samplers/core/random_number.hpp"
# include <CosmoTool/hdf5_array.hpp>
# include "libLSS/tools/errors.hpp"
# include "libLSS/hmclet/hmclet.hpp"
namespace LibLSS {
namespace HMCLet {
class DiagonalMassMatrix {
protected:
size_t numParams;
boost::multi_array<double, 1> masses, inv_sqrt_masses, icMass, variances;
boost::multi_array<double, 1> mean;
size_t initialMassWeight;
size_t numInMass;
bool frozen;
public:
DiagonalMassMatrix(size_t numParams_)
: numParams(numParams_), masses(boost::extents[numParams]),
inv_sqrt_masses(boost::extents[numParams]),
icMass(boost::extents[numParams]),
variances(boost::extents[numParams]), mean(boost::extents[numParams]),
initialMassWeight(0), numInMass(0), frozen(false) {}
void setInitialMass(VectorType const &params);
void freeze() { frozen = true; }
void freezeInitial() { fwrap(icMass) = fwrap(masses); }
void saveMass(CosmoTool::H5_CommonFileGroup &g);
void loadMass(CosmoTool::H5_CommonFileGroup &g);
void addMass(VectorType const &params);
void clear();
template <typename A>
auto operator()(A const &q) const {
return q * fwrap(masses);
}
template<typename A, typename B>
auto operator()(A const& a, B&& b) const {
return operator()(a);
}
auto sample(RandomNumber &rgen) const
-> decltype(rgen.gaussian(fwrap(inv_sqrt_masses))) {
return rgen.gaussian(fwrap(inv_sqrt_masses));
}
protected:
void finishMass();
};
} // namespace HMCLet
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019

@@ -0,0 +1,152 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/itertools.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
constexpr static int ROOT_RANK = 0;
template <typename MassType>
SimpleSampler<MassType>::SimpleSampler(
std::shared_ptr<JointPosterior> _posterior)
: numParams(_posterior->getNumberOfParameters()), massMatrix(numParams),
posterior(_posterior), momentum(boost::extents[numParams]) {
ConsoleContext<LOG_DEBUG> ctx("hmclet constructor");
fwrap(momentum) = 0;
}
template <typename MassType>
SimpleSampler<MassType>::~SimpleSampler() {}
template <typename MassType>
void SimpleSampler<MassType>::calibrate(
MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
VectorType const &initial_params, VectorType const &initial_step) {
ConsoleContext<LOG_DEBUG> ctx("hmcLet calibrate");
using CosmoTool::square;
boost::multi_array<double, 1> params(boost::extents[numParams]);
fwrap(params) = initial_params;
massMatrix.clear();
// We do a few loops to have an idea of the width of the posterior.
for (size_t i = 0; i < numSteps; i++) {
for (size_t j = 0; j < numParams; j++) {
params[j] = slice_sweep_double(
comm, rng,
[this, j, &params](double x) -> double {
params[j] = x;
return -posterior->evaluate(params);
},
params[j], initial_step[j]);
}
massMatrix.addMass(params);
}
massMatrix.freezeInitial();
}
template <typename MassType>
void SimpleSampler<MassType>::newSample(
MPI_Communication *comm, RandomNumber &rgen, VectorType &params) {
ConsoleContext<LOG_DEBUG> ctx("hmcLet singleSampler");
auto paramSize = boost::extents[numParams];
SymplecticIntegrators integrator;
boost::multi_array<double, 1> tmp_gradient(paramSize), saveParams(paramSize),
savedMomentum(paramSize);
double Hstart, Hend, delta_H;
double epsilon;
int Ntime;
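// Draw the step size and trajectory length at random on the root rank,
// then partially refresh the momentum: p <- s*p + sqrt(1 - s^2)*n with
// n ~ N(0, M) and s = momentumScale, which leaves the momentum
// distribution invariant.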
if (comm->rank() == ROOT_RANK) {
epsilon = maxEpsilon * (1 - rgen.uniform());
Ntime = 1 + int(maxNtime * rgen.uniform());
fwrap(momentum) = momentumScale * fwrap(momentum) + std::sqrt(1-momentumScale*momentumScale)* massMatrix.sample(rgen);
}
comm->broadcast_t(&epsilon, 1, ROOT_RANK);
comm->broadcast_t(&Ntime, 1, ROOT_RANK);
comm->broadcast_t(momentum.data(), numParams, ROOT_RANK);
fwrap(savedMomentum) = momentum;
LibLSS::copy_array(saveParams, params);
Hstart = posterior->evaluate(saveParams);
// Do the integration
ctx.print(boost::format("Integrate epsilon=%g ntime=%d") % epsilon % Ntime);
try {
integrator.integrate_dense(
std::bind(
&JointPosterior::adjointGradient, posterior.get(), ph::_1, ph::_2),
massMatrix, epsilon, Ntime, saveParams, momentum, tmp_gradient);
Hend = posterior->evaluate(saveParams);
double delta_Ekin;
{
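// Uses the identity 0.5 p^T A p - 0.5 q^T A q = 0.5 (p - q)^T A (p + q)
// for symmetric A, so both kinetic energies are obtained with a single
// mass-matrix application.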
auto p = fwrap(momentum);
auto old_p = fwrap(savedMomentum);
delta_Ekin = (0.5 * (p - old_p) * massMatrix(p + old_p)).sum();
}
delta_H = Hend - Hstart + delta_Ekin;
double log_u;
if (comm->rank() == ROOT_RANK)
log_u = std::log(1 - rgen.uniform());
comm->broadcast_t(&log_u, 1, ROOT_RANK);
ctx.print(
boost::format("deltaEkin = %g, delta_L = %g, deltaH = %g, log_u = %g") %
delta_Ekin % (Hend - Hstart) % delta_H % log_u);
if (log_u <= -delta_H) {
// Accept
LibLSS::copy_array(params, saveParams);
ctx.print("Accept");
}
} catch (HMCLet::ErrorBadGradient const &) {
ctx.print2<LOG_ERROR>(
"A bad gradient computation occured. Reject the sample");
}
massMatrix.addMass(params);
}
#include "libLSS/hmclet/diagonal_mass.hpp"
template class LibLSS::HMCLet::SimpleSampler<DiagonalMassMatrix>;
#include "libLSS/hmclet/dense_mass.hpp"
template class LibLSS::HMCLet::SimpleSampler<DenseMassMatrix>;
#include "libLSS/hmclet/mass_burnin.hpp"
template class LibLSS::HMCLet::SimpleSampler<MassMatrixWithBurnin<DiagonalMassMatrix>>;
template class LibLSS::HMCLet::SimpleSampler<MassMatrixWithBurnin<DenseMassMatrix>>;

@@ -0,0 +1,87 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_HMCLET_HPP
#define __LIBLSS_HMCLET_HMCLET_HPP
#include <memory>
#include <boost/multi_array.hpp>
#include "libLSS/samplers/core/random_number.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/errors.hpp"
namespace LibLSS {
namespace HMCLet {
typedef boost::multi_array_ref<double, 1> VectorType;
LIBLSS_NEW_ERROR(ErrorBadGradient);
LIBLSS_NEW_ERROR(ErrorBadReject);
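// Interface a posterior must implement for the hmclet samplers:
// evaluate() returns the potential energy (minus log-posterior up to a
// constant) and adjointGradient() fills its gradient with respect to the
// parameters.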
class JointPosterior {
public:
JointPosterior() {}
virtual ~JointPosterior() {}
virtual size_t getNumberOfParameters() const = 0;
virtual double evaluate(VectorType const &params) = 0;
virtual void adjointGradient(
VectorType const &params, VectorType &params_gradient) = 0;
};
class AbstractSimpleSampler {
public:
AbstractSimpleSampler() : maxEpsilon(0.02), momentumScale(0.0), maxNtime(50) {}
virtual void calibrate(
MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
VectorType const &initial_params, VectorType const &initial_step) = 0;
virtual void newSample(
MPI_Communication *comm, RandomNumber &rng, VectorType &params) = 0;
void setMaxEpsilon(double epsilon_) { maxEpsilon = epsilon_; }
void setMaxNtime(size_t ntime_) { maxNtime = ntime_; }
void setMassScaling(double scale_) { momentumScale = scale_; }
virtual void reset() {}
double maxEpsilon;
double momentumScale;
size_t maxNtime;
};
template <typename MassType>
class SimpleSampler : public AbstractSimpleSampler {
public:
typedef MassType mass_t;
SimpleSampler(std::shared_ptr<JointPosterior> posterior);
~SimpleSampler();
virtual void calibrate(
MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
VectorType const &initial_params, VectorType const &initial_step);
virtual void
newSample(MPI_Communication *comm, RandomNumber &rng, VectorType &params);
mass_t &getMass() { return massMatrix; }
protected:
size_t numParams;
mass_t massMatrix;
std::shared_ptr<JointPosterior> posterior;
boost::multi_array<double, 1> momentum;
};
} // namespace HMCLet
} // namespace LibLSS
#endif

@@ -0,0 +1,141 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet_qnhmc.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/itertools.hpp"
using namespace LibLSS;
using namespace LibLSS::QNHMCLet;
namespace ph = std::placeholders;
using boost::format;
constexpr static int ROOT_RANK = 0;
template <typename MassType, typename BMatrixType>
Sampler<MassType,BMatrixType>::Sampler(
std::shared_ptr<JointPosterior> _posterior)
: numParams(_posterior->getNumberOfParameters()), massMatrix(numParams),
B(numParams), posterior(_posterior),
momentum(boost::extents[numParams]) {
ConsoleContext<LOG_DEBUG> ctx("qnhmclet constructor");
fwrap(momentum) = 0;
}
template <typename MassType, typename BMatrixType>
Sampler<MassType,BMatrixType>::~Sampler() {}
template <typename MassType, typename BMatrixType>
void Sampler<MassType,BMatrixType>::newSample(
MPI_Communication *comm, RandomNumber &rgen, VectorType &params) {
ConsoleContext<LOG_DEBUG> ctx("qnhmcLet singleSampler");
auto paramSize = boost::extents[numParams];
SymplecticIntegrators integrator;
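// Work with a frozen copy of the B matrix for this trajectory: C is used
// to precondition gradients and momenta while B accumulates new curvature
// information, and B is rolled back to C if the proposal is rejected.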
BMatrixType C(B);
boost::multi_array<double, 1> tmp_gradient(paramSize), integrateParams(paramSize),
savedMomentum(paramSize);
double Hstart, Hend, delta_H;
double epsilon;
int Ntime;
if (comm->rank() == ROOT_RANK) {
epsilon = maxEpsilon * (1 - rgen.uniform());
Ntime = 1 + int(maxNtime * rgen.uniform());
fwrap(momentum) = momentumScale * fwrap(momentum) + std::sqrt(1-momentumScale*momentumScale)* massMatrix.sample(rgen);
}
ctx.print("Momentum is " + to_string(momentum ));
comm->broadcast_t(&epsilon, 1, ROOT_RANK);
comm->broadcast_t(&Ntime, 1, ROOT_RANK);
comm->broadcast_t(momentum.data(), numParams, ROOT_RANK);
fwrap(savedMomentum) = momentum;
LibLSS::copy_array(integrateParams, params);
Hstart = posterior->evaluate(integrateParams);
// Do the integration
ctx.print(boost::format("Integrate epsilon=%g ntime=%d") % epsilon % Ntime);
try {
integrator.integrate_dense(
[this,&C,&ctx](Vector const& position, Vector& gradient) {
posterior->adjointGradient(position, gradient);
ctx.print("QN gradient " + to_string(gradient));
B.addInfo(position, gradient);
C(gradient);
ctx.print("QN[2] gradient " + to_string(gradient));
},
[this,&C](Vector const& p, auto& tmp_p) {
fwrap(tmp_p) = massMatrix(p);
C(tmp_p);
return fwrap(tmp_p);
}, epsilon, Ntime, integrateParams, momentum, tmp_gradient
);
Hend = posterior->evaluate(integrateParams);
double delta_Ekin;
{
auto p = fwrap(momentum);
auto old_p = fwrap(savedMomentum);
delta_Ekin = (0.5 * (p - old_p) * massMatrix(p + old_p)).sum();
}
delta_H = Hend - Hstart + delta_Ekin;
double log_u;
if (comm->rank() == ROOT_RANK)
log_u = std::log(1 - rgen.uniform());
comm->broadcast_t(&log_u, 1, ROOT_RANK);
ctx.print(
boost::format("deltaEkin = %g, delta_L = %g, deltaH = %g, log_u = %g") %
delta_Ekin % (Hend - Hstart) % delta_H % log_u);
if (log_u <= -delta_H) {
// Accept
LibLSS::copy_array(params, integrateParams);
ctx.print("Accept");
auto& q = B.get();
ctx.print(boost::format("B=[%g,%g;%g,%g]") % q[0][0] % q[0][1] % q[1][0] % q[1][1]);
} else {
if (std::isnan(delta_H)) {
// Try to recover by resetting completely B
throw HMCLet::ErrorBadReject("Bad integration");
} else {
// Reject
B = C; // Reset the drift matrix
}
}
} catch (HMCLet::ErrorBadGradient const &) {
ctx.print2<LOG_ERROR>(
"A bad gradient computation occured. Reject the sample");
throw HMCLet::ErrorBadReject("Bad gradient");
}
}
#include "libLSS/hmclet/diagonal_mass.hpp"
template class LibLSS::QNHMCLet::Sampler<HMCLet::DiagonalMassMatrix,BDense>;
#include "libLSS/hmclet/dense_mass.hpp"
template class LibLSS::QNHMCLet::Sampler<HMCLet::DenseMassMatrix,BDense>;

@@ -0,0 +1,182 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/hmclet_qnhmc.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_QNHMCLET_HPP
#define __LIBLSS_HMCLET_QNHMCLET_HPP
#include <memory>
#include <boost/multi_array.hpp>
#include "libLSS/samplers/core/random_number.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/errors.hpp"
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
#include <Eigen/Core>
namespace LibLSS {
// Implement QN-HMC algorithm
// http://auai.org/uai2016/proceedings/papers/102.pdf
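// The quasi-Newton variant preconditions gradients and the momentum map
// with a running BFGS approximation B of the inverse Hessian, so the
// trajectory adapts to the local curvature of the posterior.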
namespace QNHMCLet {
using HMCLet::VectorType;
using HMCLet::JointPosterior;
class BDense {
protected:
size_t numParams;
size_t store;
boost::multi_array<double, 2> B;
boost::multi_array<double, 1> prev_theta, prev_grad_f;
Eigen::VectorXd s_k, y_k;
public:
BDense(size_t numParams_)
: numParams(numParams_),store(0),
B(boost::extents[numParams][numParams]), prev_theta(boost::extents[numParams]), prev_grad_f(boost::extents[numParams]), s_k(numParams), y_k(numParams) {
reset();
}
void reset() {
store = 0;
fwrap(prev_theta) = 0;
fwrap(prev_grad_f) = 0;
fwrap(B) = 0;
for (size_t i = 0; i < numParams; i++)
B[i][i] = 1e-5;
}
BDense(BDense const& other)
: numParams(other.numParams), store(other.store),
B(boost::extents[numParams][numParams]), prev_theta(boost::extents[numParams]), prev_grad_f(boost::extents[numParams]), s_k(numParams), y_k(numParams) {
fwrap(B) = other.B;
store = other.store;
s_k = other.s_k;
y_k = other.y_k;
fwrap(prev_theta) = other.prev_theta;
fwrap(prev_grad_f) = other.prev_grad_f;
}
BDense const& operator=(BDense const& other) {
Console::instance().c_assert(numParams == other.numParams, "Invalid B matrix state");
//B.resize(boost::extents[numParams][numParams]);
fwrap(B) = other.B;
store = other.store;
s_k = other.s_k;
y_k = other.y_k;
fwrap(prev_theta) = other.prev_theta;
fwrap(prev_grad_f) = other.prev_grad_f;
return *this;
}
template<typename Theta, typename Gradient>
void addInfo(Theta const& theta, Gradient const& grad_f) {
auto w_prev_theta = fwrap(prev_theta);
auto w_prev_grad_f = fwrap(prev_grad_f);
auto B_map = Eigen::Map<Eigen::MatrixXd>(B.data(), numParams, numParams);
store++;
if (store == 1) {
w_prev_theta = theta;
w_prev_grad_f = grad_f;
return;
}
for (size_t i = 0; i < numParams; i++) {
s_k(i) = theta[i] - prev_theta[i];
y_k(i) = (grad_f[i] - prev_grad_f[i]);
}
double const alpha_0 = s_k.dot(y_k);
double const alpha = 1/alpha_0;
if (alpha_0*alpha_0 < 1e-5 * s_k.dot(s_k) * y_k.dot(y_k) ) {
// w_prev_theta = theta;
// w_prev_grad_f = grad_f;
Console::instance().print<LOG_DEBUG>(
boost::format("SKIPPED alpha = %lg, reduced = %lg" ) % alpha %
(alpha_0/std::sqrt(s_k.dot(s_k) * y_k.dot(y_k))));
return;
}
Console::instance().print<LOG_DEBUG>(
boost::format("alpha = %lg, s_k = %lg, y_k = %lg, reduced = %lg" ) % alpha % std::sqrt(s_k.dot(s_k)) % std::sqrt(y_k.dot(y_k)) %
(alpha_0/std::sqrt(s_k.dot(s_k) * y_k.dot(y_k))));
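// Standard BFGS update of the inverse-Hessian approximation:
//   B <- (I - alpha y s^T)^T B (I - alpha y s^T) + alpha s s^T
// with s = theta - theta_prev, y = grad - grad_prev, alpha = 1/(s^T y).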
auto I = Eigen::MatrixXd::Identity(numParams,numParams);
Eigen::MatrixXd M = I - y_k * s_k.transpose() * alpha;
Eigen::MatrixXd N = s_k * s_k.transpose() * alpha;
B_map = M.transpose() * B_map * M;
B_map += N;
w_prev_theta = theta;
w_prev_grad_f = grad_f;
}
void operator()(boost::multi_array_ref<double,1>& x)
{
Eigen::Map<Eigen::VectorXd> m_x(x.data(), numParams);
Eigen::Map<Eigen::MatrixXd> m_B(B.data(), numParams, numParams);
m_x = m_B * m_x;
}
boost::multi_array_ref<double, 2> const& get() const { return B; }
void save(H5_CommonFileGroup& g) {
CosmoTool::hdf5_write_array(g, "B", B);
CosmoTool::hdf5_write_array(g, "prev_theta", prev_theta);
CosmoTool::hdf5_write_array(g, "prev_grad_f", prev_grad_f);
hdf5_save_scalar(g, "store", store);
}
void load(H5_CommonFileGroup& g) {
CosmoTool::hdf5_read_array(g, "B", B);
CosmoTool::hdf5_read_array(g, "prev_theta", prev_theta);
CosmoTool::hdf5_read_array(g, "prev_grad_f", prev_grad_f);
store = hdf5_load_scalar<int>(g, "store");
}
};
template <typename MassType, typename BMatrixType>
class Sampler : public HMCLet::AbstractSimpleSampler {
public:
typedef MassType mass_t;
Sampler(std::shared_ptr<JointPosterior> posterior);
~Sampler();
virtual void
newSample(MPI_Communication *comm, RandomNumber &rng, VectorType &params);
virtual void calibrate(
MPI_Communication *comm, RandomNumber &rng, size_t numSteps,
VectorType const &initial_params, VectorType const &initial_step) {}
mass_t &getMass() { return massMatrix; }
BMatrixType& getB() { return B; }
virtual void reset() {
Console::instance().print<LOG_DEBUG>("Resetting QN-HMC"); B.reset();
fwrap(momentum) = 0;
}
protected:
size_t numParams;
mass_t massMatrix;
BMatrixType B;
std::shared_ptr<JointPosterior> posterior;
typedef VectorType Vector;
boost::multi_array<double, 1> momentum;
};
} // namespace QNHMCLet
} // namespace LibLSS
#endif

@@ -0,0 +1,343 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_hmclet.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <string>
#include <boost/format.hpp>
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/julia/julia.hpp"
#include "libLSS/julia/julia_mcmc.hpp"
#include "libLSS/hmclet/julia_hmclet.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
#include "libLSS/samplers/core/types_samplers.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/samplers/julia/julia_likelihood.hpp"
#include "libLSS/julia/julia_ghosts.hpp"
#include "libLSS/julia/julia_array.hpp"
#include "libLSS/hmclet/diagonal_mass.hpp"
#include "libLSS/hmclet/dense_mass.hpp"
#include "libLSS/hmclet/mass_burnin.hpp"
#include "libLSS/tools/itertools.hpp"
#include "libLSS/hmclet/mass_saver.hpp"
using namespace LibLSS;
using namespace LibLSS::JuliaLikelihood;
using namespace LibLSS::JuliaHmclet::details;
using boost::format;
using LibLSS::Julia::helpers::_r;
static constexpr int ROOT_RANK = 0;
// ----------------------------------------------------------------------------
// JuliaHmcletMeta
JuliaHmcletMeta::JuliaHmcletMeta(
MPI_Communication *comm_, std::shared_ptr<JuliaDensityLikelihood> likelihood_,
const std::string &likelihood_module_, MatrixType matrixType,
size_t burnin_, size_t memorySize_, double limiter_, bool frozen_)
: MarkovSampler(), comm(comm_), module_name(likelihood_module_),
likelihood(likelihood_), massMatrixType(matrixType),
burnin(burnin_), memorySize(memorySize_), limiter(limiter_), frozen(frozen_) {
ConsoleContext<LOG_INFO> ctx("JuliaHmcletMeta::JuliaHmcletMeta");
}
JuliaHmcletMeta::~JuliaHmcletMeta() {}
void JuliaHmcletMeta::initialize(MarkovState &state) { restore(state); }
static void julia_helper_diagonal_mass_matrix(
std::string const &module_name,
std::unique_ptr<AbstractSimpleSampler> &hmc_, Julia::Object &jl_state,
size_t burnin, size_t memorySize, bool frozen) {
auto hmc =
dynamic_cast<SimpleSampler<MassMatrixWithBurnin<DiagonalMassMatrix>> *>(
hmc_.get());
Julia::Object jl_mass =
Julia::invoke(module_name + ".fill_diagonal_mass_matrix", jl_state);
auto mass = jl_mass.unbox_array<double, 1>();
auto &hmc_mass = hmc->getMass();
hmc_mass.setInitialMass(mass);
hmc_mass.clear();
hmc_mass.setBurninMax(burnin);
hmc_mass.setMemorySize(memorySize);
if (frozen)
hmc_mass.freeze();
}
static void julia_helper_diagonal_mass_matrix_qn(
std::string const &module_name,
std::unique_ptr<AbstractSimpleSampler> &hmc_, Julia::Object &jl_state) {
Console::instance().print<LOG_DEBUG>("Initializing mass matrix QN");
auto hmc =
dynamic_cast<QNHMCLet::Sampler<DiagonalMassMatrix, QNHMCLet::BDense> *>(
hmc_.get());
Julia::Object jl_mass =
Julia::invoke(module_name + ".fill_diagonal_mass_matrix", jl_state);
auto mass = jl_mass.unbox_array<double, 1>();
Console::instance().print<LOG_DEBUG>("Got some mass-> " + to_string(mass));
auto &hmc_mass = hmc->getMass();
hmc_mass.setInitialMass(mass);
hmc_mass.clear();
hmc_mass.freeze();
}
static void julia_helper_dense_mass_matrix(
std::string const &module_name,
std::unique_ptr<AbstractSimpleSampler> &hmc_, Julia::Object &jl_state,
size_t burnin, size_t memorySize, double limiter, bool frozen) {
auto hmc =
dynamic_cast<SimpleSampler<MassMatrixWithBurnin<DenseMassMatrix>> *>(
hmc_.get());
Julia::Object jl_mass =
Julia::invoke(module_name + ".fill_dense_mass_matrix", jl_state);
auto mass = jl_mass.unbox_array<double, 2>();
auto &hmc_mass = hmc->getMass();
Console::instance().print<LOG_INFO>("Setup IC mass matrix");
hmc_mass.setInitialMass(mass);
hmc_mass.clear();
hmc_mass.setBurninMax(burnin);
hmc_mass.setMemorySize(memorySize);
hmc_mass.setCorrelationLimiter(limiter);
if (frozen)
hmc_mass.freeze();
}
std::tuple<samplerBuilder_t, massMatrixInit_t>
JuliaHmcletMeta::getAdequateSampler() {
ConsoleContext<LOG_VERBOSE> ctx("JuliaHmcletMeta::getAdequateSampler");
samplerBuilder_t f;
massMatrixInit_t f2;
if (massMatrixType == DIAGONAL) {
ctx.print("Using DIAGONAL mass matrix");
f = [](std::shared_ptr<JuliaHmcletPosterior> &posterior, MarkovState &state,
std::string const &name) {
typedef SimpleSampler<MassMatrixWithBurnin<DiagonalMassMatrix>> sampler_t;
auto sampler = std::unique_ptr<sampler_t>(new sampler_t(posterior));
add_saver(state, name, sampler);
return sampler;
};
f2 = std::bind(
&julia_helper_diagonal_mass_matrix, module_name, std::placeholders::_1,
std::placeholders::_2, burnin, memorySize, frozen);
} else if (massMatrixType == QN_DIAGONAL) {
ctx.print("Using QN_DIAGONAL mass matrix");
f = [](std::shared_ptr<JuliaHmcletPosterior> &posterior, MarkovState &state,
std::string const &name) {
typedef QNHMCLet::Sampler<DiagonalMassMatrix,QNHMCLet::BDense> sampler_t;
auto sampler = std::unique_ptr<sampler_t>(new sampler_t(posterior));
add_saver(state, name, sampler);
return sampler;
};
f2 = std::bind(
&julia_helper_diagonal_mass_matrix_qn, module_name, std::placeholders::_1,
std::placeholders::_2);
} else if (massMatrixType == DENSE) {
ctx.print("Using DENSE mass matrix");
f = [](std::shared_ptr<JuliaHmcletPosterior> &posterior, MarkovState &state,
std::string const &name) {
typedef SimpleSampler<MassMatrixWithBurnin<DenseMassMatrix>> sampler_t;
auto sampler = std::unique_ptr<sampler_t>(new sampler_t(posterior));
add_saver(state, name, sampler);
return sampler;
};
f2 = std::bind(
&julia_helper_dense_mass_matrix, module_name, std::placeholders::_1,
std::placeholders::_2, burnin, memorySize, limiter, frozen);
}
return std::make_tuple(f, f2);
}
void JuliaHmcletMeta::restore(MarkovState &state) {
ConsoleContext<LOG_INFO> ctx("JuliaHmcletMeta::restore");
N0 = state.getScalar<long>("N0");
N1 = state.getScalar<long>("N1");
N2 = state.getScalar<long>("N2");
Ncatalog = state.getScalar<long>("NCAT");
FFTW_Manager_3d<double> mgr(N0, N1, N2, comm);
N2real = mgr.N2real;
localN0 = mgr.localN0;
long startN0 = mgr.startN0;
Julia::Object plane_array =
Julia::invoke(query_planes(module_name), Julia::pack(state));
auto planes = plane_array.unbox_array<uint64_t, 1>();
std::vector<size_t> owned_planes(localN0);
for (size_t i = 0; i < localN0; i++)
owned_planes[i] = startN0 + i;
ghosts.setup(comm, planes, owned_planes, std::array<size_t, 2>{N1, N2}, N0);
ctx.print("Resize posteriors");
posteriors.resize(Ncatalog);
hmcs.resize(Ncatalog);
samplerBuilder_t samplerBuilder;
state.newScalar<int>("hmclet_badreject", 0, true);
std::tie(samplerBuilder, massMatrixInit) = getAdequateSampler();
ctx.print("Register to likelihood post init");
likelihood->getPendingInit().ready([this, &state, samplerBuilder]() {
ConsoleContext<LOG_INFO> ctx2("JuliaHmcletMeta::restore::post_init");
for (size_t c = 0; c < Ncatalog; c++) {
auto &bias = *state.get<ArrayType1d>(format("galaxy_bias_%d") % c)->array;
ctx2.print("Make posterior");
posteriors[c] = std::make_shared<JuliaHmcletPosterior>(
comm, module_name, c, bias.size());
ctx2.print("Make hmclet");
hmcs[c] = samplerBuilder(
posteriors[c], state, str(format("galaxy_hmclet_%d") % c));
}
ready_hmclet.submit_ready();
});
}
void JuliaHmcletMeta::sample(MarkovState &state) {
ConsoleContext<LOG_VERBOSE> ctx("JuliaHmcletMeta::sample");
if (state.getScalar<bool>("bias_sampler_blocked"))
return;
Julia::Object jl_density;
auto &out_density = *state.get<ArrayType>("BORG_final_density")->array;
auto jl_state = Julia::pack(state);
long MCMC_STEP = state.getScalar<long>("MCMC_STEP");
RandomGen *rgen = state.get<RandomGen>("random_generator");
// Now we gather all the required planes on this node and dispatch
// our data to peers.
ghosts.synchronize(out_density);
Julia::Object jl_ghosts = Julia::newGhostManager(&ghosts, N2);
jl_density.box_array(out_density);
Julia::Object v_density =
Julia::view_array<3>(jl_density, {_r(1, localN0), _r(1, N1), _r(1, N2)});
if (MCMC_STEP == 0) {
for (size_t cat_idx = 0; cat_idx < Ncatalog; cat_idx++) {
VectorType &bias =
*(state.get<ArrayType1d>(format("galaxy_bias_%d") % cat_idx)->array);
if (!massMatrixInit) {
error_helper<ErrorBadState>(
"No mass matrix initializer provided to JuliaHmclet");
}
try {
massMatrixInit(hmcs[cat_idx], jl_state);
} catch (Julia::JuliaException const &) {
ctx.print2<LOG_WARNING>("Mass matrix not provided. Auto-seeding.");
size_t Nbias = bias.size();
boost::multi_array<double, 1> initial_step(boost::extents[Nbias]);
for (size_t j = 0; j < Nbias; j++)
initial_step[j] =
Julia::invoke(
module_name + ".get_step_hint", jl_state, cat_idx, j)
.unbox<double>();
posteriors[cat_idx]->updateGhosts(jl_ghosts);
posteriors[cat_idx]->updateState(jl_state, v_density);
hmcs[cat_idx]->calibrate(comm, rgen->get(), 10, bias, initial_step);
}
}
ctx.print("Done initializing mass matrix");
}
for (size_t cat_idx = 0; cat_idx < Ncatalog; cat_idx++) {
posteriors[cat_idx]->updateGhosts(jl_ghosts);
posteriors[cat_idx]->updateState(jl_state, v_density);
try {
hmcs[cat_idx]->newSample(
comm, rgen->get(),
*state.get<ArrayType1d>(format("galaxy_bias_%d") % cat_idx)->array);
} catch (LibLSS::HMCLet::ErrorBadReject const& e) {
state.getScalar<int>("hmclet_badreject")++;
ctx.print2<LOG_ERROR>("Bad reject. Note down and reset the hmc");
hmcs[cat_idx]->reset();
}
}
// Do not use posteriors beyond this without reupdating all arrays.
}
// ----------------------------------------------------------------------------
// JuliaHmcletPosterior
size_t JuliaHmcletPosterior::getNumberOfParameters() const {
return numBiasParams;
}
double JuliaHmcletPosterior::evaluate(VectorType const &params) {
ConsoleContext<LOG_DEBUG> ctx("JuliaHmcletPosterior::evaluate");
boost::multi_array<double, 1> a = params;
Julia::Object jl_p;
jl_p.box_array(a);
double L =
Julia::invoke(param_priors_name, *state, cat_id, jl_p).unbox<double>();
if (L == std::numeric_limits<double>::infinity())
return std::numeric_limits<double>::infinity();
L += Julia::invoke(likelihood_name, *state, *ghosts, *density, cat_id, jl_p)
.unbox<double>();
ctx.print("Reduce likelihood");
comm->all_reduce_t(MPI_IN_PLACE, &L, 1, MPI_SUM);
ctx.print("Returning L=" + to_string(L));
return L;
}
void JuliaHmcletPosterior::adjointGradient(
VectorType const &params, VectorType &params_gradient) {
ConsoleContext<LOG_DEBUG> ctx("JuliaHmcletPosterior::adjointGradient");
Julia::Object jl_p, jl_gradient;
boost::multi_array<double, 1> a(boost::extents[numBiasParams]);
int bad_gradient_count = 0;
fwrap(a) = params;
jl_p.box_array(a);
jl_gradient.box_array(params_gradient);
comm->broadcast_t(a.data(), numBiasParams, ROOT_RANK);
try {
Julia::invoke(
adjoint_name, *state, *ghosts, *density, cat_id, jl_p, jl_gradient);
} catch (Julia::JuliaException &e) {
if (Julia::isBadGradient(e))
bad_gradient_count = 1;
else
throw;
}
comm->all_reduce_t(MPI_IN_PLACE, &bad_gradient_count, 1, MPI_SUM);
if (bad_gradient_count > 0)
throw HMCLet::ErrorBadGradient("Bad gradient from Julia");
Console::instance().print<LOG_VERBOSE>("Got a gradient: " + to_string(params_gradient));
comm->all_reduce_t(
(double *)MPI_IN_PLACE, params_gradient.data(), numBiasParams, MPI_SUM);
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2019

@@ -0,0 +1,130 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_hmclet.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_JULIA_HMCLET_HPP
# define __LIBLSS_JULIA_HMCLET_HPP
# include <memory>
# include "libLSS/samplers/core/markov.hpp"
# include "libLSS/julia/julia.hpp"
# include "libLSS/samplers/julia/julia_likelihood.hpp"
# include "libLSS/hmclet/hmclet.hpp"
# include "libLSS/tools/mpi/ghost_planes.hpp"
namespace LibLSS {
namespace JuliaHmclet {
namespace types {
typedef ArrayType1d::ArrayType bias_t;
enum MatrixType {
DIAGONAL, DENSE, QN_DIAGONAL
};
}
namespace details {
using namespace types;
using namespace HMCLet;
class JuliaHmcletPosterior : virtual public JointPosterior {
protected:
MPI_Communication *comm;
std::string likelihood_module;
std::string likelihood_name;
std::string adjoint_name;
size_t cat_id;
Julia::Object *density;
Julia::Object *state;
Julia::Object *ghosts;
size_t numBiasParams;
std::string param_priors_name;
public:
JuliaHmcletPosterior(
MPI_Communication *comm_, const std::string likelihood_module_,
size_t cat_id_, size_t numBiasParams_)
: comm(comm_), likelihood_module(likelihood_module_),
likelihood_name(
JuliaLikelihood::likelihood_evaluate_bias(likelihood_module)),
adjoint_name(
JuliaLikelihood::likelihood_adjoint_bias(likelihood_module)),
cat_id(cat_id_), numBiasParams(numBiasParams_),
param_priors_name(likelihood_module + ".log_prior_bias") {}
virtual ~JuliaHmcletPosterior() {}
// We try to save a bit of julia stack protection.
void updateGhosts(Julia::Object &ghosts_) { ghosts = &ghosts_; }
void updateState(Julia::Object &state_, Julia::Object &density_) {
state = &state_;
density = &density_;
}
virtual size_t getNumberOfParameters() const;
virtual double evaluate(VectorType const &params);
virtual void
adjointGradient(VectorType const &params, VectorType &params_gradient);
};
typedef std::function<std::unique_ptr<AbstractSimpleSampler>(
std::shared_ptr<JuliaHmcletPosterior> &, MarkovState &,
std::string const &)>
samplerBuilder_t;
typedef std::function<void(
std::unique_ptr<AbstractSimpleSampler> &, Julia::Object &)>
massMatrixInit_t;
class JuliaHmcletMeta : virtual public MarkovSampler {
protected:
MPI_Communication *comm;
std::string module_name;
typedef HMCLet::AbstractSimpleSampler sampler_t;
typedef std::unique_ptr<sampler_t> SimpleSampler_p;
typedef std::vector<SimpleSampler_p> SimpleSampler_pv;
std::vector<std::shared_ptr<details::JuliaHmcletPosterior>> posteriors;
SimpleSampler_pv hmcs;
size_t Ncatalog, N0, N1, N2, N2real, localN0;
GhostPlanes<double, 2> ghosts;
std::shared_ptr<JuliaDensityLikelihood> likelihood;
Defer ready_hmclet;
MatrixType massMatrixType;
size_t burnin;
size_t memorySize;
double limiter;
bool frozen;
massMatrixInit_t massMatrixInit;
std::tuple<samplerBuilder_t, massMatrixInit_t> getAdequateSampler();
public:
JuliaHmcletMeta(
MPI_Communication *comm, std::shared_ptr<JuliaDensityLikelihood> likelihood_,
const std::string &likelihood_module, MatrixType massMatrixType_,
size_t burnin, size_t memorySize, double limiter, bool frozen);
~JuliaHmcletMeta();
Defer &postinit() { return ready_hmclet; }
SimpleSampler_pv &hmclets() { return hmcs; }
virtual void initialize(MarkovState &state);
virtual void restore(MarkovState &state);
virtual void sample(MarkovState &state);
};
} // namespace details
} // namespace JuliaHmclet
using JuliaHmclet::details::JuliaHmcletMeta;
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2019

@@ -0,0 +1,168 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_slice.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <string>
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/julia/julia.hpp"
#include "libLSS/julia/julia_mcmc.hpp"
#include "libLSS/hmclet/julia_slice.hpp"
#include "libLSS/samplers/core/types_samplers.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/samplers/julia/julia_likelihood.hpp"
#include "libLSS/julia/julia_ghosts.hpp"
#include "libLSS/julia/julia_array.hpp"
#include "libLSS/hmclet/mass_saver.hpp"
using namespace LibLSS;
using namespace LibLSS::JuliaLikelihood;
using LibLSS::Julia::helpers::_r;
JuliaMetaSlice::JuliaMetaSlice(
MPI_Communication *comm_, const std::string &likelihood_module_,
std::shared_ptr<JuliaDensityLikelihood> likelihood_, size_t burnin_, size_t memorySize_)
: MarkovSampler(), module_name(likelihood_module_), comm(comm_),
likelihood(likelihood_), burnin(burnin_), memorySize(memorySize_) {}
JuliaMetaSlice::~JuliaMetaSlice() {}
void JuliaMetaSlice::initialize(MarkovState &state) { restore(state); }
void JuliaMetaSlice::restore(MarkovState &state) {
N0 = state.getScalar<long>("N0");
N1 = state.getScalar<long>("N1");
N2 = state.getScalar<long>("N2");
N2real = state.getScalar<long>("N2real");
localN0 = state.getScalar<long>("localN0");
startN0 = state.getScalar<long>("startN0");
Ncatalog = state.getScalar<long>("NCAT");
Julia::Object plane_array =
Julia::invoke(query_planes(module_name), Julia::pack(state));
auto planes = plane_array.unbox_array<uint64_t, 1>();
std::vector<size_t> owned_planes(localN0);
for (size_t i = 0; i < localN0; i++)
owned_planes[i] = startN0 + i;
// Create and introduce the covariance matrix in the state.
// However this matrix is fully owned by JuliaMetaSlice. Only the saver
// is introduced as a mechanism to automatically save/restore the matrix.
//
likelihood->getPendingInit().ready([this, &state]() {
covariances.clear();
for (size_t i = 0; i < Ncatalog; i++) {
auto &bias =
*state.get<ArrayType1d>(boost::format("galaxy_bias_%d") % i)->array;
size_t numParams = bias.size();
auto covar = std::shared_ptr<mass_t>(new mass_t(numParams));
auto obj = new ObjectStateElement<HMCLet::MassSaver<mass_t>, true>();
obj->obj = new HMCLet::MassSaver<mass_t>(*covar.get());
state.newElement(boost::str(boost::format("galaxy_slice_%d") % i), obj, true);
covariances.push_back(covar);
Julia::Object jl_mass =
Julia::invoke(module_name + ".fill_dense_mass_matrix", Julia::pack(state));
auto mass = jl_mass.unbox_array<double, 2>();
Console::instance().print<LOG_INFO>("Setup IC mass matrix");
covar->setInitialMass(mass);
covar->clear();
covar->setBurninMax(burnin);
covar->setMemorySize(memorySize);
covar->setCorrelationLimiter(0.001); // The minimum to avoid blow up
}
});
ghosts.setup(
comm, planes, owned_planes, std::array<size_t, 2>{N1, N2real}, N0);
}
void JuliaMetaSlice::sample(MarkovState &state) {
using namespace Eigen;
ConsoleContext<LOG_VERBOSE> ctx("JuliaMetaSlice::sample");
Julia::Object jl_density;
if (state.getScalar<bool>("bias_sampler_blocked"))
return;
auto &out_density = *state.get<ArrayType>("BORG_final_density")->array;
auto jl_state = Julia::pack(state);
// Now we gather all the required planes on this node and dispatch
// our data to peers.
ghosts.synchronize(out_density);
RandomGen *rgen = state.get<RandomGen>("random_generator");
auto jl_ghosts = Julia::newGhostManager(&ghosts, N2);
jl_density.box_array(out_density);
std::string likelihood_name = likelihood_evaluate_bias(module_name);
std::string param_priors_name = module_name + ".log_prior_bias";
auto v_density =
Julia::view_array<3>(jl_density, {_r(1, localN0), _r(1, N1), _r(1, N2)});
for (int cat_idx = 0; cat_idx < Ncatalog; cat_idx++) {
auto &bias = *state.get<ArrayType1d>(galaxy_bias_name(cat_idx))->array;
size_t Nbiases = bias.size();
Map<VectorXd> current_bias(bias.data(), Nbiases);
VectorXd transformed_bias(Nbiases);
VectorXd new_transformed_bias(Nbiases);
boost::multi_array_ref<double, 1> new_bias(
&new_transformed_bias(0), boost::extents[Nbiases]);
Julia::Object jl_bias;
jl_bias.box_array(new_bias);
covariances[cat_idx]->computeMainComponents();
auto mean = covariances[cat_idx]->getMean();
auto components = covariances[cat_idx]->components();
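// Slice-sample in the eigenbasis of the running covariance estimate:
// rotate the bias vector onto the principal axes, sweep each coordinate,
// then rotate back. This decorrelates the directions being swept.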
transformed_bias.noalias() = components.adjoint() * (current_bias - mean);
for (int j = 0; j < Nbiases; j++) {
ctx.print(boost::format("catalog %d / bias %d") % cat_idx % j);
auto likelihood = [&, this, j, cat_idx](double x) -> double {
new_transformed_bias = transformed_bias;
new_transformed_bias(j) = x;
new_transformed_bias = components * new_transformed_bias + mean;
double L = Julia::invoke(
likelihood_name, jl_state, jl_ghosts, v_density, cat_idx,
jl_bias)
.unbox<double>();
ctx.print("Reduce likelihood");
comm->all_reduce_t(MPI_IN_PLACE, &L, 1, MPI_SUM);
ctx.print("Returning L=" + to_string(L));
L += Julia::invoke(param_priors_name, jl_state, cat_idx, jl_bias)
.unbox<double>();
return -L;
};
double step =
Julia::invoke(module_name + ".get_step_hint", jl_state, cat_idx, j)
.unbox<double>();
ctx.print("Advised step is " + to_string(step));
transformed_bias(j) = slice_sweep(
comm, rgen->get(), likelihood, transformed_bias(j), step, 0);
}
new_transformed_bias = components * transformed_bias + mean;
current_bias = new_transformed_bias;
covariances[cat_idx]->addMass(bias);
}
}
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2019

@@ -0,0 +1,53 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/julia_slice.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __JULIA_META_SLICE_HPP
# define __JULIA_META_SLICE_HPP
# include <vector>
# include "libLSS/samplers/core/markov.hpp"
# include "libLSS/tools/mpi/ghost_planes.hpp"
# include "libLSS/hmclet/mass_burnin.hpp"
# include "libLSS/hmclet/dense_mass.hpp"
# include "libLSS/samplers/julia/julia_likelihood.hpp"
namespace LibLSS {
class JuliaMetaSlice : public MarkovSampler {
protected:
GhostPlanes<double, 2> ghosts;
std::string module_name;
size_t N0, N1, N2, N2real, localN0, startN0, Ncatalog;
MPI_Communication *comm;
typedef HMCLet::MassMatrixWithBurnin<HMCLet::DenseMassMatrix> mass_t;
std::vector<std::shared_ptr<mass_t>> covariances;
std::shared_ptr<JuliaDensityLikelihood> likelihood;
size_t burnin, memorySize;
public:
JuliaMetaSlice(
MPI_Communication *comm, const std::string &likelihood_module,
std::shared_ptr<JuliaDensityLikelihood> likelihood_, size_t burnin_,
size_t memorySize_);
~JuliaMetaSlice();
virtual void initialize(MarkovState &state);
virtual void restore(MarkovState &state);
virtual void sample(MarkovState &state);
};
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2019

@@ -0,0 +1,102 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/mass_burnin.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#include <boost/format.hpp>
#include <functional>
#include <cmath>
#include "libLSS/tools/console.hpp"
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/tools/symplectic_integrator.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"
#include "libLSS/hmclet/mass_burnin.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/tools/hdf5_scalar.hpp"
#include "libLSS/tools/itertools.hpp"
using namespace LibLSS;
using namespace LibLSS::HMCLet;
namespace ph = std::placeholders;
using boost::format;
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::saveMass(CosmoTool::H5_CommonFileGroup &g) {
super_t::saveMass(g);
hdf5_save_scalar(g, "stepID", stepID);
Console::instance().print<LOG_VERBOSE>("Handling memory");
for (auto m : itertools::enumerate(memory)) {
int id = m.template get<0>();
auto const &a = m.template get<1>();
std::string s = str(boost::format("memory_%d") % id);
Console::instance().print<LOG_VERBOSE>(
boost::format("Saving memory %d / %s") % id % s);
CosmoTool::hdf5_write_array(g, s, a);
}
}
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::loadMass(CosmoTool::H5_CommonFileGroup &g) {
super_t::loadMass(g);
stepID = hdf5_load_scalar<size_t>(g, "stepID");
if (stepID > burninMaxIteration)
return;
memory.clear();
for (auto r : itertools::range(0, memorySize)) {
boost::multi_array<double, 1> m;
try {
CosmoTool::hdf5_read_array(g, str(boost::format("memory_%d") % r), m);
} catch (H5::Exception const &) {
break;
}
memory.push_back(m);
}
}
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::clear() {
super_t::clear();
memory.clear();
}
template <typename Matrix>
void MassMatrixWithBurnin<Matrix>::addMass(VectorType const &params) {
stepID++;
// Once burn-in is over, stop accumulating: the mass matrix is kept fixed
// at its final burn-in estimate.
if (stepID > burninMaxIteration) {
// memory.clear();
// super_t::addMass(params);
return;
}
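// During burn-in, maintain a sliding window of the most recent samples;
// once the window is full, rebuild the mass matrix from scratch from the
// window so that early, poorly mixed samples drop out of the estimate.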
memory.push_back(params);
if (memory.size() > memorySize) {
memory.pop_front();
super_t::clear();
// Very dumb algorithm
for (auto &old_params : memory)
super_t::addMass(old_params);
} else {
super_t::addMass(params);
}
}
#include "libLSS/hmclet/diagonal_mass.hpp"
template class LibLSS::HMCLet::MassMatrixWithBurnin<DiagonalMassMatrix>;
#include "libLSS/hmclet/dense_mass.hpp"
template class LibLSS::HMCLet::MassMatrixWithBurnin<DenseMassMatrix>;
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019

@@ -0,0 +1,55 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/hmclet/mass_burnin.hpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#ifndef __LIBLSS_HMCLET_DIAGONAL_MASS_BURNIN_HPP
# define __LIBLSS_HMCLET_DIAGONAL_MASS_BURNIN_HPP
# include <memory>
# include <boost/multi_array.hpp>
# include "libLSS/samplers/core/random_number.hpp"
# include <CosmoTool/hdf5_array.hpp>
# include "libLSS/tools/errors.hpp"
# include "libLSS/hmclet/hmclet.hpp"
namespace LibLSS {
namespace HMCLet {
template <typename Matrix>
class MassMatrixWithBurnin : public Matrix {
protected:
typedef Matrix super_t;
size_t memorySize;
size_t burninMaxIteration;
size_t stepID;
std::list<boost::multi_array<double, 1>> memory;
public:
MassMatrixWithBurnin(size_t numParams_)
: super_t(numParams_), memorySize(50), burninMaxIteration(300),
stepID(0) {}
void setMemorySize(size_t sz) { memorySize = sz; }
void setBurninMax(size_t maxIteration) {
burninMaxIteration = maxIteration;
}
void saveMass(CosmoTool::H5_CommonFileGroup &g);
void loadMass(CosmoTool::H5_CommonFileGroup &g);
void addMass(VectorType const &params);
void clear();
};
} // namespace HMCLet
} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019

@@ -0,0 +1,62 @@
#ifndef __LIBLSS_HMCLET_MASS_SAVER_HPP
#define __LIBLSS_HMCLET_MASS_SAVER_HPP
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/mcmc/state_element.hpp"
#include "libLSS/mcmc/global_state.hpp"
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
namespace LibLSS {
namespace HMCLet {
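// Adapters exposing a mass matrix (and, for QN-HMC, the B matrix) to the
// MarkovState save/restore machinery through ObjectStateElement.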
template <typename Mass_t>
struct MassSaver {
Mass_t &mass;
MassSaver(Mass_t &mass_) : mass(mass_) {}
void save(CosmoTool::H5_CommonFileGroup &fg) { mass.saveMass(fg); }
void restore(CosmoTool::H5_CommonFileGroup &fg) { mass.loadMass(fg); }
};
template <typename Mass_t, typename BMass_t>
struct QNMassSaver {
Mass_t &mass;
BMass_t &B;
QNMassSaver(Mass_t &mass_, BMass_t& b_) : mass(mass_), B(b_) {}
void save(CosmoTool::H5_CommonFileGroup &fg) { mass.saveMass(fg); B.save(fg); }
void restore(CosmoTool::H5_CommonFileGroup &fg) { mass.loadMass(fg); B.load(fg); }
};
template <typename Mass_t>
static void add_saver(
MarkovState &state, std::string const &name,
std::unique_ptr<SimpleSampler<Mass_t>> &sampler) {
Console::instance().print<LOG_DEBUG>(
"Creating a saver for the mass matrix in " + name);
auto obj_elt = new ObjectStateElement<MassSaver<Mass_t>, true>();
obj_elt->obj = new MassSaver<Mass_t>(sampler->getMass());
state.newElement(name, obj_elt, true);
}
template <typename Mass_t>
static void add_saver(
MarkovState &state, std::string const &name,
std::unique_ptr<QNHMCLet::Sampler<Mass_t,QNHMCLet::BDense>> &sampler) {
Console::instance().print<LOG_DEBUG>(
"Creating a saver for the QN mass matrix in " + name);
auto obj_elt = new ObjectStateElement<QNMassSaver<Mass_t,QNHMCLet::BDense>, true>();
obj_elt->obj = new QNMassSaver<Mass_t,QNHMCLet::BDense>(sampler->getMass(), sampler->getB());
state.newElement(name, obj_elt, true);
}
} // namespace HMCLet
} // namespace LibLSS
#endif

@@ -0,0 +1,118 @@
module convHMC
using TensorFlow
sess = 0; p = 0; δ = 0; g = 0; s = 0; n = 0; sel = 0; loss = 0; error = 0; ag = 0; output = 0;
function isotropic_weights(params, C0, C1, C2)
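# The 3x3x3 kernel is tied by distance to its center: params[1] sits at the
# center, params[2] on the 6 face neighbours, params[3] on the 12 edges and
# params[4] on the 8 corners, enforcing an isotropic convolution filter.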
out_edge = stack([params[4], params[3], params[4]])
out_face = stack([params[3], params[2], params[3]])
inner = stack([params[2], params[1], params[2]])
face = stack([out_edge, out_face, out_edge])
middle = stack([out_face, inner, out_face])
return reshape(stack([face, middle, face]), (C0, C1, C2, 1, 1))
end
function get_isotropic_weights(num_layers, kernel)
w = Array{Any}(num_layers)
b = Array{Any}(num_layers)
for i = 1:num_layers
w[i] = isotropic_weights(p[(i - 1) * 5 + 1: (i - 1) * 5 + 4], kernel[1], kernel[2], kernel[3])
b[i] = p[i * 5]
end
return w, b
end
function get_3d_conv(num_layers, kernel)
w = Array{Any}(num_layers)
b = Array{Any}(num_layers)
for i = 1:num_layers
w[i] = reshape(p[(i - 1) * 28 + 1: (i - 1) * 28 + 27], (kernel[1], kernel[2], kernel[3], 1, 1))
b[i] = p[i * 28]
end
return w, b
end
function convolutional_network(x, w, b, num_layers, N0, N1, N2)
for i = 1:num_layers
x = nn.relu(nn.conv3d(x, w[i], strides = [1, 1, 1, 1, 1], padding = "SAME") + b[i]) + x
end
x = nn.relu(x)
return reshape(x, (N0, N1, N2))
end
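# Masked Gaussian error term: 0.5 (s*x - g)^2 / (n*s) plus the 0.5 log(n)
# noise normalisation, summed over the voxels kept by the selection mask.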
function mse(x, g_, s_, n_, sel_, loss_params)
N0 = loss_params[1]
N1 = loss_params[2]
N2 = loss_params[3]
x = boolean_mask(reshape(x, N0 * N1 * N2), sel_)
return reduce_sum(0.5 * (multiply(x, s_) - g_)^2. / multiply(n_, s_) + 0.5 * log(n_))
end
function get_poisson_bias(_, __)
return -99, -99
end
function no_network(x, _, __, ___, ____, _____, ______)
return x
end
function poisson_bias(x, g_, s_, n_, sel_, loss_params)
N0 = loss_params[1]
N1 = loss_params[2]
N2 = loss_params[3]
x = boolean_mask(reshape(x, N0 * N1 * N2), sel_)
    return reduce_sum((g_ .- s_ .* (1. .- p[1] .* x)).^2. / (s_ .* n_))
end
function setup(num_layers, N0, N1, N2, num_params, extras, loss_params, network, get_variables, Λ)
global sess, p, δ, g, s, n, sel, output, loss, ag, error
sess = Session();
p = placeholder(Float64, shape = [num_params])
δ = placeholder(Float64, shape = [N0, N1, N2])
δ_ = reshape(δ, (1, N0, N1, N2, 1))
sel = placeholder(Bool, shape = [N0, N1, N2])
sel_ = reshape(sel, N0 * N1 * N2)
g = placeholder(Float64, shape = [N0, N1, N2])
g_ = boolean_mask(reshape(g, N0 * N1 * N2), sel_)
s = placeholder(Float64, shape = [N0, N1, N2])
s_ = boolean_mask(reshape(s, N0 * N1 * N2), sel_)
n = placeholder(Float64, shape = [1])
n_ = n[1]
w, b = get_variables(num_layers, extras)
output = network(δ_, w, b, num_layers, N0, N1, N2)
loss = Λ(output, g_, s_, n_, sel_, loss_params)
ag = gradients(loss, δ)
#error = gradients(loss, p)
run(sess, global_variables_initializer())
end
function evaluate(params, field, galaxy, selection, noise, mask)
return run(sess, loss, Dict(p => params, δ => field, g => galaxy, s => selection, n => [noise], sel => mask))
end
function adjointGradient(params, field, galaxy, selection, noise, mask)
return run(sess, ag, Dict(p => params, δ => field, g => galaxy, s => selection, n => [noise], sel => mask))
end
#function adjointNetworkGradient(params, field, galaxy, selection, noise, mask)
# gradient = run(sess, error, Dict(p => params, δ => field, g => galaxy, s => selection, n => [noise], sel => mask))
# params_gradient = gradient.values[gradient.indices]
# #println(params_gradient)
# #params_gradient = Array{Float64}(tot_num_conv * 5);
# #for i = 1:tot_num_conv
# # for j = 1:4
# # ind = find(x -> x == j, gradient[(i - 1) * 2 + 1].indices);
# # params_gradient[(i - 1) * 5 + j] = sum(gradient[(i - 1) * 2 + 1].values[ind]);
# # end
# # params_gradient[i * 5] = gradient[i * 2];
# #end
# return params_gradient
#end
function get_field(params, field)
return run(sess, output, Dict(p => params, δ => field));
end
end

View file

@ -0,0 +1,43 @@
using TensorFlow
using Distributions
sess = Session(Graph());
inputs = 3;
θ = placeholder(Float32, shape = [nothing, inputs])
m2lnL = placeholder(Float32, shape = [nothing])
layers = 2;
neurons_per_layer = 50;
α = 0.1;
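# Small fully-connected emulator mapping the `inputs` bias parameters to a
# -2lnL value: `layers` leaky-ReLU hidden layers with He-style Gaussian
# initialisation of the weights.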
function network(θ, layers, neurons_per_layer, α)
x = θ
weights = Array{Any}(layers + 1)
biases = Array{Any}(layers + 1)
for i=1:layers
if i == 1
weights[i] = get_variable("layer_" * string(i) * "_weights", [3, neurons_per_layer], Float32, initializer=Normal(0., sqrt(2./3.)))
biases[i] = get_variable("layer_" * string(i) * "_biases", [neurons_per_layer], Float32)
elseif i == layers
weights[i] = get_variable("layer_" * string(i) * "_weights", [neurons_per_layer, 1], Float32, initializer=Normal(0., sqrt(2./neurons_per_layer)))
biases[i] = get_variable("layer_" * string(i) * "_biases", [1], Float32)
else
weights[i] = get_variable("layer_" * string(i) * "_weights", [neurons_per_layer, neurons_per_layer], Float32, initializer=Normal(0., sqrt(2./neurons_per_layer)))
biases[i] = get_variable("layer_" * string(i) * "_biases", [neurons_per_layer], Float32)
end
x = x * weights[i] + biases[i]
x = max(α * x, x)
end
x = reshape(x, (-1))
return x, weights, biases
end
output, weights, biases = network(θ, layers, neurons_per_layer, α)
loss = mean(0.5 * (output / m2lnL - 1)^2)
gradient = gradients(loss, θ);
weight_gradients = [gradients(loss, weights[i]) for i=1:layers];
bias_gradients = [gradients(loss, biases[i]) for i=1:layers];

View file

@ -0,0 +1,171 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/network/TF_conv.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
module network
using libLSS
import libLSS.State
import libLSS.GhostPlanes, libLSS.get_ghost_plane
import libLSS.print, libLSS.LOG_INFO, libLSS.LOG_VERBOSE, libLSS.LOG_DEBUG
using TensorFlow
sess = Session(Graph())
p = nothing
#new_p = nothing
#assign_p = nothing
δ = nothing
g = nothing
s = nothing
mask = nothing
output = nothing
mock = nothing
loss = nothing
adgrad = nothing
wgrad = nothing
function setup(N0, number_of_parameters)
global p, new_p, assign_p, δ, g, s, mask, output, mock, loss, adgrad, wgrad
p = Array{Any}(number_of_parameters)
#new_p = Array{Any}(number_of_parameters)
#assign_p = Array{Any}(number_of_parameters)
for i=1:number_of_parameters
p[i] = placeholder(Float64, shape = [])
#p[i] = Variable(zeros(Float64, 1))
#new_p[i] = placeholder(Float64, shape = [])
#assign_p[i] = assign(p[i], expand_dims(new_p[i], 1))
end
δ = placeholder(Float64, shape = [N0, N0, N0])
g = placeholder(Float64, shape = [N0, N0, N0])
s = placeholder(Float64, shape = [N0, N0, N0])
mask = placeholder(Bool, shape = [N0, N0, N0])
output = build_network(δ, p)
mock = output .* s
masked(v) = boolean_mask(reshape(v, N0^3), reshape(mask, N0^3))
loss = 0.5 * sum((masked(g) .- masked(s) .* masked(output)).^2. ./ masked(s)) + 0.5 * sum(cast(mask, Float64))
adgrad = gradients(loss, δ)
wgrad = Array{Any}(number_of_parameters)
for i=1:number_of_parameters
wgrad[i]= gradients(loss, p[i])
end
run(sess, global_variables_initializer())
end
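# Residual stack of 1x1x1 convolutions: each scalar weight is expanded to a
# 1x1x1x1x1 kernel and interleaved with leaky ReLU (α = 0.01); two skip
# connections keep the output field the same shape as the input.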
function build_network(input_tensor, weights)
α = Float64(0.01)
x = nn.conv3d(expand_dims(expand_dims(input_tensor, 4), 5), expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[1], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
x = x .+ weights[2]
x = max(α .* x, x)
x = nn.conv3d(x, expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[3], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
x = x .+ weights[4]
x = x + expand_dims(expand_dims(input_tensor, 4), 5)
x = max(α .* x, x)
x_ = nn.conv3d(x, expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[5], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
x_ = x_ .+ weights[6]
x_ = max(α .* x_, x_)
x_ = nn.conv3d(x_, expand_dims(expand_dims(expand_dims(expand_dims(expand_dims(weights[7], 1), 2), 3), 4), 5), strides = [1, 1, 1, 1, 1], padding = "VALID")
x_ = x_ .+ weights[8]
x_ = x_ + x
x_ = max(α .* x_, x_)
return squeeze(x_)
end
#number_of_parameters = 8
#N0 = 32
#setup(N0, number_of_parameters)
#using Distributions
#δ_ = reshape(rand(Normal(0., 1.), 32 * 32 * 32), (32, 32, 32));
#g_ = reshape(rand(Normal(0., 1.), 32 * 32 * 32), (32, 32, 32));
#p_ = zeros(number_of_parameters);
#s_ = reshape(rand(0:1, 32 * 32 * 32), (32, 32, 32));
#s_mask = s_.>0;
#using PyPlot
#imshow(squeeze(sum(δ_, 3), 3))
#imshow(squeeze(sum(g_, 3), 3))
#imshow(squeeze(sum(run(sess, output, Dict(δ=>δ_, p=>p_)), 3), (3)))
#imshow(squeeze(sum(run(sess, mock, Dict(δ=>δ_, p=>p_, s=>s_)), 3), (3)))
#loss_ = run(sess, loss, Dict(δ=>δ_, p=>p_, s=>s_, g=>g_, mask=>s_mask))
#adgrad_ = run(sess, adgrad, Dict(δ=>δ_, p=>p_, s=>s_, g=>g_, mask=>s_mask))
#wgrad_ = run(sess, wgrad, Dict(δ=>δ_, p=>p_, s=>s_, g=>g_, mask=>s_mask))
function initialize(state::State)
print(LOG_INFO, "Likelihood initialization in Julia")
number_of_parameters = 8
N0 = libLSS.get(state, "N0", Int64, synchronous=true)
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
setup(N0, number_of_parameters)
print(LOG_VERBOSE, "Found " *repr(NCAT) * " catalogues")
bias = libLSS.resize_array(state, "galaxy_bias_0", number_of_parameters, Float64)
bias[:] = 0
end
function get_required_planes(state::State)
print(LOG_INFO, "Check required planes")
return Array{UInt64,1}([])
end
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Likelihood evaluation in Julia")
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
L = Float64(0.)
for catalog=1:NCAT
sc = repr(catalog - 1)
L += run(sess, loss, Dict(p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64), δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
print(LOG_VERBOSE, "Likelihood is " * repr(L))
return L
end
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Generate mock")
sc = "0"
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
data[:,:,:] = run(sess, mock, Dict(p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64), δ=>array, s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)))
print(LOG_INFO, "Shape is " * repr(size(data)) * " and " * repr(size(array)))
print(LOG_INFO, "Number of threads " * repr(Threads.nthreads()))
print(LOG_INFO, "Noise is not included")
print(LOG_INFO, "Max val is " * repr(maximum(array)) * " and data " * repr(maximum(data)))
end
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
print(LOG_VERBOSE, "Adjoint gradient in Julia")
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
ag[:,:,:] = 0
for catalog=1:NCAT
sc = repr(catalog - 1)
Smask = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.
ag[Smask] += run(sess, adgrad, Dict(p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64), δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>Smask))[Smask]
end
end
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias)
sc = repr(catalog_id)
return run(sess, loss, Dict(p=>catalog_bias, δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
function get_step_hint(state, catalog_id)
return 0.1
end
function log_prior_bias(state, catalog_id, bias)
if bias[2] < 0
return Inf
end
return 0
end
function adjoint_bias(state::State, ghosts::GhostPlanes,
array, catalog_id, catalog_bias, adjoint_gradient_bias)
sc = repr(catalog_id)
adjoint_gradient_bias[:] = run(sess, wgrad, Dict(p=>catalog_bias, δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
end

View file

@ -0,0 +1,140 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/network/TF_likelihood.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
module network
using libLSS
import libLSS.State
import libLSS.GhostPlanes, libLSS.get_ghost_plane
import libLSS.print, libLSS.LOG_INFO, libLSS.LOG_VERBOSE, libLSS.LOG_DEBUG
using TensorFlow
sess = Session(Graph())
p = nothing
new_p = nothing
assign_p = nothing
δ = nothing
g = nothing
s = nothing
mask = nothing
loss = nothing
adgrad = nothing
wgrad = nothing
function setup(N0, number_of_parameters)
global p, new_p, assign_p, δ, g, s, mask, loss, adgrad, wgrad
p = Variable(zeros(number_of_parameters))
new_p = placeholder(Float64, shape = [number_of_parameters])
assign_p = assign(p, new_p)
δ = placeholder(Float64, shape = [N0, N0, N0])
g = placeholder(Float64, shape = [N0, N0, N0])
s = placeholder(Float64, shape = [N0, N0, N0])
mask = placeholder(Bool, shape = [N0, N0, N0])
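# Gaussian data model on the masked voxels: g ~ N(s .* (1 .- p[1] .* δ), s .* p[2]);
# the trailing 0.5 * Σ(mask) * log(p[2]) term is the noise normalisation.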
masked(v) = boolean_mask(reshape(v, N0^3), reshape(mask, N0^3))
loss = 0.5 * sum((masked(g) .- masked(s) .* (1. .- p[1] .* masked(δ))).^2. ./ (masked(s) .* p[2])) + 0.5 * sum(cast(mask, Float64)) .* log(p[2])
adgrad = gradients(loss, δ)
wgrad_slice = gradients(loss, p)
wgrad = [wgrad_slice.values, wgrad_slice.indices]
end
function initialize(state::State)
print(LOG_INFO, "Likelihood initialization in Julia")
number_of_parameters = 2
N0 = libLSS.get(state, "N0", Int64, synchronous=true)
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
setup(N0, number_of_parameters)
run(sess, global_variables_initializer())
print(LOG_VERBOSE, "Found " *repr(NCAT) * " catalogues")
bias = libLSS.resize_array(state, "galaxy_bias_0", number_of_parameters, Float64)
bias[:] = 1
run(sess, assign_p, Dict(new_p=>bias))
end
function get_required_planes(state::State)
print(LOG_INFO, "Check required planes")
return Array{UInt64,1}([])
end
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Likelihood evaluation in Julia")
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
L = Float64(0.)
for catalog=1:NCAT
sc = repr(catalog - 1)
run(sess, assign_p, Dict(new_p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)))
L += run(sess, loss, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
print(LOG_VERBOSE, "Likelihood is " * repr(L))
return L
end
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Generate mock")
sc = "0"
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
b = libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)
S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
s = size(data)
print(LOG_INFO, "Shape is " * repr(size(data)) * " and " * repr(size(array)))
print(LOG_INFO, "Number of threads " * repr(Threads.nthreads()))
N0=s[1]
N1=s[2]
N2=s[3]
noise = sqrt(b[1])
print(LOG_INFO, "Noise is " * repr(noise))
bias = b[2]
for i=1:N0,j=1:N1,k=1:N2
data[i,j,k] = S[i,j,k]*(1+bias*array[i,j,k]) + sqrt(S[i,j,k])*noise*libLSS.gaussian(state)
end
print(LOG_INFO, "Max val is " * repr(maximum(array)) * " and data " * repr(maximum(data)))
end
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
print(LOG_VERBOSE, "Adjoint gradient in Julia")
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
ag[:,:,:] = 0
for catalog=1:NCAT
sc = repr(catalog - 1)
run(sess, assign_p, Dict(new_p=>libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)))
Smask = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.
ag[Smask] += run(sess, adgrad, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>Smask))[Smask]
end
end
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias)
sc = repr(catalog_id)
run(sess, assign_p, Dict(new_p=>catalog_bias))
return run(sess, loss, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
end
function get_step_hint(state, catalog_id)
return 0.1
end
function log_prior_bias(state, catalog_id, bias)
if bias[2] < 0
return Inf
end
return 0
end
function adjoint_bias(state::State, ghosts::GhostPlanes,
array, catalog_id, catalog_bias, adjoint_gradient_bias)
sc = repr(catalog_id)
run(sess, assign_p, Dict(new_p=>catalog_bias))
error = run(sess, wgrad, Dict(δ=>array, g=>libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64), s=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64), mask=>libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64).>0.))
for i=1:length(catalog_bias)
adjoint_gradient_bias[i] = sum(error[1][error[2] .== i])
end
end
end

View file

@ -0,0 +1,103 @@
module test_conv_like
include("convHMC.jl")
using libLSS
import libLSS.State
import libLSS.GhostPlanes, libLSS.get_ghost_plane
import libLSS.print, libLSS.LOG_INFO, libLSS.LOG_VERBOSE, libLSS.LOG_DEBUG
#import test_conv_like.convHMC.initialise
function initialize(state::State)
print(LOG_INFO, "Likelihood initialization in Julia")
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
print(LOG_VERBOSE, "Found " *repr(NCAT) * " catalogues")
N0 = libLSS.get(state, "localN0", Int64, synchronous=true)
N1 = 32
N2 = 32
num_layers = 1
C0 = 3
C1 = 3
C2 = 3
bias = libLSS.resize_array(state, "galaxy_bias_0", num_layers * 5 + 1, Float64)
#bias = libLSS.resize_array(state, "galaxy_bias_0", 29, Float64)
bias[:] = 0
bias[1] = 1
bias[6] = 100
#bias[28] = 1
#bias[29] = 100
#bias[11] = 1
#bias[16] = 1
#bias[21] = 1
#bias[26] = 100
test_conv_like.convHMC.setup(num_layers, N0, N1, N2, 5 * num_layers, [C0, C1, C2], [N0, N1, N2], test_conv_like.convHMC.convolutional_network, test_conv_like.convHMC.get_isotropic_weights, test_conv_like.convHMC.mse)
#test_conv_like.convHMC.setup(num_layers, N0, N1, N2, 28, [C0, C1, C2], [N0, N1, N2], test_conv_like.convHMC.convolutional_network, test_conv_like.convHMC.get_3d_conv, test_conv_like.convHMC.mse)
#bias = libLSS.resize_array(state, "galaxy_bias_0", 2, Float64)
#bias[1] = 100
#bias[2] = 1
#test_conv_like.convHMC.setup(num_layers, N0, N1, N2, 1, -99, [N0, N1, N2], test_conv_like.convHMC.no_network, test_conv_like.convHMC.get_poisson_bias, test_conv_like.convHMC.poisson_bias)
end
function get_required_planes(state::State)
print(LOG_INFO, "Check required planes")
return Array{UInt64,1}([])
end
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_INFO, "Likelihood evaluation in Julia")
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
L = Float64(0)
for catalog in 0:(NCAT-1)
sc = repr(catalog)
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
params = libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)
S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
Smask = S.>0
L += test_conv_like.convHMC.evaluate(params[1:end-1], array, data, S, params[end], Smask)
end
print(LOG_VERBOSE, "Likelihood is " * repr(L))
return L
end
function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
#sc = "0"
#data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
#b = libLSS.get_array_1d(state, "galaxy_bias_"*sc, Float64)
#S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
#s = size(data)
#print(LOG_INFO, "Shape is " * repr(size(data)) * " and " * repr(size(array)))
#print(LOG_INFO, "Number of threads " * repr(Threads.nthreads()))
#N0=s[1]
#N1=s[2]
#N2=s[3]
#noise = sqrt(b[1])
#bias = b[2]
#for i=1:N0,j=1:N1,k=1:N2
# data[i,j,k] = S[i,j,k]*(1+bias*array[i,j,k] + noise*libLSS.gaussian(state))
#end
print(LOG_INFO, "Generate mock")
params = libLSS.get_array_1d(state, "galaxy_bias_0", Float64)
S = libLSS.get_array_3d(state, "galaxy_sel_window_0", Float64)
data = test_conv_like.convHMC.get_field(params[1:end-1], array) .* S
print(LOG_INFO, "Max val is " * repr(maximum(array)) * " and data " * repr(maximum(data)))
end
function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
print(LOG_VERBOSE, "Adjoint gradient in Julia")
N0 = libLSS.get(state, "N0", Int64, synchronous=true)
NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
L = Float64(0)
ag[:, :, :] = 0
for catalog in 0:(NCAT-1)
sc = repr(catalog)
data = libLSS.get_array_3d(state, "galaxy_data_"*sc, Float64)
params = libLSS.get_array_1d(state, "galaxy_bias_0", Float64)
S = libLSS.get_array_3d(state, "galaxy_sel_window_"*sc, Float64)
Smask = S.>0
ag += test_conv_like.convHMC.adjointGradient(params[1:end-1], array, data, S, params[end], Smask)
end
end
end

View file

@ -0,0 +1,74 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_dense_mass.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE mass_matrix
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include <CosmoTool/algo.hpp>
#include <memory>
#include <H5Cpp.h>
#include "libLSS/hmclet/dense_mass.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
namespace utf = boost::unit_test;
using namespace LibLSS;
BOOST_AUTO_TEST_CASE(dense_mass) {
MPI_Communication *comm = MPI_Communication::instance();
RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);
HMCLet::DenseMassMatrix M(3);
boost::multi_array<double, 1> numbers(boost::extents[3]);
auto numbers_w = fwrap(numbers);
double a[3];
auto& cons = Console::instance();
for (int i = 0; i < 20; i++) {
a[0] = rgen.gaussian();
a[1] = rgen.gaussian();
a[2] = rgen.gaussian();
numbers[0] = (a[0]+a[2])/std::sqrt(2.0);
numbers[1] = (a[0]-a[2])/std::sqrt(2.0);
numbers[2] = a[1];
M.addMass(numbers);
M.computeMainComponents();
auto C = M.components();
auto mean = M.getMean();
cons.format<LOG_DEBUG>("c00 = %g, c01 = %g, c02 = %g", C(0,0), C(0,1), C(0,2));
cons.format<LOG_DEBUG>("c10 = %g, c11 = %g, c12 = %g", C(1,0), C(1,1), C(1,2));
cons.format<LOG_DEBUG>("c20 = %g, c21 = %g, c22 = %g", C(2,0), C(2,1), C(2,2));
cons.format<LOG_DEBUG>("mean = %g,%g,%g", mean(0), mean(1), mean(2));
}
}
int main(int argc, char *argv[]) {
setupMPI(argc, argv);
StaticInit::execute();
int ret = utf::unit_test_main(&init_unit_test, argc, argv);
StaticInit::finalize();
doneMPI();
return ret;
}

View file

@ -0,0 +1,146 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_hmclet.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE julia_bind
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include <CosmoTool/algo.hpp>
#include <memory>
#include <H5Cpp.h>
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/hmclet/hmclet.hpp"
#include "libLSS/hmclet/hmclet_qnhmc.hpp"
#include "libLSS/hmclet/diagonal_mass.hpp"
namespace utf = boost::unit_test;
using CosmoTool::square;
using namespace LibLSS;
using namespace LibLSS::HMCLet;
static const double C[2][2] = { { 9. , 1.}, {1., 4.}};
static const double inv_C[2][2] = { { 0.11428571428571427 , -0.028571428571428574}, {-0.028571428571428574, 0.2571428571428572}};
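// Target density: a 2D Gaussian with covariance C centred on (1, 4); inv_C
// is the corresponding precision matrix used by TestPosterior below.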
class TestPosterior : virtual public JointPosterior {
public:
TestPosterior() : JointPosterior() {}
virtual ~TestPosterior() {}
virtual size_t getNumberOfParameters() const { return 2; }
virtual double evaluate(VectorType const &params) {
double const u0 = params[0] - 1;
double const u1 = params[1] - 4;
return 0.5 * (u0*u0 * inv_C[0][0] + 2*u0*u1*inv_C[1][0] + u1*u1*inv_C[1][1]);
}
virtual void
adjointGradient(VectorType const &params, VectorType &params_gradient) {
double const u0 = params[0] - 1;
double const u1 = params[1] - 4;
params_gradient[0] = u0 * inv_C[0][0] + u1 * inv_C[0][1];
params_gradient[1] = u1 * inv_C[1][1] + u0 * inv_C[0][1];
}
};
BOOST_AUTO_TEST_CASE(hmclet_launch) {
auto posterior_ptr = std::make_shared<TestPosterior>();
SimpleSampler<DiagonalMassMatrix> sampler(posterior_ptr);
MPI_Communication *comm = MPI_Communication::instance();
RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);
boost::multi_array<double, 1> init_params(boost::extents[2]);
boost::multi_array<double, 1> init_step(boost::extents[2]);
init_params[0] = 100;
init_params[1] = 100;
init_step[0] = 1;
init_step[1] = 1;
boost::multi_array<double, 1> initMass(boost::extents[2]);
initMass[0] = 1;
initMass[1] = 1;
sampler.getMass().setInitialMass(initMass);
sampler.getMass().freeze();
// sampler.calibrate(comm, rgen, 2, init_params, init_step);
boost::multi_array<double, 2> p(boost::extents[10000][2]);
for (size_t i = 0; i < p.size(); i++) {
sampler.newSample(comm, rgen, init_params);
p[i][0] = init_params[0];
p[i][1] = init_params[1];
}
H5::H5File ff("test_sample.h5", H5F_ACC_TRUNC);
CosmoTool::hdf5_write_array(ff, "hmclet", p);
}
BOOST_AUTO_TEST_CASE(qnhmclet_launch) {
auto posterior_ptr = std::make_shared<TestPosterior>();
QNHMCLet::Sampler<DiagonalMassMatrix,QNHMCLet::BDense> sampler(posterior_ptr);
MPI_Communication *comm = MPI_Communication::instance();
RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);
boost::multi_array<double, 1> init_params(boost::extents[2]);
boost::multi_array<double, 1> init_step(boost::extents[2]);
boost::multi_array<double, 1> initMass(boost::extents[2]);
initMass[0] = 1;
initMass[1] = 1;
sampler.getMass().setInitialMass(initMass);
sampler.getMass().freeze();
init_params[0] = 100;
init_params[1] = 100;
init_step[0] = 1;
init_step[1] = 1;
boost::multi_array<double, 2> p(boost::extents[10000][2]);
H5::H5File ff("test_sample_qn.h5", H5F_ACC_TRUNC);
for (size_t i = 0; i < p.size(); i++) {
sampler.newSample(comm, rgen, init_params);
p[i][0] = init_params[0];
p[i][1] = init_params[1];
// auto gg = ff.createGroup(boost::str(boost::format("B_%d") % i));
// sampler.getB().save(gg);
}
CosmoTool::hdf5_write_array(ff, "qn_hmclet", p);
}
int main(int argc, char *argv[]) {
setupMPI(argc, argv);
StaticInit::execute();
int ret = utf::unit_test_main(&init_unit_test, argc, argv);
StaticInit::finalize();
doneMPI();
return ret;
}

View file

@ -0,0 +1,60 @@
#+
# ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_julia.jl
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
module TestLikelihood
using ..libLSS
import ..libLSS.State, ..libLSS.GhostPlanes, ..libLSS.get_ghost_plane
import ..libLSS.print, ..libLSS.LOG_INFO, ..libLSS.LOG_VERBOSE, ..libLSS.LOG_DEBUG
import ..libLSS.BadGradient
function initialize(state::State)
print(LOG_VERBOSE, "Likelihood initialization in Julia")
# bias = libLSS.resize_array(state, "galaxy_bias_0", 1, Float64)
# bias[1] = 1
end
function get_required_planes(state::State)
return Array{UInt64,1}([])
end
function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
print(LOG_DEBUG, "my likelihood")
return 0
end
function get_step_hint(state, catalog_id, bias_id)
print(LOG_DEBUG, "get_step_hint")
return 0.1
end
function log_prior_bias(state, catalog_id, bias_tilde)
print(LOG_DEBUG, "log_prior_bias")
# Change of variable bias = exp(bias_tilde)
return sum(bias_tilde.^2)
end
function generate_mock_data(state::State, ghosts::GhostPlanes, array)
end
function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias_tilde)
return 0
end
function adjoint_gradient(state::State, array, ghosts, ag)
end
function adjoint_bias(state::State, ghosts::GhostPlanes,
array, catalog_id, catalog_bias_tilde, adjoint_gradient_bias)
print(LOG_DEBUG,"Entering ag bias")
throw(BadGradient())
end
end

View file

@ -0,0 +1,96 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_julia_hmclet.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE julia_hmclet
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/julia/julia.hpp"
#include "libLSS/julia/julia_mcmc.hpp"
#include "libLSS/mcmc/global_state.hpp"
#include "libLSS/mcmc/state_element.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/tools/console.hpp"
#include "libLSS/tests/setup_hades_test_run.hpp"
#include "libLSS/samplers/julia/julia_likelihood.hpp"
#include "libLSS/physics/forwards/borg_lpt.hpp"
#include "libLSS/tools/string_tools.hpp"
#include "libLSS/hmclet/julia_hmclet.hpp"
namespace utf = boost::unit_test;
using namespace LibLSS;
using namespace LibLSS_test;
struct JuliaFixture {
static MPI_Communication *comm;
static MarkovState *state;
static BoxModel box;
JuliaFixture() {
LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
state = new MarkovState();
setup_hades_test_run(comm, 32, 600., *state);
setup_box(*state, box);
ObjectStateElement<BORGForwardModel, true> *model_elt =
new ObjectStateElement<BORGForwardModel, true>();
state->newScalar<bool>("bias_sampler_blocked", false);
state->newScalar<long>("MCMC_STEP", 0);
double ai = state->getScalar<double>("borg_a_initial");
model_elt->obj =
new BorgLptModel<>(comm, box, box, false, 1, 2.0, ai, 1.0, false);
state->newElement("BORG_model", model_elt);
}
~JuliaFixture() { Console::instance().print<LOG_DEBUG>("Destroying state."); delete state; }
};
MPI_Communication *JuliaFixture::comm = 0;
MarkovState *JuliaFixture::state;
BoxModel JuliaFixture::box;
BOOST_GLOBAL_FIXTURE(JuliaFixture);
BOOST_AUTO_TEST_CASE(julia_hmclet_fail) {
LikelihoodInfo info;
LibLSS_test::setup_likelihood_info(
*JuliaFixture::state, info);
Console::instance().print<LOG_DEBUG>(boost::format("Comm is %p") % JuliaFixture::comm);
auto density = std::make_shared<JuliaDensityLikelihood>(
JuliaFixture::comm, info, TEST_JULIA_LIKELIHOOD_CODE, "TestLikelihood");
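// NOTE: the early return below means only the likelihood construction is
// exercised; the JuliaHmcletMeta sampling steps are currently skipped.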
return;
JuliaHmcletMeta meta(JuliaFixture::comm, density, "TestLikelihood", JuliaHmclet::types::DIAGONAL, 10, 10, 0.5, true);
density->initializeLikelihood(*JuliaFixture::state);
meta.init_markov(*JuliaFixture::state);
meta.sample(*JuliaFixture::state);
}
int main(int argc, char *argv[]) {
JuliaFixture::comm = setupMPI(argc, argv);
StaticInit::execute();
Console::instance().outputToFile(
"test_julia_hmclet.txt_" +
to_string(MPI_Communication::instance()->rank()));
int ret = utf::unit_test_main(&init_unit_test, argc, argv);
StaticInit::finalize();
doneMPI();
return ret;
}

View file

@ -0,0 +1,88 @@
/*+
ARES/HADES/BORG Package -- ./extra/hmclet/libLSS/tests/test_network.cpp
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
Additional contributions from:
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
+*/
#define BOOST_TEST_MODULE julia_bind
#define BOOST_TEST_NO_MAIN
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include <CosmoTool/algo.hpp>
#include <memory>
#include <H5Cpp.h>
#include <CosmoTool/hdf5_array.hpp>
#include "libLSS/hmclet/hmclet.hpp"
namespace utf = boost::unit_test;
using CosmoTool::square;
using namespace LibLSS;
using namespace LibLSS::HMCLet;
class TestPosterior : virtual public JointPosterior {
public:
TestPosterior() : JointPosterior() {}
virtual ~TestPosterior() {}
virtual size_t getNumberOfParameters() const { return 2; }
virtual double evaluate(VectorType const &params) {
return 0.5 * square(params[0] - 1) / 10. + 0.5 * square(params[1] - 4) / 2.;
}
virtual void
adjointGradient(VectorType const &params, VectorType &params_gradient) {
params_gradient[0] = (params[0] - 1) / 10.;
params_gradient[1] = (params[1] - 4) / 2.;
}
};
BOOST_AUTO_TEST_CASE(hmclet_launch) {
auto posterior_ptr = std::make_shared<TestPosterior>();
SimpleSampler<DiagonalMassMatrix> sampler(posterior_ptr);
MPI_Communication *comm = MPI_Communication::instance();
RandomNumberMPI<GSL_RandomNumber> rgen(comm, -1);
boost::multi_array<double, 1> init_params(boost::extents[2]);
boost::multi_array<double, 1> init_step(boost::extents[2]);
init_params[0] = 100;
init_params[1] = 100;
init_step[0] = 1;
init_step[1] = 1;
sampler.calibrate(comm, rgen, 10, init_params, init_step);
boost::multi_array<double, 2> p(boost::extents[1000][2]);
for (size_t i = 0; i < p.size(); i++) {
sampler.newSample(comm, rgen, init_params);
p[i][0] = init_params[0];
p[i][1] = init_params[1];
}
H5::H5File ff("test_sample.h5", H5F_ACC_TRUNC);
CosmoTool::hdf5_write_array(ff, "hmclet", p);
}
int main(int argc, char *argv[]) {
setupMPI(argc, argv);
StaticInit::execute();
int ret = utf::unit_test_main(&init_unit_test, argc, argv);
StaticInit::finalize();
doneMPI();
return ret;
}

View file

@ -0,0 +1,27 @@
SET(EXTRA_HMCLET ${CMAKE_SOURCE_DIR}/extra/hmclet/libLSS/tests)
SET(TEST_hmclet_LIST
hmclet
dense_mass
#conv_hmc
#weights
#conv_hmc_julia
)
#SET(TEST_weights_LIBS ${JULIA_LIBRARY})
#SET(TEST_conv_hmc_julia_LIBS ${JULIA_LIBRARY})
IF(BUILD_JULIA)
SET(TEST_hmclet_LIST ${TEST_hmclet_LIST} julia_hmclet)
set_property(
SOURCE ${EXTRA_HMCLET}/test_julia_hmclet.cpp
APPEND PROPERTY COMPILE_DEFINITIONS
TEST_JULIA_LIKELIHOOD_CODE="${EXTRA_HMCLET}/test_julia.jl"
)
SET(TEST_julia_hmclet_LIBS ${JULIA_LIBRARY})
add_test(NAME julia_hmclet COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_julia_hmclet)
ENDIF()