Initial import
This commit is contained in: commit 56a50eead3
820 changed files with 192077 additions and 0 deletions
extra/hades/libLSS/hades.cmake (new file, 40 lines)
@@ -0,0 +1,40 @@
SET(EXTRA_HADES ${CMAKE_SOURCE_DIR}/extra/hades/libLSS)
SET(EXTRA_LIBLSS ${EXTRA_LIBLSS}
  ${EXTRA_HADES}/samplers/hades/base_likelihood.cpp
  ${EXTRA_HADES}/physics/likelihoods/base.cpp
  ${EXTRA_HADES}/physics/forward_model.cpp
  ${EXTRA_HADES}/samplers/rgen/hmc/hmc_density_sampler.cpp
  ${EXTRA_HADES}/samplers/hades/hades_linear_likelihood.cpp
  ${EXTRA_HADES}/samplers/core/gridLikelihoodBase.cpp
  ${EXTRA_HADES}/samplers/core/simpleLikelihood.cpp
  ${EXTRA_HADES}/samplers/core/generate_random_field.cpp
  ${EXTRA_HADES}/physics/model_io.cpp
  ${EXTRA_HADES}/physics/forwards/primordial.cpp
  ${EXTRA_HADES}/physics/forwards/fnl.cpp
  ${EXTRA_HADES}/physics/forwards/transfer_ehu.cpp
  ${EXTRA_HADES}/physics/forwards/registry.cpp
  ${EXTRA_HADES}/physics/chain_forward_model.cpp
  ${EXTRA_HADES}/physics/branch.cpp
  ${EXTRA_HADES}/physics/sum.cpp
  ${EXTRA_HADES}/physics/haar.cpp
  ${EXTRA_HADES}/physics/hades_pt.cpp
  ${EXTRA_HADES}/physics/hades_log.cpp
  ${EXTRA_HADES}/physics/forwards/upgrade.cpp
  ${EXTRA_HADES}/samplers/rgen/frozen/frozen_phase_density_sampler.cpp
  ${EXTRA_HADES}/samplers/model_params.cpp
  ${EXTRA_HADES}/samplers/bias_model_params.cpp
  ${EXTRA_HADES}/tools/hermiticity_fixup.cpp
)

include(${EXTRA_HADES}/../scripts/models.cmake)

hades_register_forward_models(
  libLSS/physics/forwards/primordial.hpp
  libLSS/physics/forwards/transfer_ehu.hpp
  libLSS/physics/forwards/fnl.hpp
  libLSS/physics/hades_pt.hpp
  libLSS/physics/haar.hpp
  libLSS/physics/hades_log.hpp
  libLSS/physics/forwards/upgrade.hpp
)
extra/hades/libLSS/physics/branch.cpp (new file, 130 lines)
@@ -0,0 +1,130 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/branch.cpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include <boost/multi_array.hpp>
#include "libLSS/physics/cosmo.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/physics/forward_model.hpp"
#include <list>
#include <boost/variant.hpp>
#include "libLSS/tools/errors.hpp"
#include "libLSS/physics/branch.hpp"

using namespace LibLSS;

BranchForwardModel::BranchForwardModel(
    MPI_Communication *comm, const BoxModel &box)
    : BORGForwardModel(comm, box) {}

BranchForwardModel::BranchForwardModel(
    MPI_Communication *comm, const BoxModel &box, const BoxModel &outbox)
    : BORGForwardModel(comm, box, outbox) {}

BranchForwardModel::~BranchForwardModel() {}

void BranchForwardModel::forwardModelSimple(CArrayRef &delta_init) {}

void BranchForwardModel::forwardModel_v2(ModelInput<3> delta_init) {
  LIBLSS_AUTO_CONTEXT(LOG_VERBOSE, ctx);

  for (auto &model : model_list) {
    model->forwardModel_v2(delta_init.shallowClone());
  }
}

void BranchForwardModel::getDensityFinal(ModelOutput<3> output) {
  error_helper<ErrorNotImplemented>(
      "getDensityFinal does not exist for BranchForwardModel.");
}

void BranchForwardModel::clear_chain() {
  auto cleaner = [](auto &u) { u.reset(); };
  boost::apply_visitor(cleaner, previous);
  boost::apply_visitor(cleaner, next);
}

void BranchForwardModel::forwardModelRsdField(
    ArrayRef &deltaf, double *vobs_ext) {}

// adjointModel auto release particles. Beware !

void BranchForwardModel::adjointModel_v2(ModelInputAdjoint<3> gradient_delta) {
  LIBLSS_AUTO_CONTEXT(LOG_VERBOSE, ctx);

  if (gradient_delta) {
    error_helper<ErrorParams>(
        "Input to adjointModel_v2 must be null for branch.");
  }
}

void BranchForwardModel::getAdjointModelOutput(
    ModelOutputAdjoint<3> ag_output) {
  // FIXME: Very dumb choice at the moment, first one has the right of choice.
  ag_output.setRequestedIO((*model_list.begin())->getPreferredInput());

  ModelOutputAdjoint<3> common_output = ag_output.makeTempLike();

  for (auto &model : model_list) {
    ModelOutputAdjoint<3> tmp_output = common_output.shallowClone();

    model->getAdjointModelOutput(std::move(tmp_output));
    switch (ag_output.current) {
    case PREFERRED_REAL:
      fwrap(ag_output.getRealOutput()) =
          fwrap(ag_output.getRealOutput()) + common_output.getRealOutput();
      break;
    case PREFERRED_FOURIER:
      fwrap(ag_output.getFourierOutput()) =
          fwrap(ag_output.getFourierOutput()) + common_output.getFourierOutput();
      break;
    default:
      error_helper<ErrorBadState>("Unknown ModelIO type");
      break;
    }
    clearAdjointGradient();
  }
}

void BranchForwardModel::releaseParticles() {
  // Fill up with the chain
  for (auto model : model_list) {
    model->releaseParticles();
  }
}

void BranchForwardModel::addModel(std::shared_ptr<BORGForwardModel> model) {
  if (get_box_model() != model->get_box_model()) {
    error_helper<ErrorParams>(
        "Invalid model configuration with IO of the chain.");
  }
  model_list.push_back(model);
}

void BranchForwardModel::setAdjointRequired(bool required) {
  for (auto model : model_list) {
    model->setAdjointRequired(required);
  }
}

void BranchForwardModel::updateCosmo() {
  // Fill up with the chain
  for (auto model : model_list) {
    model->setCosmoParams(cosmo_params);
  }
}

void BranchForwardModel::clearAdjointGradient() {
  for (auto model : model_list) {
    model->clearAdjointGradient();
  }
}

// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2020
extra/hades/libLSS/physics/branch.hpp (new file, 76 lines)
@@ -0,0 +1,76 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/branch.hpp
    Copyright (C) 2018-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2018-2019 Doogesh Kodi Ramanah <ramanah@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_BRANCH_FORWARD_MODEL_HPP
# define __LIBLSS_BRANCH_FORWARD_MODEL_HPP

# include <boost/multi_array.hpp>
# include "libLSS/physics/cosmo.hpp"
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/physics/forward_model.hpp"
# include <list>
# include <boost/variant.hpp>

namespace LibLSS {

  class BranchForwardModel : public BORGForwardModel {
  public:
    ModelOutput<3> final_output;
    ModelOutputAdjoint<3> ag_final_output;
    typedef std::shared_ptr<DFT_Manager::U_ArrayReal> S_U_ArrayReal;
    typedef std::shared_ptr<DFT_Manager::U_ArrayFourier> S_U_ArrayFourier;
    S_U_ArrayReal final_real, ag_final_real;
    S_U_ArrayFourier final_fourier, ag_final_fourier;
    boost::variant<S_U_ArrayReal, S_U_ArrayFourier> previous, next;

    BranchForwardModel(MPI_Communication *comm, const BoxModel &box);

    BranchForwardModel(
        MPI_Communication *comm, const BoxModel &box, const BoxModel &outbox);

    ~BranchForwardModel() override;

    void forwardModelSimple(CArrayRef &delta_init) override;

    void forwardModel_v2(ModelInput<3> delta_init) override;

    void getDensityFinal(ModelOutput<3> output) override;

    void clear_chain();

    void forwardModelRsdField(ArrayRef &deltaf, double *vobs_ext) override;

    // adjointModel auto release particles. Beware !

    void adjointModel_v2(ModelInputAdjoint<3> gradient_delta) override;
    void getAdjointModelOutput(ModelOutputAdjoint<3> ag_output) override;
    void releaseParticles() override;

    void addModel(std::shared_ptr<BORGForwardModel> model);

    void setAdjointRequired(bool required) override;
    void clearAdjointGradient() override;

  protected:
    std::list<std::shared_ptr<BORGForwardModel>> model_list;

    void updateCosmo() override;
  };
} // namespace LibLSS

#endif

// ARES TAG: authors_num = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2020
// ARES TAG: name(1) = Doogesh Kodi Ramanah
// ARES TAG: email(1) = ramanah@iap.fr
// ARES TAG: year(1) = 2018-2019
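The branch model above fans one input field out to several sub-models. A minimal, hypothetical usage sketch follows (not part of this commit; comm, box, ic_field, model_a and model_b are assumed to exist, with ic_field a Fourier-space CArrayRef on the shared box):

// Hypothetical sketch: feed the same input to two sub-models sharing `box`.
auto branch = std::make_shared<LibLSS::BranchForwardModel>(comm, box);
branch->addModel(model_a); // addModel() checks that the boxes match
branch->addModel(model_b);
branch->forwardModel_v2(LibLSS::ModelInput<3>(branch->lo_mgr, box, ic_field));
// Each sub-model now holds its own result; BranchForwardModel::getDensityFinal()
// is intentionally not implemented, so outputs are collected per sub-model.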
extra/hades/libLSS/physics/chain_forward_model.cpp (new file, 357 lines)
@@ -0,0 +1,357 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/chain_forward_model.cpp
    Copyright (C) 2018-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2018-2019 Doogesh Kodi Ramanah <ramanah@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include <boost/multi_array.hpp>
#include "libLSS/physics/cosmo.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/physics/forward_model.hpp"
#include <list>
#include <boost/variant.hpp>
#include "libLSS/tools/errors.hpp"
#include "libLSS/physics/chain_forward_model.hpp"
#include <boost/variant.hpp>
#include "libLSS/tools/overload.hpp"

using namespace LibLSS;

ChainForwardModel::ChainForwardModel(
    MPI_Communication *comm, const BoxModel &box)
    : BORGForwardModel(comm, box), accumulate(false) {}

ChainForwardModel::ChainForwardModel(
    MPI_Communication *comm, const BoxModel &box, const BoxModel &outbox)
    : BORGForwardModel(comm, box, outbox), accumulate(false) {}

ChainForwardModel::~ChainForwardModel() {}

void ChainForwardModel::accumulateAdjoint(bool do_accumulate) {
  accumulate = do_accumulate;
}

bool ChainForwardModel::densityInvalidated() const {
  bool r = false;

  for (auto const &model : model_list) {
    r = r || model->densityInvalidated();
  }
  return r;
}

void ChainForwardModel::forwardModelSimple(CArrayRef &delta_init) {}

void ChainForwardModel::forwardModel_v2(ModelInput<3> delta_init) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
  ModelInput<3> input_array;
  ModelInput<3> next_input(std::move(delta_init));
  ModelOutput<3> next_output;
  S_U_ArrayReal tmp_real;
  S_U_ArrayFourier tmp_fourier;
  BoxModel box_in, box_out;

  // Fill up with the chain
  auto iter = model_list.begin();
  while (iter != model_list.end()) {
    auto model = *iter;
    ++iter;

    bool final_pass = (iter == model_list.end());
    input_array = std::move(next_input);
    PreferredIO nextIO =
        final_pass ? model->getPreferredOutput() : (*iter)->getPreferredInput();
    if (nextIO == PREFERRED_NONE) {
      nextIO = model->getPreferredOutput();
      if (nextIO == PREFERRED_NONE) {
        nextIO = input_array.current;
      }
    }

    box_in = model->get_box_model();
    box_out = model->get_box_model_output();
    // TODO: check box compatibilities

    switch (nextIO) {
    case PREFERRED_REAL:
      tmp_real.reset();
      tmp_real = std::move(model->out_mgr->allocate_ptr_array());
      next_output = std::move(ModelOutput<3>(
          model->out_mgr, box_out, tmp_real->get_array(), tmp_real));
      next_input = std::move(ModelInput<3>(
          model->out_mgr, box_out, tmp_real->get_array(), tmp_real));
      if (final_pass) {
        final_real = tmp_real;
        final_output = std::move(ModelOutput<3>(
            model->out_mgr, box_out, final_real->get_array(), final_real));
      }
      next = tmp_real;
      break;
    case PREFERRED_FOURIER:
      tmp_fourier.reset();
      tmp_fourier = std::move(model->out_mgr->allocate_ptr_complex_array());
      next_output = std::move(ModelOutput<3>(
          model->out_mgr, box_out, tmp_fourier->get_array(), tmp_fourier));
      next_input = std::move(ModelInput<3>(
          model->out_mgr, box_out, tmp_fourier->get_array(), tmp_fourier));
      if (final_pass) {
        final_fourier = tmp_fourier;
        final_output = std::move(ModelOutput<3>(
            model->out_mgr, box_out, final_fourier->get_array(),
            final_fourier));
      }
      next = tmp_fourier;
      break;
    default:
      error_helper<ErrorNotImplemented>("Invalid IO type.");
      break;
    }
    model->forwardModel_v2(std::move(input_array));
    model->getDensityFinal(std::move(next_output));
    previous = next;
  }
}

void ChainForwardModel::getDensityFinal(ModelOutput<3> output) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  output.setRequestedIO(final_output.active);
  output.copyFrom(final_output);
  // Try to save some memory by releasing early the temporary allocation.
  final_real.reset();
  final_fourier.reset();
  clear_chain();
}

void ChainForwardModel::clear_chain() {
  auto cleaner = [](auto &u) { u.reset(); };
  boost::apply_visitor(cleaner, previous);
  boost::apply_visitor(cleaner, next);
}

void ChainForwardModel::forwardModelRsdField(
    ArrayRef &deltaf, double *vobs_ext) {}

static void accumulator(ModelInputBase<3> &accum, ModelInputBase<3> &input) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  typedef ModelIO<3>::CArrayRef CArrayRef;
  typedef ModelIO<3>::ArrayRef ArrayRef;

  // This ensures that we capture incompatibilities.
  accum.needDestroyInput();
  boost::apply_visitor(
      overload(
          [&input](CArrayRef *v) {
            // The preference is applied to the one being accumulated on the first ag.
            input.setRequestedIO(PREFERRED_FOURIER);
            fwrap(*v) = input.getFourierConst();
          },
          [&input](ArrayRef *v) {
            // The preference is applied to the one being accumulated on the first ag.
            input.setRequestedIO(PREFERRED_REAL);
            fwrap(*v) = input.getRealConst();
          },
          [&](auto const *v) {
            Console::instance().c_assert(false, "Impossible situation");
          }),
      accum.getHolder());
}

// adjointModel auto release particles. Beware !

void ChainForwardModel::adjointModel_v2(ModelInputAdjoint<3> gradient_delta) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);

  if (accumulate) {
    if (!accumulateAg) {
      accumulateAg = std::move(gradient_delta);
    } else {
      accumulator(accumulateAg, gradient_delta);
    }
    return;
  } else {
    accumulateAg = std::move(gradient_delta);
  }

  trigger_ag();
}

void ChainForwardModel::trigger_ag() {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  ModelInputAdjoint<3> next_ag_input(std::move(accumulateAg)), ag_input;
  ModelOutputAdjoint<3> ag_output;
  std::shared_ptr<DFT_Manager::U_ArrayReal> tmp_real;
  std::shared_ptr<DFT_Manager::U_ArrayFourier> tmp_fourier;
  BoxModel box_in, box_out;
  PreferredIO nextIO;

  // Fill up with the chain
  // Ordering here should be opposite to that in Forward model
  auto iter = model_list_adjoint.begin();
  while (iter != model_list_adjoint.end()) {
    auto model = *iter;

    ++iter;
    bool final_pass = (iter == model_list_adjoint.end());

    box_in = model->get_box_model_output();
    box_out = model->get_box_model();

    nextIO =
        final_pass ? model->getPreferredInput() : (*iter)->getPreferredOutput();
    if (nextIO == PREFERRED_NONE) {
      nextIO = model->getPreferredInput();
      if (nextIO == PREFERRED_NONE) {
        nextIO = next_ag_input.current;
      }
    }

    ag_input = std::move(next_ag_input);

    switch (nextIO) {
    case PREFERRED_REAL:
      ctx.print("Next wants real");
      tmp_real.reset();
      tmp_real = std::move(model->lo_mgr->allocate_ptr_array());
      ag_output = std::move(ModelOutputAdjoint<3>(
          model->lo_mgr, box_out, tmp_real->get_array(), tmp_real));
      next_ag_input = std::move(ModelInputAdjoint<3>(
          model->lo_mgr, box_out, tmp_real->get_array(), tmp_real));
      if (final_pass) {
        ag_final_real = tmp_real;
        ag_final_output = std::move(ModelOutputAdjoint<3>(
            model->lo_mgr, box_out, ag_final_real->get_array(), ag_final_real));
      }
      next = tmp_real;
      break;
    case PREFERRED_FOURIER:
      ctx.print("Next wants Fourier");
      tmp_fourier.reset();
      tmp_fourier = std::move(model->lo_mgr->allocate_ptr_complex_array());
      ag_output = std::move(ModelOutputAdjoint<3>(
          model->lo_mgr, box_out, tmp_fourier->get_array(), tmp_fourier));
      next_ag_input = std::move(ModelInputAdjoint<3>(
          model->lo_mgr, box_out, tmp_fourier->get_array(), tmp_fourier));
      if (final_pass) {
        ag_final_fourier = tmp_fourier;
        ag_final_output = std::move(ModelOutputAdjoint<3>(
            model->lo_mgr, box_out, ag_final_fourier->get_array(),
            ag_final_fourier));
      }
      next = tmp_fourier;
      break;
    default:
      error_helper<ErrorNotImplemented>("Invalid IO type");
      break;
    }

    model->adjointModel_v2(std::move(ag_input));
    model->getAdjointModelOutput(std::move(ag_output));
    previous = next;
  }
}

void ChainForwardModel::getAdjointModelOutput(ModelOutputAdjoint<3> ag_output) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  if (accumulate && accumulateAg) {
    trigger_ag();
  }

  ag_output.setRequestedIO(ag_final_output.active);
  ag_output.copyFrom(ag_final_output);

  ag_final_fourier.reset();
  ag_final_real.reset();
  clear_chain();
}

void ChainForwardModel::releaseParticles() {
  // Fill up with the chain
  for (auto model : model_list) {
    model->releaseParticles();
  }
}

void ChainForwardModel::addModel(
    std::shared_ptr<BORGForwardModel> model, std::string const &n) {
  named_models[n] = model;
  addModel(model);
}

std::shared_ptr<BORGForwardModel>
ChainForwardModel::queryModel(std::string const &n) {
  return named_models[n];
}

void ChainForwardModel::addModel(std::shared_ptr<BORGForwardModel> model) {
  if (model_list.size() == 0) {
    if (box_input != model->get_box_model()) {
      error_helper<ErrorParams>(
          "Invalid model configuration with IO of the chain.");
    }
  } else {
    if (box_output != model->get_box_model()) {
      error_helper<ErrorParams>(
          "Invalid model configuration with IO of the chain.");
    }
  }
  model_list.push_back(model);
  model_list_adjoint.insert(model_list_adjoint.begin(), model);

  box_output = model->get_box_model_output();
  out_mgr = model->out_mgr;
}

void ChainForwardModel::setAdjointRequired(bool required) {
  for (auto &model : model_list) {
    model->setAdjointRequired(required);
  }
}

void ChainForwardModel::updateCosmo() {
  // Fill up with the chain
  for (auto &model : model_list) {
    model->setCosmoParams(cosmo_params);
  }
}

void ChainForwardModel::setModelParams(ModelDictionnary const &params) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  for (auto &model : model_list) {
    model->setModelParams(params);
  }
}

void ChainForwardModel::clearAdjointGradient() {
  for (auto &model : model_list) {
    model->clearAdjointGradient();
  }
}

boost::any ChainForwardModel::getModelParam(
    std::string const &name, std::string const &parameter) {
  // Nothing to return here
  if (name == modelName)
    return boost::any();

  for (auto &model : model_list) {
    auto ret = model->getModelParam(name, parameter);
    if (!ret.empty())
      return ret;
  }
  return boost::any();
}

// ARES TAG: authors_num = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2020
// ARES TAG: name(1) = Doogesh Kodi Ramanah
// ARES TAG: email(1) = ramanah@iap.fr
// ARES TAG: year(1) = 2018-2019
extra/hades/libLSS/physics/chain_forward_model.hpp (new file, 99 lines)
@@ -0,0 +1,99 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/chain_forward_model.hpp
    Copyright (C) 2018-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2018-2019 Doogesh Kodi Ramanah <ramanah@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_CHAIN_FORWARD_MODEL_HPP
# define __LIBLSS_CHAIN_FORWARD_MODEL_HPP

# include <boost/multi_array.hpp>
# include "libLSS/physics/cosmo.hpp"
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/physics/forward_model.hpp"
# include <list>
# include <map>
# include <boost/variant.hpp>

namespace LibLSS {

  class ChainForwardModel : public BORGForwardModel {
  public:
    ModelOutput<3> final_output;
    ModelOutputAdjoint<3> ag_final_output;
    typedef std::shared_ptr<DFT_Manager::U_ArrayReal> S_U_ArrayReal;
    typedef std::shared_ptr<DFT_Manager::U_ArrayFourier> S_U_ArrayFourier;
    S_U_ArrayReal final_real, ag_final_real;
    S_U_ArrayFourier final_fourier, ag_final_fourier;
    boost::variant<S_U_ArrayReal, S_U_ArrayFourier> previous, next;
    bool accumulate;
    ModelInputAdjoint<3> accumulateAg;

    ChainForwardModel(MPI_Communication *comm, const BoxModel &box);

    ChainForwardModel(
        MPI_Communication *comm, const BoxModel &box, const BoxModel &outbox);

    virtual ~ChainForwardModel();

    bool densityInvalidated() const override;

    void forwardModelSimple(CArrayRef &delta_init) override;

    void forwardModel_v2(ModelInput<3> delta_init) override;

    void getDensityFinal(ModelOutput<3> output) override;

    void clear_chain();

    void forwardModelRsdField(ArrayRef &deltaf, double *vobs_ext) override;

    // adjointModel auto release particles. Beware !

    void adjointModel_v2(ModelInputAdjoint<3> gradient_delta) override;

    void getAdjointModelOutput(ModelOutputAdjoint<3> ag_output) override;

    void releaseParticles() override;

    void addModel(std::shared_ptr<BORGForwardModel> model);

    void
    addModel(std::shared_ptr<BORGForwardModel> model, std::string const &name);

    std::shared_ptr<BORGForwardModel> queryModel(std::string const &name);

    void setAdjointRequired(bool required) override;

    void clearAdjointGradient() override;

    void setModelParams(ModelDictionnary const &params) override;

    boost::any getModelParam(
        std::string const &model_name, std::string const &parameter) override;

    void accumulateAdjoint(bool do_accumulate) override;

  protected:
    std::list<std::shared_ptr<BORGForwardModel>> model_list;
    std::list<std::shared_ptr<BORGForwardModel>> model_list_adjoint;
    std::map<std::string, std::shared_ptr<BORGForwardModel>> named_models;

    void updateCosmo() override;

    void trigger_ag();
  };
} // namespace LibLSS

#endif

// ARES TAG: authors_num = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2020
// ARES TAG: name(1) = Doogesh Kodi Ramanah
// ARES TAG: email(1) = ramanah@iap.fr
// ARES TAG: year(1) = 2018-2019
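For orientation, a hypothetical assembly of such a chain could look as follows (not part of this commit; comm, box and cosmo are assumed to exist, 0.001 is an arbitrary final scale factor, and ForwardPrimordial/ForwardFNL are the models defined further down in this import):

// Hypothetical sketch: chain two of the forward models registered by this import.
auto chain = std::make_shared<LibLSS::ChainForwardModel>(comm, box);
chain->addModel(
    std::make_shared<LibLSS::ForwardPrimordial>(comm, box, 0.001), "primordial");
chain->addModel(std::make_shared<LibLSS::ForwardFNL>(comm, box), "fnl");
chain->setCosmoParams(cosmo); // forwarded to every sub-model via updateCosmo()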
extra/hades/libLSS/physics/forward_model.cpp (new file, 114 lines)
@@ -0,0 +1,114 @@
/// Docs for forward_model
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forward_model.cpp
    Copyright (C) 2014-2019 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2019 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2016-2019)

    This program is free software; you can redistribute it and/or modify it
    under the terms of either the CeCILL license or the GNU General Public
    license, as included with the software package.

    The text of the license is located in Licence_CeCILL_V2.1-en.txt
    and GPL.txt in the root directory of the source package.

+*/

#include <boost/preprocessor/stringize.hpp>
#include <boost/preprocessor/seq/for_each.hpp>
#include <boost/multi_array.hpp>
#include "libLSS/physics/cosmo.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/tools/mpi_fftw_helper.hpp"
#include "libLSS/tools/errors.hpp"
#include "libLSS/tools/hdf5_type.hpp"
#include "libLSS/physics/forward_model.hpp"

using namespace LibLSS;

void BORGForwardModel::setup(bool distinct_io) {
  Console::instance().print<LOG_VERBOSE>("Setup FWD");
  LibLSS::ConsoleContext<LOG_VERBOSE> ctx("BORGForwardModel::setup");
  volume = L0 * L1 * L2;
  volNorm = volume / (N0 * N1 * N2);

  vobs.resize(boost::extents[3]);

  lo_mgr = std::make_unique<DFT_Manager>(
      box_input.N0, box_input.N1, box_input.N2, comm);

  startN0 = lo_mgr->startN0;
  localN0 = lo_mgr->localN0;
  N2_HC = lo_mgr->N2_HC;
  N2real = lo_mgr->N2real;

  if (distinct_io) {
    out_mgr = std::make_unique<DFT_Manager>(
        box_output.N0, box_output.N1, box_output.N2, comm);
  } else
    out_mgr = lo_mgr;

  analysis_plan = 0;
  synthesis_plan = 0;
}

void BORGForwardModel::setupDefault() {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  ctx.print("Allocate real");
  tmp_real_field = lo_mgr->allocate_ptr_array();
  ctx.print("Allocate complex");
  tmp_complex_field = lo_mgr->allocate_ptr_complex_array();

  ctx.print("Plan r2c");
  analysis_plan = lo_mgr->create_r2c_plan(
      tmp_real_field->get_array().data(),
      tmp_complex_field->get_array().data());
  ctx.print("Plan c2r");
  synthesis_plan = lo_mgr->create_c2r_plan(
      tmp_complex_field->get_array().data(),
      tmp_real_field->get_array().data());
}

void BORGForwardModel::setCosmoParams(
    const CosmologicalParameters &p_cosmo_params) {
  this->cosmo_params = p_cosmo_params;
  this->params["cosmology"] = p_cosmo_params;
  updateCosmo();
}

#define COSMO_ATTRIBUTE(r, q, element) \
  { \
    BOOST_PP_STRINGIZE(element), \
        [](CosmologicalParameters &p, double v) { p.element = v; } \
  },

// clang-format off
static std::map<
    std::string, std::function<void(CosmologicalParameters &, double)>>
    g_dispatcher{
        BOOST_PP_SEQ_FOR_EACH(COSMO_ATTRIBUTE, _,
          (sigma8)(omega_r)
          (omega_m)(omega_k)(omega_b)(omega_q)(w)(n_s)(fnl)(wprime)(h)
        )
    };
// clang-format on

void BORGForwardModel::setModelParams(ModelDictionnary const &params) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  bool runUpdateCosmo = false;
  for (auto &x : params) {
    // Special cases, cosmology can also be updated that way.
    if (x.first.rfind("cosmology.") == 0) {
      ctx.format("Updating cosmo params %s", x.first.substr(10));
      g_dispatcher[x.first.substr(10)](
          this->cosmo_params, boost::any_cast<double>(x.second));
      runUpdateCosmo = true;
    } else
      this->params[x.first] = x.second;
  }
  if (runUpdateCosmo)
    setCosmoParams(this->cosmo_params);
}
extra/hades/libLSS/physics/forward_model.hpp (new file, 399 lines)
@@ -0,0 +1,399 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forward_model.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_BORG_FORWARD_MODEL_HPP
#define __LIBLSS_BORG_FORWARD_MODEL_HPP

#include <map>
#include <boost/any.hpp>
#include <boost/multi_array.hpp>
#include "libLSS/physics/cosmo.hpp"
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/tools/mpi_fftw_helper.hpp"
#include "libLSS/tools/errors.hpp"
#include "libLSS/tools/memusage.hpp"
#include "libLSS/tools/hdf5_type.hpp"
#include "libLSS/physics/model_io.hpp"

namespace LibLSS {

  typedef std::map<std::string, boost::any> ModelDictionnary;

  struct BORGForwardModelTypes {
    typedef FFTW_Manager_3d<double> DFT_Manager;
    typedef boost::multi_array<double, 2> PhaseArray;
    typedef boost::multi_array_ref<double, 2> PhaseArrayRef;
    typedef ModelIO<3>::ArrayRef ArrayRef;
    typedef ModelIO<3>::CArrayRef CArrayRef;
  };

  class outOfBoundParam : virtual public ErrorBase {
  public:
    explicit outOfBoundParam(std::string const &p) : ErrorBase(p) {}
  };

  /**
   * This defines the interface for a forward physical model in BORG.
   */
  class BORGForwardModel : public BORGForwardModelTypes {
  public:
    std::shared_ptr<DFT_Manager> lo_mgr;
    std::shared_ptr<DFT_Manager> out_mgr;

    MPI_Communication *communicator() { return comm; }

    BORGForwardModel(BORGForwardModel &&other) = default;

    explicit BORGForwardModel(MPI_Communication *comm, const BoxModel &box)
        : comm(comm), L0(box.L0), L1(box.L1), L2(box.L2), N0(box.N0),
          N1(box.N1), N2(box.N2), xmin0(box.xmin0), xmin1(box.xmin1),
          xmin2(box.xmin2), forwardModelHold(false), box_input(box),
          box_output(box) {
      setup(false);
    }

    explicit BORGForwardModel(
        MPI_Communication *comm, const BoxModel &box, const BoxModel &box_out)
        : comm(comm), L0(box.L0), L1(box.L1), L2(box.L2), N0(box.N0),
          N1(box.N1), N2(box.N2), xmin0(box.xmin0), xmin1(box.xmin1),
          xmin2(box.xmin2), forwardModelHold(false), box_input(box),
          box_output(box_out) {
      setup(true);
    }

    virtual ~BORGForwardModel() {
      if (analysis_plan) {
        lo_mgr->destroy_plan(analysis_plan);
        lo_mgr->destroy_plan(synthesis_plan);
      }
    }

    void setName(std::string const &name_) { modelName = name_; }

    void holdParticles() { forwardModelHold = true; }

    // Default is the standard historical behaviour
    virtual PreferredIO getPreferredInput() const { return PREFERRED_FOURIER; }
    virtual PreferredIO getPreferredOutput() const { return PREFERRED_REAL; }

    // This executes the forward model but relies on auxiliary functions to pass
    // down the result. So it holds internal information and needs a release call.
    virtual void forwardModelSimple(CArrayRef &delta_init) {
      forwardModel_v2(ModelInput<3>(lo_mgr, box_input, delta_init, true));
    }

    virtual void forwardModel(
        CArrayRef &delta_init, ArrayRef &delta_output, bool adjointNext) {
      // Default implementation uses the v2 interface.
      // Warning: adjointNext does not exist anymore in v2; it is superseded by setAdjointRequired.
      fwrap(delta_init) = fwrap(delta_init) * get_box_model().volume();
      forwardModel_v2(ModelInput<3>(lo_mgr, box_input, delta_init));
      getDensityFinal(ModelOutput<3>(lo_mgr, box_output, delta_output));
    }

    virtual void forwardModel(
        CArrayRef &delta_init, ArrayRef &delta_output, ArrayRef &vx,
        ArrayRef &vy, ArrayRef &vz, bool adjointNext) {
      error_helper<ErrorNotImplemented>(
          "forwardModel does not support velocity output");
    }
    virtual void forwardModelRsdField(ArrayRef &deltaf, double *vobs_ext) {
      error_helper<ErrorNotImplemented>(
          "forwardModelRsdField not supported here.");
    }

    /**
     * @brief Run a forwardModel with APIv2. Only the input must be provided.
     *
     * @param delta_init ModelInput object holding the input array.
     */
    virtual void forwardModel_v2(ModelInput<3> delta_init) {
      error_helper<ErrorNotImplemented>("forwardModel_v2 not supported here.");
    }

    /**
     * @brief Get the output of the forward model.
     *
     * @param delta_output ModelOutput object holding the output array.
     */
    virtual void getDensityFinal(ModelOutput<3> delta_output) {
      error_helper<ErrorNotImplemented>(
          "forwardModel_v2 (getDensityFinal) not supported here.");
    }

    /**
     * @brief Runs the adjoint model on the provided input vector.
     *
     * @param ag_delta_input Input vector for the adjoint model.
     */
    virtual void adjointModel_v2(ModelInputAdjoint<3> ag_delta_input) {
      error_helper<ErrorNotImplemented>("adjointModel_v2 not supported here.");
    }

    /**
     * @brief Changes the behavior of adjointModel_v2 to accumulate all the vectors prior to computing the result.
     *
     * This changes the behavior of the adjoint forward model to accept and
     * accumulate new adjoint vectors before computing the final result through
     * `getAdjointModelOutput`.
     *
     * @param do_accumulate switch on the accumulate behaviour
     */
    virtual void accumulateAdjoint(bool do_accumulate) {
      error_helper<ErrorNotImplemented>("accumulateAdjoint not supported here.");
    }

    /**
     * @brief Retrieve the output vector after the adjoint model has been run.
     *
     * @param ag_delta_output
     */
    virtual void getAdjointModelOutput(ModelOutputAdjoint<3> ag_delta_output) {
      error_helper<ErrorNotImplemented>(
          "adjointModel_v2 (getAdjointModelOutput) not supported here.");
    }

    /**
     * @brief Apply the Jacobian
     *
     * @param gradient_delta
     */
    [[deprecated("Replaced by adjointModel_v2, with better API")]]
    virtual void adjointModel(ArrayRef &gradient_delta) {
      adjointModel_v2(ModelInputAdjoint<3>(lo_mgr, box_input, gradient_delta));
      getAdjointModelOutput(
          ModelOutputAdjoint<3>(lo_mgr, box_output, gradient_delta));
    }

    virtual void releaseParticles() {}

    void setCosmoParams(const CosmologicalParameters &p_cosmo_params);

    // FIXME: Add a setModelParams API point with model name

    /**
     * @brief Set model parameters for all models subtended by this one.
     *
     * @param params a dictionary of parameters
     */
    virtual void setModelParams(ModelDictionnary const &params);

    // FIXME: Add a getModelParam without name.

    /**
     * @brief Query a single parameter from a specific sub-model.
     */
    virtual boost::any
    getModelParam(std::string const &name, std::string const &parameterName) {
      return boost::any();
    }

    void setObserver(const ArrayType1d::ArrayType &v) { this->vobs = v; }

    /**
     * @brief Indicate whether an adjoint model is required.
     * The caller indicates it wants to be able to run adjointGradient.
     * This may involve allocating a lot more memory during forward
     * evaluation.
     *
     * @param on
     */
    virtual void setAdjointRequired(bool on) {}

    /**
     * @brief Clear the internal buffers for the adjoint gradient.
     */
    virtual void clearAdjointGradient() {}

    void save(CosmoTool::H5_CommonFileGroup &fg) {}

    void restore(CosmoTool::H5_CommonFileGroup &fg) {}

    BoxModel get_box_model() const {
      BoxModel box;

      box.L0 = L0;
      box.L1 = L1;
      box.L2 = L2;
      box.N0 = N0;
      box.N1 = N1;
      box.N2 = N2;
      box.xmin0 = xmin0;
      box.xmin1 = xmin1;
      box.xmin2 = xmin2;
      return box;
    }

    BoxModel get_box_model_output() const { return box_output; }

    virtual bool densityInvalidated() const { return true; }

  private:
    BORGForwardModel(const BORGForwardModel &) {}
    BORGForwardModel &operator=(const BORGForwardModel &) { return *this; }

    void setup(bool distinct_io);

  protected:
    void setupDefault();

    MPI_Communication *comm;
    double L0, L1, L2;
    double volume, volNorm;
    long N0, N1, N2, startN0, localN0, N2_HC, N2real;
    double xmin0, xmin1, xmin2;
    DFT_Manager::plan_type synthesis_plan, analysis_plan;
    bool forwardModelHold;
    BoxModel box_input, box_output;

    std::unique_ptr<DFT_Manager::U_ArrayFourier> tmp_complex_field;
    std::unique_ptr<DFT_Manager::U_ArrayReal> tmp_real_field;

    typedef DFT_Manager::U_ArrayReal U_Array;
    typedef DFT_Manager::U_ArrayFourier U_CArray;

    typedef std::unique_ptr<U_Array> U_Array_p;
    typedef std::unique_ptr<U_CArray> U_CArray_p;

    typedef U_Array::array_type Array;
    typedef U_CArray::array_type CArray;

    CosmologicalParameters cosmo_params;
    ModelDictionnary params;
    ArrayType1d::ArrayType vobs;
    std::string modelName;

    virtual void updateCosmo() {}

    void ensureInputEqualOutput() {
      if (box_input == box_output)
        return;
      error_helper<ErrorBadState>(
          "Input box must be the same as the output box.");
    }
  };

  /**
   * This defines a gravity model which uses particles to trace matter flows.
   */
  class ParticleBasedForwardModel : public BORGForwardModel {
  public:
    using BORGForwardModel::BORGForwardModel;

    typedef boost::multi_array<double, 2>::const_array_view<2>::type
        PhaseSubArray;
    typedef boost::multi_array<double, 2>::array_view<2>::type PhaseSubArrayRW;
    typedef boost::multi_array<size_t, 1>::const_array_view<1>::type IdSubArray;
    typedef std::function<void(
        double, size_t, IdSubArray, PhaseSubArray, PhaseSubArray)>
        StepNotifier;

    StepNotifier currentNotifier;

    virtual IdSubArray getLagrangianIdentifiers() const {
      boost::multi_array_types::index_gen i_gen;
      typedef boost::multi_array_types::index_range range;

      error_helper<ErrorNotImplemented>(
          "getLagrangianIdentifiers is not implemented for this model.");
      return boost::multi_array_ref<size_t, 1>(
          0, boost::extents[0])[i_gen[range()]];
    }

    virtual size_t getNumberOfParticles() const = 0;

    /**
     * @brief Get the Particle Positions object
     *
     * @return PhaseSubArray
     */
    virtual PhaseSubArray getParticlePositions() = 0;

    /**
     * @brief Get the Particle Velocities object
     *
     * @return PhaseSubArray
     */
    virtual PhaseSubArray getParticleVelocities() = 0;

    /**
     * @brief Get the Velocity Multiplier
     *
     * @return double
     */
    virtual double getVelocityMultiplier() { return 1.0; }

    /**
     * @brief Get the Supersampling Rate
     *
     * @return unsigned int
     */
    virtual unsigned int getSupersamplingRate() const = 0;

    /**
     * @brief Compute the adjoint gradient on the particle positions and velocities.
     *
     * Not all models may support this. The default implementation triggers an error.
     *
     * @param grad_pos
     * @param grad_vel
     */
    virtual void
    adjointModelParticles(PhaseArrayRef &grad_pos, PhaseArrayRef &grad_vel) {
      error_helper<ErrorNotImplemented>(
          "adjointModelParticles is not implemented in this model.");
    }

    virtual void setStepNotifier(StepNotifier notifier) {
      currentNotifier = notifier;
    }
  };

  /**
   * This is a type alias to specify how to store the BORG gravity model
   * in the MarkovState dictionary.
   */
  typedef SharedObjectStateElement<BORGForwardModel> BorgModelElement;

  template <typename Model>
  void borgSaveParticles(
      CosmoTool::H5_CommonFileGroup &g, Model &m, bool savepos, bool savevel,
      int step = -1) {
    typedef boost::multi_array<double, 2> VectorArray;
    typedef UninitializedArray<VectorArray> U_VectorArray;
    using range = boost::multi_array_types::index_range;
    size_t numPart = m.getNumberOfParticles();
    U_VectorArray pos_vel(boost::extents[numPart][3]);

    if (savepos) {
      m.copyParticlePositions(pos_vel.get_array(), step);
      CosmoTool::hdf5_write_array(g, "u_pos", pos_vel.get_array());
    }

    if (savevel) {
      m.copyParticleVelocities(pos_vel.get_array(), step);
      CosmoTool::hdf5_write_array(g, "u_vel", pos_vel.get_array());
    }

    auto ids = m.getLagrangianIdentifiers();
    typename decltype(ids)::index_gen i_gen;

    boost::multi_array<size_t, 1> final_ids(boost::extents[numPart]);
    LibLSS::copy_array(final_ids, ids[i_gen[range(0, numPart)]]);

    CosmoTool::hdf5_write_array(g, "u_lagrangian_id", final_ids);
  }

} // namespace LibLSS

#endif
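As an orientation aid (not part of the commit), the v2 calling convention declared above can be driven from the caller side as in this sketch, which simply mirrors the default forwardModel() wrapper; run_forward, ic_field and rho_out are placeholder names:

// Hypothetical caller-side sketch of the v2 API, mirroring BORGForwardModel::forwardModel().
void run_forward(LibLSS::BORGForwardModel &model,
                 LibLSS::BORGForwardModel::CArrayRef &ic_field,
                 LibLSS::BORGForwardModel::ArrayRef &rho_out) {
  using namespace LibLSS;
  // Hand the initial conditions to the model; it may request REAL or FOURIER IO internally.
  model.forwardModel_v2(
      ModelInput<3>(model.lo_mgr, model.get_box_model(), ic_field));
  // Collect the evolved density on the output box.
  model.getDensityFinal(
      ModelOutput<3>(model.out_mgr, model.get_box_model_output(), rho_out));
}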
extra/hades/libLSS/physics/forwards/fnl.cpp (new file, 77 lines)
@@ -0,0 +1,77 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/fnl.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include <set>
#include <map>
#include "libLSS/physics/forwards/fnl.hpp"
#include "libLSS/physics/forwards/registry.hpp"
#include "libLSS/physics/model_io.hpp"
#include "libLSS/samplers/core/powerspec_tools.hpp"
#include "libLSS/tools/itertools.hpp"
#include <CosmoTool/cosmopower.hpp>

using namespace LibLSS;
ForwardFNL::ForwardFNL(MPI_Communication *comm, const BoxModel &box)
    : BORGForwardModel(comm, box) {
  ensureInputEqualOutput();
}

void ForwardFNL::forwardModel_v2(ModelInput<3> delta_init) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  // Setup the IO basis that is required for this forward.
  delta_init.setRequestedIO(PREFERRED_REAL);

  hold_input = std::move(delta_init);
}

void ForwardFNL::getDensityFinal(ModelOutput<3> delta_output) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  delta_output.setRequestedIO(PREFERRED_REAL);
  auto w_delta_init = fwrap(hold_input.getReal());
  auto w_delta_output = fwrap(delta_output.getRealOutput());

  w_delta_output =
      (w_delta_init * w_delta_init * cosmo_params.fnl + w_delta_init);
}

void ForwardFNL::adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  in_gradient_delta.setRequestedIO(PREFERRED_REAL);
  hold_ag_input = std::move(in_gradient_delta);
}

void ForwardFNL::getAdjointModelOutput(
    ModelOutputAdjoint<3> out_gradient_delta) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  out_gradient_delta.setRequestedIO(PREFERRED_REAL);
  auto w_in_gradient = fwrap(hold_ag_input.getReal());
  auto w_out_gradient = fwrap(out_gradient_delta.getRealOutput());
  auto w_delta_init = fwrap(hold_input.getReal());

  w_out_gradient =
      2 * w_delta_init * cosmo_params.fnl * w_in_gradient + w_in_gradient;
}

void ForwardFNL::clearAdjointGradient() {
  hold_ag_input.clear();
  hold_input.clear();
}

void ForwardFNL::forwardModelRsdField(ArrayRef &, double *) {}

void ForwardFNL::releaseParticles() {}

static std::shared_ptr<BORGForwardModel> build_primordial_FNL(
    MPI_Communication *comm, BoxModel const &box, PropertyProxy const &params) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  return std::make_shared<ForwardFNL>(comm, box);
}

LIBLSS_REGISTER_FORWARD_IMPL(PRIMORDIAL_FNL, build_primordial_FNL);
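For reference, the transform implemented in getDensityFinal above and the factor applied by getAdjointModelOutput correspond to

\[
  \delta_{\mathrm{out}} = \delta + f_{\mathrm{NL}}\,\delta^{2},
  \qquad
  \frac{\partial \delta_{\mathrm{out}}}{\partial \delta} = 1 + 2\,f_{\mathrm{NL}}\,\delta ,
\]

so the adjoint pass multiplies the incoming gradient by 1 + 2 f_NL delta, which is exactly the last assignment in getAdjointModelOutput.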
extra/hades/libLSS/physics/forwards/fnl.hpp (new file, 65 lines)
@@ -0,0 +1,65 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/fnl.hpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_HADES_FORWARD_FNL_HPP
# define __LIBLSS_HADES_FORWARD_FNL_HPP
# pragma once

# include "libLSS/physics/forward_model.hpp"
# include "libLSS/tools/fusewrapper.hpp"
# include "libLSS/tools/errors.hpp"
# include "libLSS/physics/forwards/registry.hpp"

namespace LibLSS {

  /**
   * This class handles the addition of an fNL term to the primordial potential.
   */
  class ForwardFNL : public BORGForwardModel {
  public:
    typedef IArrayType::ArrayType IArrayRef;
    typedef ArrayType1d::ArrayType Array1dRef;
    using BORGForwardModel::ArrayRef;

  protected:
    ModelInput<3> hold_input;
    ModelInputAdjoint<3> hold_ag_input;
    bool lazyInit;

  public:
    /**
     * Constructor.
     */
    explicit ForwardFNL(MPI_Communication *comm, const BoxModel &box);

    virtual PreferredIO getPreferredInput() const { return PREFERRED_REAL; }
    virtual PreferredIO getPreferredOutput() const { return PREFERRED_REAL; }

    virtual void forwardModel_v2(ModelInput<3> delta_init);
    virtual void getDensityFinal(ModelOutput<3> delta_output);
    virtual void adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta);
    virtual void
    getAdjointModelOutput(ModelOutputAdjoint<3> out_gradient_delta);
    virtual void forwardModelRsdField(ArrayRef &, double *);
    virtual void clearAdjointGradient();
    virtual void releaseParticles();
  };

} // namespace LibLSS

LIBLSS_REGISTER_FORWARD_DECL(PRIMORDIAL_FNL);

#endif
// ARES TAG: num_authors = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(1) = Jens Jasche
// ARES TAG: year(1) = 2020
// ARES TAG: email(1) = jens.jasche@fysik.su.se
extra/hades/libLSS/physics/forwards/primordial.cpp (new file, 205 lines)
@@ -0,0 +1,205 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/primordial.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include <set>
#include <map>
#include "libLSS/physics/forwards/primordial.hpp"
#include "libLSS/physics/forwards/registry.hpp"
#include "libLSS/physics/model_io.hpp"
#include "libLSS/samplers/core/powerspec_tools.hpp"
#include "libLSS/tools/itertools.hpp"
#include <CosmoTool/cosmopower.hpp>

using namespace LibLSS;
ForwardPrimordial::ForwardPrimordial(
    MPI_Communication *comm, const BoxModel &box, double af_)
    : BORGForwardModel(comm, box), af(af_), D_init(1.0),
      powerSpectrumKeys(lo_mgr->extents_complex()),
      powerSpectrum(boost::extents[1]), lazyInit(false), invalid(true),
      accumulateAg(false) {
  ensureInputEqualOutput();

  // First look at the number of different keys
  size_t endN0 = startN0 + localN0;
  std::set<double> keyset;
  std::map<double, int> keymap;
  for (size_t i = startN0; i < endN0; i++) {
    for (size_t j = 0; j < N1; j++) {
      for (size_t k = 0; k < N2_HC; k++) {
        double kx = kmode(i, N0, L0);
        double ky = kmode(j, N1, L1);
        double kz = kmode(k, N2, L2);

        double key2 = (kx * kx + ky * ky + kz * kz);
        keyset.insert(key2);
      }
    }
  }

  // Build inverse map
  for (auto iter : itertools::enumerate(keyset)) {
    keymap[iter.get<1>()] = iter.get<0>();
  }

  keyTranslate.resize(boost::extents[keymap.size()]);

  // Assign each k mode its unique identifier
  for (size_t i = startN0; i < endN0; i++) {
    for (size_t j = 0; j < N1; j++) {
      for (size_t k = 0; k < N2_HC; k++) {
        double kx = kmode(i, N0, L0);
        double ky = kmode(j, N1, L1);
        double kz = kmode(k, N2, L2);

        double key2 = (kx * kx + ky * ky + kz * kz);
        int key = keymap[key2];
        powerSpectrumKeys[i][j][k] = key;
        keyTranslate[key] = std::sqrt(key2);
      }
    }
  }
  powerSpectrum.resize(boost::extents[keymap.size()]);
}

auto ForwardPrimordial::getPowerSpectrumArray() {
  auto &local_keys = powerSpectrumKeys;

  return fwrap(
      b_fused_idx<double, 3>([this, &local_keys](size_t i, size_t j, size_t k) {
        return powerSpectrum[local_keys[i][j][k]];
      }));
}

void ForwardPrimordial::forwardModel_v2(ModelInput<3> delta_init) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  // Setup the IO basis that is required for this forward.
  delta_init.setRequestedIO(PREFERRED_FOURIER);

  hold_input = std::move(delta_init);
}

void ForwardPrimordial::getDensityFinal(ModelOutput<3> delta_output) {
  delta_output.setRequestedIO(PREFERRED_FOURIER);
  auto w_delta_init = fwrap(hold_input.getFourierConst());
  auto w_delta_output = fwrap(delta_output.getFourierOutput());

  w_delta_output = w_delta_init * getPowerSpectrumArray();

  invalid = false;
}

void ForwardPrimordial::updateCosmo() {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  if (cosmo_params == old_cosmo_params)
    return;

  invalid = true;

  old_cosmo_params = cosmo_params;

  Cosmology cosmo(cosmo_params);
  CosmoTool::CosmoPower cpower;

  D_init = cosmo.d_plus(af) /
           cosmo.d_plus(
               1.0); // Scale factor for initial conditions, sigma8 is at z=0

  double h = cpower.h = cosmo_params.h;
  cpower.OMEGA_B = cosmo_params.omega_b;
  cpower.OMEGA_C = cosmo_params.omega_m - cosmo_params.omega_b;
  cpower.SIGMA8 = cosmo_params.sigma8;
  cpower.n = cosmo_params.n_s;
  cpower.updateCosmology();
  cpower.setFunction(CosmoTool::CosmoPower::HU_WIGGLES);
  cpower.normalize();
  cpower.setFunction(CosmoTool::CosmoPower::PRIMORDIAL_PS);

  // TODO: For future we will generate the power spectrum here, inline
  size_t endN0 = startN0 + localN0;

#pragma omp parallel for collapse(3)
  for (size_t i = startN0; i < endN0; i++) {
    for (size_t j = 0; j < N1; j++) {
      for (size_t k = 0; k < N2_HC; k++) {
        int key = powerSpectrumKeys[i][j][k];
        double k_mode = keyTranslate[key];
        double Qk_delta =
            std::sqrt(cpower.power(k_mode * h) * h * h * h * volume);
        if (k_mode > 0) {
          double Qk_phi = -Qk_delta / (k_mode * k_mode);
          powerSpectrum[key] = Qk_phi * D_init;
        } else {
          powerSpectrum[key] = 0;
        }
      }
    }
  }
}

/*
void ForwardPrimordial::setPowerSpectrum(
    IArrayRef const &keys, Array1dRef const &P) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  powerSpectrum.resize(array::make_extent_from(P));
  powerSpectrumKeys.resize(array::make_extent_from(keys));
  fwrap(powerSpectrum) = std::sqrt(fwrap(P) * volume);
  fwrap(powerSpectrumKeys) = keys;
}*/

void ForwardPrimordial::adjointModel_v2(
    ModelInputAdjoint<3> in_gradient_delta) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  // Build strict range views (we do not want to see the FFTW padding);
  // ensure that the view object lives until the end of this function.

  in_gradient_delta.setRequestedIO(PREFERRED_FOURIER);
  if (accumulateAg) {
    if (!hold_ag_input) {
      hold_ag_input = std::move(in_gradient_delta);
      // Mark that we will modify it so we need a local copy.
      hold_ag_input.needDestroyInput();
    } else {
      auto w = fwrap(hold_ag_input.getFourier());
      w = w + fwrap(in_gradient_delta.getFourierConst());
    }
  } else {
    hold_ag_input = std::move(in_gradient_delta);
  }
}

void ForwardPrimordial::getAdjointModelOutput(
    ModelOutputAdjoint<3> out_gradient_delta) {
  out_gradient_delta.setRequestedIO(PREFERRED_FOURIER);
  auto w_in_gradient = fwrap(hold_ag_input.getFourierConst());
  auto w_out_gradient = fwrap(out_gradient_delta.getFourierOutput());

  w_out_gradient = w_in_gradient * getPowerSpectrumArray();
}

void ForwardPrimordial::clearAdjointGradient() {
  hold_ag_input.clear();
  hold_input.clear();
}

bool ForwardPrimordial::densityInvalidated() const { return invalid; }

void ForwardPrimordial::forwardModelRsdField(ArrayRef &, double *) {}

void ForwardPrimordial::releaseParticles() {}

static std::shared_ptr<BORGForwardModel> build_primordial(
    MPI_Communication *comm, BoxModel const &box, PropertyProxy const &params) {
  // double ai = params.get<double>("a_initial");
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  double af = params.get<double>("a_final"); // a_initial has no point here
  return std::make_shared<ForwardPrimordial>(comm, box, af);
}

LIBLSS_REGISTER_FORWARD_IMPL(PRIMORDIAL, build_primordial);
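In the same spirit, the colouring applied by ForwardPrimordial::getDensityFinal, with the coefficients precomputed in updateCosmo, amounts to (the h factors that convert CosmoTool wavenumber units are folded into P(k)):

\[
  \hat\phi(\vec k) \;=\; -\,D(a_f)\,\frac{\sqrt{P(k)\,V}}{k^{2}}\;\hat w(\vec k),
\]

where \(\hat w\) is the white-noise input in Fourier space, \(P(k)\) the primordial power spectrum normalized through sigma8, \(V\) the box volume and \(D(a_f)\) the growth factor at the requested scale factor; since the kernel is diagonal in k, the adjoint pass simply multiplies the incoming gradient by the same array.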
95
extra/hades/libLSS/physics/forwards/primordial.hpp
Normal file
@@ -0,0 +1,95 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/primordial.hpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_HADES_FORWARD_PRIMORDIAL_HPP
# define __LIBLSS_HADES_FORWARD_PRIMORDIAL_HPP
# pragma once

# include "libLSS/physics/forward_model.hpp"
# include "libLSS/tools/fusewrapper.hpp"
# include "libLSS/tools/errors.hpp"
# include "libLSS/physics/forwards/registry.hpp"

namespace LibLSS {

  /**
   * This class handles the colouring of a white noise field with a cosmological
   * power spectrum. It is best to provide a Fourier-space input and to use the
   * returned Fourier field with the v2 interface; otherwise extra FFTs are run
   * to match the v1 interface.
   */
  class ForwardPrimordial : public BORGForwardModel {
  public:
    typedef IArrayType::ArrayType IArrayRef;
    typedef ArrayType1d::ArrayType Array1dRef;
    using BORGForwardModel::ArrayRef;

  protected:
    CosmologicalParameters old_cosmo_params;
    double af, D_init;
    LibLSS::multi_array<int, 3> powerSpectrumKeys;
    LibLSS::multi_array<double, 1> powerSpectrum;
    LibLSS::multi_array<double, 1> keyTranslate;

    ModelInput<3> hold_input;
    ModelInputAdjoint<3> hold_ag_input;
    bool lazyInit;
    bool invalid;
    bool accumulateAg;

  public:
    /**
     * Constructor.
     */
    explicit ForwardPrimordial(
        MPI_Communication *comm, const BoxModel &box, double af_);

    PreferredIO getPreferredInput() const override { return PREFERRED_FOURIER; }
    PreferredIO getPreferredOutput() const override {
      return PREFERRED_FOURIER;
    }

    auto getPowerSpectrumArray();

    void accumulateAdjoint(bool accumulate) override {
      accumulateAg = accumulate;
    }

    void forwardModel_v2(ModelInput<3> delta_init) override;

    void getDensityFinal(ModelOutput<3> delta_output) override;

    void updateCosmo() override;

    void adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta) override;

    void clearAdjointGradient() override;

    void
    getAdjointModelOutput(ModelOutputAdjoint<3> out_gradient_delta) override;

    void forwardModelRsdField(ArrayRef &, double *) override;

    void releaseParticles() override;

    bool densityInvalidated() const override;
  };

} // namespace LibLSS

LIBLSS_REGISTER_FORWARD_DECL(PRIMORDIAL);

#endif
// ARES TAG: num_authors = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(1) = Jens Jasche
// ARES TAG: year(1) = 2020
// ARES TAG: email(1) = jens.jasche@fysik.su.se
35
extra/hades/libLSS/physics/forwards/registry.cpp
Normal file
@@ -0,0 +1,35 @@
#include "libLSS/tools/console.hpp"
#include "libLSS/physics/forwards/registry.hpp"
#include "libLSS/tools/static_init.hpp"

using namespace LibLSS;

namespace {
  RegisterStaticInit init_registry([]() {
    auto &cons = Console::instance();

    cons.print<LOG_INFO_SINGLE>("Registered forward models:");
    for (auto k : ForwardRegistry::instance().list()) {
      cons.print<LOG_INFO_SINGLE>(" - " + k.first);
    }
  });
} // namespace

ForwardRegistry::ForwardRegistry() {}

ForwardRegistry &ForwardRegistry::instance() {
  static ForwardRegistry this_instance;

  return this_instance;
}

ForwardModelFactory ForwardRegistry::get(std::string const &n) {
  auto iter = forwardRegistry.find(n);
  if (iter == forwardRegistry.end())
    error_helper<ErrorParams>("Invalid model name");
  return iter->second;
}

ForwardModelFactory LibLSS::setup_forward_model(std::string const &n) {
  return ForwardRegistry::instance().get(n);
}
104
extra/hades/libLSS/physics/forwards/registry.hpp
Normal file
@@ -0,0 +1,104 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/registry.hpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_PHYSICS_FORWARDS_REGISTRY_HPP
# define __LIBLSS_PHYSICS_FORWARDS_REGISTRY_HPP

# include <map>
# include <string>
# include <memory>
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/physics/forward_model.hpp"
# include "libLSS/tools/ptree_proxy.hpp"
# include "libLSS/tools/errors.hpp"

namespace LibLSS {

  typedef std::function<std::shared_ptr<BORGForwardModel>(
      MPI_Communication *comm, BoxModel const &box,
      PropertyProxy const &params)>
      ForwardModelFactory;

  /**
   * @brief Class that handles the automatic registration of forward model elements.
   *
   * The models can later be constructed automatically, given a name, a box and a dictionary.
   */
  class ForwardRegistry {
  private:
    std::map<std::string, ForwardModelFactory> forwardRegistry;
    ForwardRegistry();

  public:
    static ForwardRegistry &instance();

    /**
     * @brief Register a new element factory. This is internal, do not call it directly.
     *
     * @param n
     * @param factory
     */
    void registerFactory(std::string const &n, ForwardModelFactory factory) {
      forwardRegistry[n] = factory;
    }

    /**
     * @brief List all registered models.
     *
     * @return std::map<std::string, ForwardModelFactory> const
     */
    auto const list() { return forwardRegistry; }

    /**
     * @brief Look up a factory.
     *
     * @param n name
     * @return ForwardModelFactory a factory
     */
    ForwardModelFactory get(std::string const &n);
  };

  struct _RegisterForwardModel {
    _RegisterForwardModel(std::string n, ForwardModelFactory factory) {
      ForwardRegistry::instance().registerFactory(n, factory);
    }
  };

  ForwardModelFactory setup_forward_model(std::string const &n);

  /**
   * @brief Declare an automatic registrator. This is required so that the dynamic linker includes the symbols.
   *
   */
# define LIBLSS_REGISTER_FORWARD_DECL(NAME)                                    \
    AUTO_REGISTRATOR_DECL(Forward_##NAME)

# define LIBLSS_REGISTER_NAME(NAME) Forward_##NAME
# define MANGLED_LIBLSS_REGISTER_NAME(NAME) _register_##NAME
# define LIBLSS_REGISTER_NAMESTR(NAME) # NAME

  /**
   * @brief Implements an automatic registrator. A builder function must be provided as an argument.
   *
   */
# define LIBLSS_REGISTER_FORWARD_IMPL(NAME, BUILDER)                           \
    AUTO_REGISTRATOR_IMPL(LIBLSS_REGISTER_NAME(NAME))                          \
    namespace {                                                                \
      _RegisterForwardModel MANGLED_LIBLSS_REGISTER_NAME(NAME)(                \
          LIBLSS_REGISTER_NAMESTR(NAME), BUILDER);                             \
    }

} // namespace LibLSS

#endif
// ARES TAG: num_authors = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
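A minimal usage sketch (not part of the commit) of the registry declared above: a model is looked up by the name passed to LIBLSS_REGISTER_FORWARD_IMPL and built from a box and a property dictionary. The helper function name below is hypothetical; the communicator, box and params objects are assumed to be set up elsewhere.

#include "libLSS/physics/forwards/registry.hpp"

// Hypothetical helper: build a registered forward model element by name.
std::shared_ptr<LibLSS::BORGForwardModel> make_model(
    std::string const &name, LibLSS::MPI_Communication *comm,
    LibLSS::BoxModel const &box, LibLSS::PropertyProxy const &params) {
  // setup_forward_model() reports an error via error_helper if the name is unknown.
  LibLSS::ForwardModelFactory factory = LibLSS::setup_forward_model(name);
  // The factory signature is (comm, box, params), as typedef'd above.
  return factory(comm, box, params);
}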
178
extra/hades/libLSS/physics/forwards/transfer_ehu.cpp
Normal file
@@ -0,0 +1,178 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/transfer_ehu.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include <set>
#include <map>
#include "libLSS/physics/forwards/transfer_ehu.hpp"
#include "libLSS/physics/forwards/registry.hpp"
#include "libLSS/physics/model_io.hpp"
#include "libLSS/samplers/core/powerspec_tools.hpp"
#include "libLSS/tools/itertools.hpp"
#include <CosmoTool/cosmopower.hpp>

using namespace LibLSS;

ForwardEisensteinHu::ForwardEisensteinHu(
    MPI_Communication *comm, const BoxModel &box)
    : BORGForwardModel(comm, box), powerSpectrumKeys(lo_mgr->extents_complex()),
      powerSpectrum(boost::extents[1]), lazyInit(false), sign_k2(1),
      invalid(true) {
  ensureInputEqualOutput();

  // First look at the number of different keys
  size_t endN0 = startN0 + localN0;
  std::set<double> keyset;
  std::map<double, int> keymap;
  for (size_t i = startN0; i < endN0; i++) {
    for (size_t j = 0; j < N1; j++) {
      for (size_t k = 0; k < N2_HC; k++) {
        double kx = kmode(i, N0, L0);
        double ky = kmode(j, N1, L1);
        double kz = kmode(k, N2, L2);

        double key2 = (kx * kx + ky * ky + kz * kz);
        keyset.insert(key2);
      }
    }
  }

  // Build inverse map
  for (auto iter : itertools::enumerate(keyset)) {
    keymap[iter.get<1>()] = iter.get<0>();
  }

  keyTranslate.resize(boost::extents[keymap.size()]);

  // Assign each k mode its unique identifier
  for (size_t i = startN0; i < endN0; i++) {
    for (size_t j = 0; j < N1; j++) {
      for (size_t k = 0; k < N2_HC; k++) {
        double kx = kmode(i, N0, L0);
        double ky = kmode(j, N1, L1);
        double kz = kmode(k, N2, L2);

        double key2 = (kx * kx + ky * ky + kz * kz);
        int key = keymap[key2];
        powerSpectrumKeys[i][j][k] = key;
        keyTranslate[key] = std::sqrt(key2);
      }
    }
  }
  powerSpectrum.resize(boost::extents[keymap.size()]);
}

auto ForwardEisensteinHu::getPowerSpectrumArray() {
  auto &local_keys = powerSpectrumKeys;

  return fwrap(
      b_fused_idx<double, 3>([this, &local_keys](size_t i, size_t j, size_t k) {
        return powerSpectrum[local_keys[i][j][k]];
      }));
}

void ForwardEisensteinHu::forwardModel_v2(ModelInput<3> delta_init) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  // Setup the IO basis that is required for this forward.
  delta_init.setRequestedIO(PREFERRED_FOURIER);

  hold_input = std::move(delta_init);
}

void ForwardEisensteinHu::getDensityFinal(ModelOutput<3> delta_output) {
  delta_output.setRequestedIO(PREFERRED_FOURIER);
  auto w_delta_init = fwrap(hold_input.getFourierConst());
  auto w_delta_output = fwrap(delta_output.getFourierOutput());

  w_delta_output = (w_delta_init) * getPowerSpectrumArray();

  invalid = false;
}

void ForwardEisensteinHu::updateCosmo() {
  LIBLSS_AUTO_CONTEXT(LOG_VERBOSE, ctx);

  if (cosmo_params == old_cosmo_params)
    return;

  invalid = true;

  old_cosmo_params = cosmo_params;

  Cosmology cosmo(cosmo_params);
  CosmoTool::CosmoPower cpower;

  double h = cpower.h = cosmo_params.h;
  cpower.OMEGA_B = cosmo_params.omega_b;
  cpower.OMEGA_C = cosmo_params.omega_m - cosmo_params.omega_b;
  cpower.SIGMA8 = cosmo_params.sigma8;
  cpower.n = cosmo_params.n_s;
  cpower.updateCosmology();
  cpower.setFunction(CosmoTool::CosmoPower::HU_WIGGLES);
  cpower.normalize();
  cpower.setFunction(CosmoTool::CosmoPower::MATTER_TK);

  // TODO: For future we will generate the power spectrum here, inline
  size_t endN0 = startN0 + localN0;

#pragma omp parallel for collapse(3)
  for (size_t i = startN0; i < endN0; i++) {
    for (size_t j = 0; j < N1; j++) {
      for (size_t k = 0; k < N2_HC; k++) {
        int key = powerSpectrumKeys[i][j][k];
        double k_mode = keyTranslate[key];
        double Qk_delta =
            -sign_k2 * cpower.power(k_mode * h) * (k_mode * k_mode);
        powerSpectrum[key] = Qk_delta;
      }
    }
  }
}

void ForwardEisensteinHu::adjointModel_v2(
    ModelInputAdjoint<3> in_gradient_delta) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  // Build strict range views (we do not want to see the FFTW padding); ensure
  // that the view object lives until the end of this function.

  in_gradient_delta.setRequestedIO(PREFERRED_FOURIER);
  hold_ag_input = std::move(in_gradient_delta);
}

void ForwardEisensteinHu::getAdjointModelOutput(
    ModelOutputAdjoint<3> out_gradient_delta) {
  out_gradient_delta.setRequestedIO(PREFERRED_FOURIER);
  auto w_in_gradient = fwrap(hold_ag_input.getFourierConst());
  auto w_out_gradient = fwrap(out_gradient_delta.getFourierOutput());

  w_out_gradient = w_in_gradient * getPowerSpectrumArray();
}

void ForwardEisensteinHu::clearAdjointGradient() {
  hold_ag_input.clear();
  hold_input.clear();
}

void ForwardEisensteinHu::forwardModelRsdField(ArrayRef &, double *) {}

void ForwardEisensteinHu::releaseParticles() {}

bool ForwardEisensteinHu::densityInvalidated() const { return invalid; }

static std::shared_ptr<BORGForwardModel> build_eisenstein_hu(
    MPI_Communication *comm, BoxModel const &box, PropertyProxy const &params) {
  // double ai = params.get<double>("a_initial");
  auto reverse_sign = params.get_optional<bool>("reverse_sign");
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  auto v = std::make_shared<ForwardEisensteinHu>(comm, box);
  if (reverse_sign)
    v->setReverseSign(*reverse_sign);
  return v;
}

LIBLSS_REGISTER_FORWARD_IMPL(TRANSFER_EHU, build_eisenstein_hu);
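As a reading aid (not part of the commit): the quantity tabulated per |k| bin by updateCosmo above, and applied multiplicatively in getDensityFinal and getAdjointModelOutput, is

\[
  Q(k) \;=\; -\,s\,k^2\,P_{\rm EH}(k\,h),
\]

where \(P_{\rm EH}\) is the CosmoTool curve selected with MATTER_TK after the HU_WIGGLES normalization, and \(s = \pm 1\) is the reverse_sign / sign_k2 switch. The \(-k^2\) factor is what turns the potential-like input assumed in the header below into a density-like output.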
97
extra/hades/libLSS/physics/forwards/transfer_ehu.hpp
Normal file
@@ -0,0 +1,97 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/transfer_ehu.hpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_HADES_FORWARD_TRANSFER_EHU_HPP
# define __LIBLSS_HADES_FORWARD_TRANSFER_EHU_HPP
# pragma once

# include "libLSS/physics/forward_model.hpp"
# include "libLSS/tools/fusewrapper.hpp"
# include "libLSS/tools/errors.hpp"
# include "libLSS/physics/forwards/registry.hpp"

namespace LibLSS {

  /**
   * @brief Forward model element that applies an Eisenstein & Hu transfer function.
   *
   * It assumes that the input is a properly linearly-scaled gravitational potential.
   */
  class ForwardEisensteinHu : public BORGForwardModel {
  public:
    typedef IArrayType::ArrayType IArrayRef;
    typedef ArrayType1d::ArrayType Array1dRef;
    using BORGForwardModel::ArrayRef;

  protected:
    CosmologicalParameters old_cosmo_params;
    LibLSS::multi_array<int, 3> powerSpectrumKeys;
    LibLSS::multi_array<double, 1> powerSpectrum;
    LibLSS::multi_array<double, 1> keyTranslate;

    ModelInput<3> hold_input;
    ModelInputAdjoint<3> hold_ag_input;
    bool lazyInit;

    double sign_k2;

    bool invalid;
    bool accum;

  public:
    /**
     * Constructor.
     */
    explicit ForwardEisensteinHu(MPI_Communication *comm, const BoxModel &box);

    PreferredIO getPreferredInput() const override { return PREFERRED_FOURIER; }
    PreferredIO getPreferredOutput() const override {
      return PREFERRED_FOURIER;
    }

    auto getPowerSpectrumArray();

    void setReverseSign(bool reverse) { sign_k2 = reverse ? -1 : 1; }

    void forwardModel_v2(ModelInput<3> delta_init) override;

    void getDensityFinal(ModelOutput<3> delta_output) override;

    void updateCosmo() override;

    void adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta) override;

    void accumulateAdjoint(bool do_accumulate) override {
      accum = do_accumulate;
    }

    void clearAdjointGradient() override;

    void
    getAdjointModelOutput(ModelOutputAdjoint<3> out_gradient_delta) override;

    void forwardModelRsdField(ArrayRef &, double *) override;

    void releaseParticles() override;

    bool densityInvalidated() const override;
  };

} // namespace LibLSS

LIBLSS_REGISTER_FORWARD_DECL(TRANSFER_EHU);

#endif
// ARES TAG: num_authors = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(1) = Jens Jasche
// ARES TAG: year(1) = 2020
// ARES TAG: email(1) = jens.jasche@fysik.su.se
257
extra/hades/libLSS/physics/forwards/upgrade.cpp
Normal file
@@ -0,0 +1,257 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/upgrade.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/tools/static_init.hpp"
#include "libLSS/physics/forwards/upgrade.hpp"

using namespace LibLSS;

static BoxModel mul_box(BoxModel inbox, unsigned int m) {
  inbox.N0 *= m;
  inbox.N1 *= m;
  inbox.N2 *= m;
  return inbox;
}

ForwardUpgrade::ForwardUpgrade(
    MPI_Communication *comm, BoxModel const &box, unsigned int multiply)
    : BORGForwardModel(comm, box, mul_box(box, multiply)) {

  //ghosts.setup();
}

void ForwardUpgrade::forwardModel_v2(ModelInput<3> delta_init) {
  delta_init.setRequestedIO(PREFERRED_FOURIER);

  hold_input = std::move(delta_init);
}

namespace {
  namespace details {
    template <size_t Dim, size_t r, typename A>
    auto _basic_range(
        size_t id, std::array<ssize_t, Dim> const &source_N,
        std::array<ssize_t, Dim> const &target_N, A a) {
      typedef boost::multi_array_types::index_range range;
      if (r == 0) {
        if ((id & 1) != 0) {
          return a[range(target_N[0] - source_N[0] / 2 + 1, target_N[0])];
        } else {
          return a[range(0, source_N[0] / 2)];
        }
      } else {
        if ((id & (1UL << r)) != 0) {
          return a[range(target_N[r] - source_N[r] / 2 + 1, target_N[r])];
        } else {
          return a[range(0, source_N[r] / 2)];
        }
      }
    }

    template <size_t Dim, size_t r, typename A>
    auto _basic_slice(
        size_t id, size_t nyquist_dir, std::array<ssize_t, Dim> const &source_N,
        std::array<ssize_t, Dim> const &target_N, A a) {
      typedef boost::multi_array_types::index_range range;
      if (nyquist_dir == r) {
        if ((id & (1UL << r)) == 0) {
          return a[range(source_N[r] / 2, source_N[r] / 2 + 1)];
        } else {
          return a[range(
              target_N[r] - source_N[r] / 2,
              target_N[r] - source_N[r] / 2 + 1)];
        }
      } else {
        if ((id & (1UL << r)) == 0) {
          return a[range(0, source_N[r] / 2 + 1)];
        } else {
          return a[range(target_N[r] - source_N[r] / 2, target_N[r])];
        }
      }
    }

    template <size_t Dim, size_t r>
    struct _gen_range {

      static auto
      gen(size_t id, std::array<ssize_t, Dim> const &source_N,
          std::array<ssize_t, Dim> const &target_N) {
        return _basic_range<Dim, r>(
            id, source_N, target_N,
            _gen_range<Dim, r - 1>::gen(id, source_N, target_N));
      }
      static auto slice(
          size_t id, size_t nyquist_dir,
          std::array<ssize_t, Dim> const &source_N,
          std::array<ssize_t, Dim> const &target_N) {
        return _basic_slice<Dim, r>(
            id, nyquist_dir, source_N, target_N,
            _gen_range<Dim, r - 1>::slice(id, nyquist_dir, source_N, target_N));
      }
    };

    template <size_t Dim>
    struct _gen_range<Dim, 0UL> {
      static auto
      gen(size_t id, std::array<ssize_t, Dim> const &source_N,
          std::array<ssize_t, Dim> const &target_N) {
        return _basic_range<Dim, 0>(id, source_N, target_N, boost::indices);
      }
      static auto slice(
          size_t id, size_t nyquist_dir,
          std::array<ssize_t, Dim> const &source_N,
          std::array<ssize_t, Dim> const &target_N) {
        return _basic_slice<Dim, 0>(
            id, nyquist_dir, source_N, target_N, boost::indices);
      }
    };

    template <size_t Dim>
    auto gen_range(
        size_t id, std::array<ssize_t, Dim> const &source_N,
        std::array<ssize_t, Dim> const &target_N) {
      return _gen_range<Dim, Dim - 1>::gen(id, source_N, target_N);
    }

    template <size_t Dim>
    auto gen_slice(
        size_t id, size_t nyquist_dir, std::array<ssize_t, Dim> const &source_N,
        std::array<ssize_t, Dim> const &target_N) {
      return _gen_range<Dim, Dim - 1>::slice(
          id, nyquist_dir, source_N, target_N);
    }
  } // namespace details

  using details::gen_range;
  using details::gen_slice;

} // namespace

template <size_t Dim, typename OutArray, typename InputArray>
static void upgrade(
    NBoxModel<Dim> const &in_box, NBoxModel<Dim> const &out_box,
    OutArray &output, InputArray const &input) {
  constexpr size_t Pairs = 1UL << (Dim - 1);

  std::array<ssize_t, Dim> in_Ns, out_Ns;
  in_box.fill(in_Ns);
  out_box.fill(out_Ns);

  fwrap(output) = 0;

  for (size_t i = 0; i < Pairs; i++) {
    auto r0 = gen_range(i, in_Ns, in_Ns);
    auto r1 = gen_range(i, in_Ns, out_Ns);

    fwrap(output[r1]) = fwrap(input[r0]);

    for (unsigned nyquist = 0; nyquist < Dim; nyquist++) {
      auto s0 = gen_slice(i, nyquist, in_Ns, out_Ns);
      auto s1 = gen_slice(i, nyquist, in_Ns, in_Ns);

      fwrap(output[s0]) = fwrap(input[s1]);
    }
  }

  for (size_t i = 0; i < Pairs; i++) {

    for (unsigned nyquist = 0; nyquist < Dim; nyquist++) {
      {
        auto s0 = gen_slice(i, nyquist, in_Ns, out_Ns);

        fwrap(output[s0]) = 0.5 * fwrap(output[s0]);
      }
    }
  }
}

template <size_t Dim, typename OutArray, typename InputArray>
static void adjoint_upgrade(
    NBoxModel<Dim> const &in_box, NBoxModel<Dim> const &out_box,
    OutArray &output, InputArray const &input) {
  constexpr size_t Pairs = 1UL << (Dim - 1);

  std::array<ssize_t, Dim> in_Ns, out_Ns;
  in_box.fill(in_Ns);
  out_box.fill(out_Ns);

  fwrap(output) = 0;

  for (size_t i = 0; i < Pairs; i++) {

    for (unsigned nyquist = 0; nyquist < Dim; nyquist++) {
      {
        auto s0 = gen_slice(i, nyquist, in_Ns, in_Ns);
        auto s1 = gen_slice(i, nyquist, in_Ns, out_Ns);
        // double const fac = (nyquist==(Dim-1) ? 2 : 1);

        fwrap(output[s0]) = fwrap(output[s0]) + fwrap(input[s1]);
      }
    }
  }

  for (unsigned nyquist = 0; nyquist < Dim; nyquist++) {
    auto s0 = gen_slice(1UL << nyquist, nyquist, in_Ns, in_Ns);

    fwrap(output[s0]) = 0.5 * fwrap(output[s0]);
  }

  for (size_t i = 0; i < Pairs; i++) {
    auto r0 = gen_range(i, in_Ns, in_Ns);
    auto r1 = gen_range(i, in_Ns, out_Ns);

    fwrap(output[r0]) = fwrap(input[r1]);
  }
}

void ForwardUpgrade::getDensityFinal(ModelOutput<3> delta_output) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  delta_output.setRequestedIO(PREFERRED_FOURIER);

  auto &in_delta = hold_input.getFourierConst();
  // ghosts.synchronize(in_delta);

  upgrade(
      get_box_model(), get_box_model_output(), delta_output.getFourierOutput(),
      in_delta);
}

void ForwardUpgrade::adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta) {

  in_gradient_delta.setRequestedIO(PREFERRED_FOURIER);

  hold_adjoint = std::move(in_gradient_delta);
}

void ForwardUpgrade::getAdjointModelOutput(
    ModelOutputAdjoint<3> out_gradient_delta) {

  out_gradient_delta.setRequestedIO(PREFERRED_FOURIER);
  adjoint_upgrade(
      get_box_model(), get_box_model_output(),
      out_gradient_delta.getFourierOutput(), hold_adjoint.getFourierConst());
}

static std::shared_ptr<BORGForwardModel> build_upgrade(
    MPI_Communication *comm, BoxModel const &box, PropertyProxy const &params) {

  int multiplier = params.get<int>("multiplier");
  if (multiplier <= 1) {
    error_helper<ErrorParams>("Invalid multiplier, it is required to be > 1");
  }

  // TODO: Setup transfer function
  auto model = std::make_shared<ForwardUpgrade>(comm, box, multiplier);
  return model;
}

LIBLSS_REGISTER_FORWARD_IMPL(Upgrade, build_upgrade);
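A sketch (not part of the commit, and only my reading of the code above) of what upgrade() does when the grid is enlarged: low-frequency modes are copied, the new high-frequency modes stay at zero, and each old Nyquist plane is written onto both the +k_N and -k_N planes of the new grid and then halved, so that the enlarged field interpolates the original one,

\[
  \hat\delta_{\rm new}(\mathbf{k}) =
  \begin{cases}
    \hat\delta_{\rm old}(\mathbf{k}) & |k_i| < k^{\rm old}_{N,i}\ \text{for all } i,\\
    \tfrac12\,\hat\delta_{\rm old}(\mathbf{k})\big|_{k_i = k^{\rm old}_{N,i}} & \text{on the old Nyquist planes (both signs)},\\
    0 & \text{otherwise,}
  \end{cases}
\]

with adjoint_upgrade() implementing the transpose of this copy-and-halve operation.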
65
extra/hades/libLSS/physics/forwards/upgrade.hpp
Normal file
@@ -0,0 +1,65 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/forwards/upgrade.hpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_HADES_FORWARD_UPGRADE_HPP
# define __LIBLSS_HADES_FORWARD_UPGRADE_HPP

# include "libLSS/physics/forward_model.hpp"
# include "libLSS/tools/fusewrapper.hpp"
# include "libLSS/tools/errors.hpp"
# include "libLSS/physics/forwards/registry.hpp"
# include "libLSS/tools/mpi/ghost_planes.hpp"

namespace LibLSS {

  /**
   * @brief Upgrade forward model element
   *
   * It pads the Fourier representation of the input field with zeros, treating the Nyquist planes correctly.
   */
  class ForwardUpgrade : public BORGForwardModel {
  protected:
    ModelInput<3> hold_input;
    ModelInputAdjoint<3> hold_adjoint;

    GhostPlanes<std::complex<double>, 2> ghosts;

  public:
    ForwardUpgrade(
        MPI_Communication *comm, BoxModel const &box, unsigned int multiply);

    PreferredIO getPreferredInput() const override { return PREFERRED_FOURIER; }
    PreferredIO getPreferredOutput() const override {
      return PREFERRED_FOURIER;
    }

    bool densityInvalidated() const override { return false; }

    void forwardModel_v2(ModelInput<3> delta_init) override;
    void getDensityFinal(ModelOutput<3> delta_output) override;

    void adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta) override;

    void
    getAdjointModelOutput(ModelOutputAdjoint<3> out_gradient_delta) override;
  };
} // namespace LibLSS

LIBLSS_REGISTER_FORWARD_DECL(Upgrade);

// ARES TAG: num_authors = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: name(1) = Jens Jasche
// ARES TAG: year(1) = 2020
// ARES TAG: email(1) = jens.jasche@fysik.su.se

#endif
191
extra/hades/libLSS/physics/haar.cpp
Normal file
@@ -0,0 +1,191 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/haar.cpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include <string>
#include "libLSS/tools/console.hpp"
#include "libLSS/physics/model_io.hpp"
#include "libLSS/physics/haar.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/physics/forwards/registry.hpp"
#include "libLSS/tools/ptree_proxy.hpp"

using namespace LibLSS;

void ForwardHaar::forwardModel_v2(ModelInput<3> delta_init) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  // Setup the IO basis that is required for this forward.
  delta_init.setRequestedIO(PREFERRED_REAL);

  delta_init.needDestroyInput();

  hold_input = std::move(delta_init);
}

template <typename ArrayIn, typename ArrayOut>
void ihaar_1d(ArrayIn &&in, ArrayOut &&out) {
  int N = in.shape()[0];
  int k = 1;

  while (2 * k <= N) {
    for (int i = 0; i < k; i++) {
      double a = in[i], b = in[i + k];
      out[2 * i] = (a + b) * M_SQRT1_2;
      out[2 * i + 1] = (a - b) * M_SQRT1_2;
    }

    // The last one does not need anything
    k = k * 2;
    if (k < N)
      for (int i = 0; i < (2 * k); i++)
        in[i] = out[i];
  }
}

template <typename ArrayIn, typename ArrayOut>
void haar_1d(ArrayIn &&in, ArrayOut &&out) {
  int N = in.shape()[0];
  int k = N;

  while (k > 1) {
    k = k / 2;
    for (int i = 0; i < k; i++) {
      double a = in[2 * i], b = in[2 * i + 1];
      out[i] = (a + b) * M_SQRT1_2;
      out[i + k] = (a - b) * M_SQRT1_2;
    }

    // The last one does not need anything
    if (k > 1)
      for (int i = 0; i < (2 * k); i++)
        in[i] = out[i];
  }
}

template <typename ArrayIn, typename ArrayOut>
void haar_3d(ArrayIn &&in, ArrayOut &&out) {
  typedef decltype(out) Array;
  typedef boost::multi_array_types::index_range range;
  using boost::indices;
  size_t N0 = in.shape()[0], N1 = in.shape()[1], N2 = in.shape()[2];

#pragma omp parallel for schedule(static) collapse(2)
  for (int j = 0; j < N1; j++) {
    for (int k = 0; k < N2; k++) {
      auto sub_idx = indices[range()][j][k];
      haar_1d(in[sub_idx], out[sub_idx]);
    }
  }

#pragma omp parallel for schedule(static) collapse(2)
  for (int j = 0; j < N0; j++) {
    for (int k = 0; k < N2; k++) {
      auto sub_idx = indices[j][range()][k];
      // THIS IS NOT A TYPO! out and in are reversed here.
      haar_1d(out[sub_idx], in[sub_idx]);
    }
  }

#pragma omp parallel for schedule(static) collapse(2)
  for (int j = 0; j < N0; j++) {
    for (int k = 0; k < N1; k++) {
      auto sub_idx = indices[j][k][range()];
      haar_1d(in[sub_idx], out[sub_idx]);
    }
  }
}

template <typename ArrayIn, typename ArrayOut>
void ihaar_3d(ArrayIn &&in, ArrayOut &&out) {
  typedef decltype(out) Array;
  typedef boost::multi_array_types::index_range range;
  using boost::indices;
  size_t N0 = in.shape()[0], N1 = in.shape()[1], N2 = in.shape()[2];

#pragma omp parallel for schedule(static) collapse(2)
  for (int j = 0; j < N0; j++) {
    for (int k = 0; k < N1; k++) {
      auto sub_idx = indices[j][k][range()];
      ihaar_1d(in[sub_idx], out[sub_idx]);
    }
  }

#pragma omp parallel for schedule(static) collapse(2)
  for (int j = 0; j < N0; j++) {
    for (int k = 0; k < N2; k++) {
      auto sub_idx = indices[j][range()][k];
      // THIS IS NOT A TYPO! out and in are reversed here.
      ihaar_1d(out[sub_idx], in[sub_idx]);
    }
  }

#pragma omp parallel for schedule(static) collapse(2)
  for (int j = 0; j < N1; j++) {
    for (int k = 0; k < N2; k++) {
      auto sub_idx = indices[range()][j][k];
      ihaar_1d(in[sub_idx], out[sub_idx]);
    }
  }
}

void ForwardHaar::getDensityFinal(ModelOutput<3> delta_output) {
  delta_output.setRequestedIO(PREFERRED_REAL);

  size_t N2 = lo_mgr->N2;
  typedef boost::multi_array_types::index_range range;

  auto sub_array = boost::indices[range()][range()][range(0, N2)];
  auto &out = delta_output.getRealOutput();
  auto &in = hold_input.getReal();

  if (do_inverse)
    ihaar_3d(in[sub_array], out[sub_array]);
  else
    haar_3d(in[sub_array], out[sub_array]);
}

void ForwardHaar::adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  in_gradient_delta.setRequestedIO(PREFERRED_REAL);
  in_gradient_delta.needDestroyInput();
  hold_ag_input = std::move(in_gradient_delta);
}

void ForwardHaar::getAdjointModelOutput(
    ModelOutputAdjoint<3> out_gradient_delta) {
  out_gradient_delta.setRequestedIO(PREFERRED_REAL);

  size_t N2 = lo_mgr->N2;
  typedef boost::multi_array_types::index_range range;

  auto sub_array = boost::indices[range()][range()][range(0, N2)];
  auto &in = hold_ag_input.getReal();
  auto &out = out_gradient_delta.getRealOutput();

  // Haar transpose is its inverse.
  if (do_inverse)
    haar_3d(in[sub_array], out[sub_array]);
  else
    ihaar_3d(in[sub_array], out[sub_array]);
}

static std::shared_ptr<BORGForwardModel> build_haar(
    MPI_Communication *comm, BoxModel const &box, PropertyProxy const &params) {
  bool use_inverse = params.get("inverse", false);

  return std::make_shared<ForwardHaar>(comm, box, use_inverse);
}

LIBLSS_REGISTER_FORWARD_IMPL(Haar, build_haar);

// ARES TAG: num_authors = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
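An illustrative, self-contained sketch (not part of the commit) of the same 1D Haar butterfly used by haar_1d/ihaar_1d above, written against std::vector with a scratch buffer instead of the in-place multi_array views, plus a round-trip check. The function names are hypothetical.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <vector>

// Forward Haar transform: repeatedly split the leading block into
// smooth (sum) and detail (difference) halves, each scaled by sqrt(1/2).
static void haar_1d_ref(std::vector<double> &v) {
  const double s = std::sqrt(0.5);
  std::vector<double> tmp(v.size());
  for (std::size_t k = v.size(); k > 1; k /= 2) {
    for (std::size_t i = 0; i < k / 2; i++) {
      tmp[i] = (v[2 * i] + v[2 * i + 1]) * s;         // smooth half
      tmp[i + k / 2] = (v[2 * i] - v[2 * i + 1]) * s; // detail half
    }
    std::copy(tmp.begin(), tmp.begin() + k, v.begin());
  }
}

// Inverse transform: undo the passes in the opposite order.
static void ihaar_1d_ref(std::vector<double> &v) {
  const double s = std::sqrt(0.5);
  std::vector<double> tmp(v.size());
  for (std::size_t k = 1; 2 * k <= v.size(); k *= 2) {
    for (std::size_t i = 0; i < k; i++) {
      tmp[2 * i] = (v[i] + v[i + k]) * s;
      tmp[2 * i + 1] = (v[i] - v[i + k]) * s;
    }
    std::copy(tmp.begin(), tmp.begin() + 2 * k, v.begin());
  }
}

int main() {
  std::vector<double> x = {1, 2, 3, 4, 5, 6, 7, 8}, y = x;
  haar_1d_ref(y);  // analysis
  ihaar_1d_ref(y); // synthesis
  for (std::size_t i = 0; i < x.size(); i++)
    assert(std::abs(x[i] - y[i]) < 1e-12);
  std::puts("Haar round-trip OK (power-of-two length assumed)");
  return 0;
}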
65
extra/hades/libLSS/physics/haar.hpp
Normal file
@@ -0,0 +1,65 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/haar.hpp
    Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_HADES_FORWARD_HAAR_HPP
# define __LIBLSS_HADES_FORWARD_HAAR_HPP
# pragma once

# include "libLSS/physics/forward_model.hpp"
# include "libLSS/tools/fusewrapper.hpp"
# include "libLSS/tools/errors.hpp"
# include "libLSS/physics/forwards/registry.hpp"

namespace LibLSS {

  /**
   * This class applies a (possibly inverse) 3D Haar wavelet transform to the
   * input real field.
   */
  class ForwardHaar : public BORGForwardModel {
  protected:
    bool do_inverse;
    ModelInput<3> hold_input;
    ModelInputAdjoint<3> hold_ag_input;

  public:
    /**
     * Constructor.
     */
    explicit ForwardHaar(
        MPI_Communication *comm, const BoxModel &box, bool inverse = false)
        : BORGForwardModel(comm, box), do_inverse(inverse) {
      if (comm->size() > 1) {
        error_helper<ErrorParams>(
            "MPI is not supported. Comm size must be equal to one.");
      }
      ensureInputEqualOutput();
    }

    virtual PreferredIO getPreferredInput() const { return PREFERRED_REAL; }
    virtual PreferredIO getPreferredOutput() const { return PREFERRED_REAL; }

    virtual void forwardModel_v2(ModelInput<3> delta_init);
    virtual void getDensityFinal(ModelOutput<3> delta_output);

    virtual void adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta);
    virtual void
    getAdjointModelOutput(ModelOutputAdjoint<3> out_gradient_delta);
  };

} // namespace LibLSS

LIBLSS_REGISTER_FORWARD_DECL(Haar);

#endif

// ARES TAG: num_authors = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
138
extra/hades/libLSS/physics/hades_log.cpp
Normal file
@@ -0,0 +1,138 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/hades_log.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include "libLSS/physics/forward_model.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/tools/errors.hpp"
#include "libLSS/physics/hades_log.hpp"

using namespace LibLSS;

HadesLog::HadesLog(
    MPI_Communication *comm, const BoxModel &box, double ai_,
    bool shifted_mean_)
    : BORGForwardModel(comm, box), ai(ai_), D_init(1.0),
      shifted_mean(shifted_mean_) {
  setupDefault();
}

void HadesLog::forwardModelSimple(CArrayRef &delta_init) {
  error_helper<ErrorNotImplemented>(
      "No forwardModelSimple in Log forward model");
}

void HadesLog::clearAdjointGradient() { hold_in_gradient.clear(); }

void HadesLog::forwardModel_v2(ModelInput<3> delta_init) {
  ConsoleContext<LOG_DEBUG> ctx("forward Hades Log");

  delta_init.setRequestedIO(PREFERRED_REAL);

  double G = 1; // Growth TBI
  // Only bother with real values (no padding)
  auto strict_field = delta_init.getRealConst()[lo_mgr->strict_range()];
  // First part of the forward model: exponentiation and rescaling
  auto fdelta = std::exp((G / D_init) * fwrap(strict_field));

  // Compute the mean and save it for later

  if (shifted_mean) {
    rho_mean = fdelta.sum() / (N0 * N1 * N2);
    comm->all_reduce_t(MPI_IN_PLACE, &rho_mean, 1, MPI_SUM);
    fwrap(tmp_real_field->get_array()[lo_mgr->strict_range()]) =
        fdelta / rho_mean - 1;
  } else
    fwrap(tmp_real_field->get_array()[lo_mgr->strict_range()]) = fdelta;
}

void HadesLog::getDensityFinal(ModelOutput<3> delta_output) {
  delta_output.setRequestedIO(PREFERRED_REAL);

  // Compute zero mean density.
  fwrap(delta_output.getRealOutput()[lo_mgr->strict_range()]) =
      fwrap(tmp_real_field->get_array()[lo_mgr->strict_range()]);
}

void HadesLog::updateCosmo() {
  ConsoleContext<LOG_DEBUG> ctx("Hades Log cosmo update");

  if (old_params != cosmo_params) {
    Cosmology cosmo(cosmo_params);

    D_init = cosmo.d_plus(ai) /
             cosmo.d_plus(1.0); // Scale factor for initial conditions
    old_params = cosmo_params;
  }
}

void HadesLog::forwardModelRsdField(ArrayRef &deltaf, double *vobs_ext) {
  error_helper<ErrorNotImplemented>("No RSD support in Log forward model");
}

void HadesLog::adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta) {
  ConsoleContext<LOG_DEBUG> ctx("adjoint Hades Log");
  // Build strict range views (we do not want to see the FFTW padding); ensure
  // that the view object lives until the end of this function.
  in_gradient_delta.setRequestedIO(PREFERRED_REAL);
  auto in_gradient_view =
      in_gradient_delta.getRealConst()[lo_mgr->strict_range()];
  auto input_view = tmp_real_field->get_array()[lo_mgr->strict_range()];
  // Wrap for automated vectorization.
  auto w_gradient = fwrap(in_gradient_view);

  double G = 1.0;
  // Recompute forward transforms
  if (shifted_mean) {
    auto fdelta = (fwrap(input_view) + 1) * rho_mean;

    // Gradient of the denominator
    A_mean = (w_gradient * fdelta).sum() / rho_mean / (N0 * N1 * N2);
    comm->all_reduce_t(MPI_IN_PLACE, &A_mean, 1, MPI_SUM);
    ctx.format(
        "D_init = %g, A_mean = %g, rho_mean = %g", D_init, A_mean, rho_mean);
  }

  hold_in_gradient = std::move(in_gradient_delta);
}

void HadesLog::getAdjointModelOutput(ModelOutputAdjoint<3> ag_delta_output) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  ag_delta_output.setRequestedIO(PREFERRED_REAL);
  auto in_gradient_view =
      hold_in_gradient.getRealConst()[lo_mgr->strict_range()];
  auto out_gradient_view =
      ag_delta_output.getRealOutput()[lo_mgr->strict_range()];
  auto input_view = tmp_real_field->get_array()[lo_mgr->strict_range()];

  // Wrap for automated vectorization.
  double G = 1.0;
  auto w_gradient = fwrap(in_gradient_view);
  if (shifted_mean) {
    auto fdelta = (fwrap(input_view) + 1) * rho_mean;

    // Complete gradient of numerator and denominator
    fwrap(out_gradient_view) =
        (w_gradient - A_mean) * fdelta * (G / (D_init * rho_mean));
  } else {
    auto fdelta = (fwrap(input_view));

    // Complete gradient of numerator and denominator
    fwrap(out_gradient_view) = (w_gradient) * fdelta * (G / D_init);
  }
}

void HadesLog::releaseParticles() {}

static std::shared_ptr<BORGForwardModel> build_hades_log(
    MPI_Communication *comm, BoxModel const &box, PropertyProxy const &params) {
  double ai = params.get<double>("a_initial");
  return std::make_shared<HadesLog>(comm, box, ai);
}

LIBLSS_REGISTER_FORWARD_IMPL(HADES_LOG, build_hades_log);
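For reference, a sketch (not part of the commit) of the mapping implemented above in the shifted_mean branch, and of the adjoint assembled by adjointModel_v2 and getAdjointModelOutput (N is the total number of cells and G/D the scaling applied at exponentiation):

\[
  \rho_i = e^{(G/D)\,\delta_i}, \qquad
  \bar\rho = \frac{1}{N}\sum_j \rho_j, \qquad
  \delta^{\rm out}_i = \frac{\rho_i}{\bar\rho} - 1,
\]

and, for an incoming gradient w,

\[
  \Big(\nabla_{\delta}\,\langle w, \delta^{\rm out}\rangle\Big)_i
  = \frac{G}{D\,\bar\rho}\,\rho_i\,\big(w_i - A\big),
  \qquad
  A = \frac{1}{N\,\bar\rho}\sum_j w_j\,\rho_j,
\]

where A is the quantity accumulated as A_mean in the code.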
56
extra/hades/libLSS/physics/hades_log.hpp
Normal file
@@ -0,0 +1,56 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/hades_log.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_HADES_LOG_FORWARD_MODEL_HPP
#define __LIBLSS_HADES_LOG_FORWARD_MODEL_HPP
#pragma once

#include "libLSS/physics/forward_model.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/tools/errors.hpp"
#include "libLSS/physics/forwards/registry.hpp"

namespace LibLSS {

  class HadesLog : public BORGForwardModel {
  protected:
    double ai, D_init;
    double rho_mean, A_mean;
    ModelInputAdjoint<3> hold_in_gradient;
    CosmologicalParameters old_params;
    bool shifted_mean;

  public:
    explicit HadesLog(
        MPI_Communication *comm, const BoxModel &box,
        double ai_, bool shifted_mean_ = true);

    virtual void forwardModelSimple(CArrayRef &delta_init);

    virtual void forwardModel_v2(ModelInput<3> delta_init);

    virtual void getDensityFinal(ModelOutput<3> delta_output);
    virtual void updateCosmo();

    virtual void forwardModelRsdField(ArrayRef &deltaf, double *vobs_ext);

    virtual void adjointModel_v2(ModelInputAdjoint<3> in_gradient_delta);

    virtual void getAdjointModelOutput(ModelOutputAdjoint<3> ag_delta_output);

    virtual void clearAdjointGradient();

    virtual void releaseParticles();
  };

} // namespace LibLSS

LIBLSS_REGISTER_FORWARD_DECL(HADES_LOG);

#endif
150
extra/hades/libLSS/physics/hades_pt.cpp
Normal file
@@ -0,0 +1,150 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/hades_pt.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/

#include "libLSS/physics/forward_model.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/tools/errors.hpp"
#include "libLSS/physics/hades_pt.hpp"
#include "libLSS/physics/forwards/registry.hpp"

using namespace LibLSS;

HadesLinear::HadesLinear(
    MPI_Communication *comm, const BoxModel &box, const BoxModel &box_out,
    double ai_, double af_)
    : BORGForwardModel(comm, box, box_out), ai(ai_), af(af_), D_init(0) {
  ensureInputEqualOutput();
  setupDefault();
}

void HadesLinear::forwardModelSimple(CArrayRef &delta_init) {
  error_helper<ErrorNotImplemented>(
      "No forwardModelSimple in Linear forward model");
}

PreferredIO HadesLinear::getPreferredInput() const { return PREFERRED_NONE; }
PreferredIO HadesLinear::getPreferredOutput() const { return PREFERRED_NONE; }

void HadesLinear::forwardModel_v2(ModelInput<3> delta_init) {
  LIBLSS_AUTO_DEBUG_CONTEXT2(ctx, "HadesLinear::forwardModel_v2");

  double G = 1; // Growth TBI
  // Only bother with real values (no padding)
  ctx.print(boost::format("D_init=%g") % D_init);
  // Compute zero mean density.
  PreferredIO choice = delta_init.current;
  delta_init.setRequestedIO(choice);
  switch (choice) {
  case PREFERRED_REAL: {
    auto strict_field = tmp_real_field->get_array()[lo_mgr->strict_range()];
    fwrap(strict_field) =
        fwrap(delta_init.getRealConst()[lo_mgr->strict_range()]) / D_init;
    break;
  }
  case PREFERRED_FOURIER: {
    auto &strict_field = tmp_complex_field->get_array();
    fwrap(strict_field) = fwrap(delta_init.getFourierConst()) / D_init;
    break;
  }
  default:
    error_helper<ErrorNotImplemented>("Invalid IO");
  }
  lastInput = currentOutput = choice;
}

void HadesLinear::getDensityFinal(ModelOutput<3> delta_output) {
  delta_output.setRequestedIO(currentOutput);
  switch (currentOutput) {
  case PREFERRED_REAL:
    fwrap(delta_output.getRealOutput()) = tmp_real_field->get_array();
    break;
  case PREFERRED_FOURIER:
    fwrap(delta_output.getFourierOutput()) = tmp_complex_field->get_array();
    break;
  default:
    error_helper<ErrorNotImplemented>("Invalid IO");
    break;
  }
}

void HadesLinear::updateCosmo() {
  ConsoleContext<LOG_DEBUG> ctx("HadesLinear::updateCosmo");

  Cosmology cosmo(cosmo_params);

  D_init = cosmo.d_plus(ai) /
           cosmo.d_plus(af); // Scale factor for initial conditions
}

void HadesLinear::forwardModelRsdField(ArrayRef &deltaf, double *vobs_ext) {
  error_helper<ErrorNotImplemented>("No RSD support in Linear forward model");
}

void HadesLinear::adjointModel_v2(ModelInputAdjoint<3> ag_delta_input) {
  ConsoleContext<LOG_DEBUG> ctx("HadesLinear::adjointModel_v2");

  ag_delta_input.setRequestedIO(currentOutput);

  // Compute zero mean density.
  switch (currentOutput) {
  case PREFERRED_REAL: {
    auto strict_field = tmp_real_field->get_array()[lo_mgr->strict_range()];
    fwrap(strict_field) =
        fwrap(ag_delta_input.getRealConst()[lo_mgr->strict_range()]) / D_init;
    break;
  }
  case PREFERRED_FOURIER: {
    auto &strict_field = tmp_complex_field->get_array();
    fwrap(strict_field) = fwrap(ag_delta_input.getFourierConst()) / D_init;
    break;
  }
  default:
    error_helper<ErrorNotImplemented>("Invalid IO");
  }
}

void HadesLinear::getAdjointModelOutput(ModelOutputAdjoint<3> ag_delta_output) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  //if (lastInput != ag_delta_output.current) {
  //  error_helper<ErrorBadState>(
  //      "The forward and adjoint gradient pipeline is inconsistent.");
  //}

  ag_delta_output.setRequestedIO(currentOutput);

  switch (currentOutput) {
  case PREFERRED_REAL: {
    auto strict_field = tmp_real_field->get_array()[lo_mgr->strict_range()];
    auto w_gradient2 =
        fwrap(ag_delta_output.getRealOutput()[lo_mgr->strict_range()]);
    w_gradient2 = strict_field;
    break;
  }
  case PREFERRED_FOURIER: {
    auto strict_field = tmp_complex_field->get_array();
    auto w_gradient2 = fwrap(ag_delta_output.getFourierOutput());
    w_gradient2 = strict_field;
    break;
  }
  default:
    error_helper<ErrorNotImplemented>("Invalid IO");
  }
}

void HadesLinear::releaseParticles() {}

static std::shared_ptr<BORGForwardModel> build_hades_linear(
    MPI_Communication *comm, BoxModel const &box, PropertyProxy const &params) {
  double ai = params.get<double>("a_initial");
  double af = params.get<double>("a_final");
  return std::make_shared<HadesLinear>(comm, box, box, ai, af);
}

LIBLSS_REGISTER_FORWARD_IMPL(HADES_PT, build_hades_linear);
68
extra/hades/libLSS/physics/hades_pt.hpp
Normal file
@@ -0,0 +1,68 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/hades_pt.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_HADES_LINEAR_FORWARD_MODEL_HPP
#define __LIBLSS_HADES_LINEAR_FORWARD_MODEL_HPP

#include "libLSS/physics/forward_model.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/tools/errors.hpp"
#include "libLSS/physics/forwards/registry.hpp"

namespace LibLSS {

  /**
   * This is the example class implementing the linear gravity model (also
   * known as the PT model). It really does only two things: a fast Fourier
   * transform and a scaling by the linear growth function.
   */
  class HadesLinear : public BORGForwardModel {
  protected:
    double ai, af, D_init;
    PreferredIO currentOutput, lastInput;

  public:
    /**
     * Construct a new object.
     * @param comm an MPI communicator
     * @param box the box describing the input physical grid
     * @param box_out the box describing the output physical grid (at the moment
     *                box == box_out is required)
     * @param ai the universe scale factor at which the initial conditions
     *           are provided.
     */
    explicit HadesLinear(
        MPI_Communication *comm, const BoxModel &box, const BoxModel &box_out,
        double ai_, double af_);

    virtual void forwardModelSimple(CArrayRef &delta_init);

    virtual PreferredIO getPreferredInput() const;
    virtual PreferredIO getPreferredOutput() const;

    virtual void forwardModel_v2(ModelInput<3> delta_init);

    virtual void getDensityFinal(ModelOutput<3> delta_output);

    virtual void updateCosmo();

    virtual void forwardModelRsdField(ArrayRef &deltaf, double *vobs_ext);

    virtual void adjointModel_v2(ModelInputAdjoint<3> ag_delta_input);

    virtual void getAdjointModelOutput(ModelOutputAdjoint<3> ag_delta_output);

    virtual void releaseParticles();
  };

} // namespace LibLSS

LIBLSS_REGISTER_FORWARD_DECL(HADES_PT);

#endif
70
extra/hades/libLSS/physics/hermitic.hpp
Normal file
@@ -0,0 +1,70 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/hermitic.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_PHYSICS_HERMITIC_HPP
# define __LIBLSS_PHYSICS_HERMITIC_HPP

# include "libLSS/tools/errors.hpp"
# include "libLSS/physics/forward_model.hpp"
# include "libLSS/physics/forwards/registry.hpp"
# include "libLSS/tools/fusewrapper.hpp"
# include "libLSS/tools/hermiticity_fixup.hpp"

namespace LibLSS {

  class ForwardHermiticOperation : public BORGForwardModel {
  protected:
    Hermiticity_fixer<double, 3> fixer;
    ModelInput<3> hold_input;
    ModelInputAdjoint<3> hold_ag_input;

  public:
    explicit ForwardHermiticOperation(
        MPI_Communication *comm, const BoxModel &box)
        : BORGForwardModel(comm, box), fixer(lo_mgr) {}

    virtual PreferredIO getPreferredInput() const { return PREFERRED_FOURIER; }
    virtual PreferredIO getPreferredOutput() const { return PREFERRED_FOURIER; }

    virtual void forwardModel_v2(ModelInput<3> input) {
      input.setRequestedIO(PREFERRED_FOURIER);
      hold_input = std::move(input);
    }

    virtual void getDensityFinal(ModelOutput<3> output) {
      LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
      output.setRequestedIO(PREFERRED_FOURIER);
      fwrap(output.getFourierOutput()) = fwrap(hold_input.getFourierConst());
      fixer.forward(output.getFourierOutput());
    }

    virtual void clearAdjointGradient() {
      hold_input.clear();
      hold_ag_input.clear();
    }

    virtual void adjointModel_v2(ModelInputAdjoint<3> ag_input) {
      ag_input.setRequestedIO(PREFERRED_FOURIER);
      hold_ag_input = std::move(ag_input);
    }

    virtual void getAdjointModelOutput(ModelOutputAdjoint<3> ag_output) {
      LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
      ag_output.setRequestedIO(PREFERRED_FOURIER);
      fwrap(ag_output.getFourierOutput()) = hold_ag_input.getFourierConst();
      fixer.adjoint(ag_output.getFourierOutput());
    }
  };

} // namespace LibLSS

#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2014-2020
21
extra/hades/libLSS/physics/likelihoods/base.cpp
Normal file
@@ -0,0 +1,21 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/likelihoods/base.cpp
    Copyright (C) 2018 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include "libLSS/physics/likelihoods/base.hpp"

namespace LibLSS {
  namespace Likelihood {
    const std::string MPI = "MPI", COLOR_MAP = "COLOR_MAP", GRID = "GRID",
                      MPI_GRID = "MPI_GRID", GRID_LENGTH = "GRID_LENGTH",
                      DATA_GRID = "DATA_GRID";
  }
} // namespace LibLSS

// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018
114
extra/hades/libLSS/physics/likelihoods/base.hpp
Normal file
114
extra/hades/libLSS/physics/likelihoods/base.hpp
Normal file
|
@ -0,0 +1,114 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/likelihoods/base.hpp
    Copyright (C) 2018 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_BORG_LIKELIHOODS_BASE_HPP
# define __LIBLSS_BORG_LIKELIHOODS_BASE_HPP

# include <string>
# include <boost/any.hpp>
# include <map>
# include <memory>
# include <array>
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/tools/defer.hpp"
# include "libLSS/tools/errors.hpp"

namespace LibLSS {

  typedef std::map<std::string, boost::any> LikelihoodInfo;

  namespace Likelihood {

    typedef boost::multi_array<size_t, 1> GridSize;
    typedef boost::multi_array<double, 1> GridLengths;

    extern std::string const MPI, COLOR_MAP, DATA_GRID, GRID, MPI_GRID,
        GRID_LENGTH;

    template <typename T>
    T query(LikelihoodInfo const &info, std::string const &key) {
      auto i = info.find(key);
      if (i == info.end()) {
        error_helper<ErrorBadState>("No key " + key + " in info dictionary");
      }
      try {
        return boost::any_cast<T>(i->second);
      } catch (boost::bad_any_cast &e) {
        error_helper<ErrorBadState>(
            "Type incompatible in any_cast (stored=" +
            std::string(i->second.type().name()) +
            "), requested=" + std::string(typeid(T).name()));
      }
    }

    template <typename T>
    T query_default(
        LikelihoodInfo const &info, std::string const &key,
        T const &default_value) {
      auto i = info.find(key);
      if (i == info.end()) {
        return default_value;
      }
      try {
        return boost::any_cast<T>(i->second);
      } catch (boost::bad_any_cast &e) {
        error_helper<ErrorBadState>(
            "Type incompatible in any_cast (stored=" +
            std::string(i->second.type().name()) +
            "), requested=" + std::string(typeid(T).name()));
      }
    }

    inline MPI_Communication *getMPI(LikelihoodInfo const &info) {
      return query<MPI_Communication *>(info, MPI);
    }

    template <typename T, int N>
    auto getArray(LikelihoodInfo const &info, std::string const &key) {
      return query<std::shared_ptr<boost::multi_array_ref<T, N>>>(info, key);
    }

    template <typename T, int N>
    auto getPromisedArray(LikelihoodInfo const &info, std::string const &key) {
      return query<PromisePointer<boost::multi_array_ref<T, N>>>(info, key);
    }

    template <size_t N, size_t start, size_t skip, typename T>
    auto multi_array_to_std(boost::multi_array_ref<T, 1> const &a) {
      std::array<T, N> b;
      for (size_t i = 0, j = start; i < N; i++, j += skip)
        b[i] = a[j];
      return b;
    }

    template <typename T, size_t N>
    auto diff_array(std::array<T, N> const &a, std::array<T, N> const &b) {
      std::array<T, N> c;
      for (size_t i = 0; i < N; i++)
        c[i] = a[i] - b[i];
      return c;
    }

    inline auto gridResolution(LikelihoodInfo const &info) {
      return multi_array_to_std<3, 0, 1>(query<GridSize>(info, GRID));
    }

    inline auto gridSide(LikelihoodInfo const &info) {
      auto sides = query<GridLengths>(info, GRID_LENGTH);
      auto a0 = multi_array_to_std<3, 0, 2>(sides);
      auto a1 = multi_array_to_std<3, 1, 2>(sides);

      return diff_array(a1, a0);
    }

    inline auto gridCorners(LikelihoodInfo const &info) {
      return multi_array_to_std<3, 0, 2>(query<GridLengths>(info, GRID_LENGTH));
    }

  } // namespace Likelihood

} // namespace LibLSS

#endif

// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018
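Editor's note (illustrative, not part of the import): LikelihoodInfo is an untyped property map keyed by the string constants defined in base.cpp, and the helpers above are the intended way to read it back. A minimal sketch, assuming a 64^3 grid with a side length of 100 (placeholder values):

// Sketch only: populate a LikelihoodInfo and query it with the helpers above.
#include <boost/multi_array.hpp>
#include "libLSS/physics/likelihoods/base.hpp"

using namespace LibLSS;

void setupInfo(MPI_Communication *comm) {
  LikelihoodInfo info;

  Likelihood::GridSize N(boost::extents[3]);
  Likelihood::GridLengths corners(boost::extents[6]);
  N[0] = N[1] = N[2] = 64;       // placeholder resolution
  for (int i = 0; i < 3; i++) {
    corners[2 * i] = 0.;         // lower corner of axis i
    corners[2 * i + 1] = 100.;   // upper corner of axis i
  }

  info[Likelihood::MPI] = comm;
  info[Likelihood::GRID] = N;
  info[Likelihood::GRID_LENGTH] = corners;

  auto resolution = Likelihood::gridResolution(info); // {64, 64, 64}
  auto sides = Likelihood::gridSide(info);            // {100, 100, 100}
  auto comm2 = Likelihood::getMPI(info);              // same pointer as comm
  (void)resolution; (void)sides; (void)comm2;
}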
371
extra/hades/libLSS/physics/model_io.cpp
Normal file
371
extra/hades/libLSS/physics/model_io.cpp
Normal file
|
@ -0,0 +1,371 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/model_io.cpp
|
||||
Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include <boost/variant.hpp>
|
||||
#include <memory>
|
||||
#include "libLSS/tools/fusewrapper.hpp"
|
||||
#include "libLSS/physics/model_io.hpp"
|
||||
#include "libLSS/tools/overload.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
|
||||
using boost::multi_array_types::index_range;
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void LibLSS::ModelInputBase<Nd, Super>::setRequestedIO(PreferredIO opt) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
Console::instance().c_assert(
|
||||
!this->uninitialized, "Model must be initialized");
|
||||
this->active = opt;
|
||||
if (opt == this->current)
|
||||
return;
|
||||
switch (opt) {
|
||||
case PREFERRED_FOURIER:
|
||||
transformInputRealToFourier();
|
||||
break;
|
||||
case PREFERRED_REAL:
|
||||
transformInputFourierToReal();
|
||||
break;
|
||||
default:
|
||||
Console::instance().c_assert(false, "Invalid IO");
|
||||
break;
|
||||
}
|
||||
this->ioIsTransformed = true;
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelInputBase<Nd, Super>::needDestroyInput() {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
typedef typename ModelIO<Nd>::HolderType HolderType;
|
||||
|
||||
this->holder = boost::apply_visitor(
|
||||
overload(
|
||||
[&ctx](auto *v) {
|
||||
ctx.print("Pass through");
|
||||
return HolderType(v);
|
||||
},
|
||||
[&](CArrayRef const *v) {
|
||||
ctx.print("Protect complex");
|
||||
auto p = this->mgr->allocate_ptr_complex_array();
|
||||
fwrap(*p) = fwrap(*v);
|
||||
auto h = HolderType(&p->get_array());
|
||||
this->hold_original = std::move(p);
|
||||
return h;
|
||||
},
|
||||
[&](ArrayRef const *v) {
|
||||
ctx.print("Protect real");
|
||||
auto p = this->mgr->allocate_ptr_array();
|
||||
fwrap(*p) = fwrap(*v);
|
||||
auto h = HolderType(&p->get_array());
|
||||
this->hold_original = std::move(p);
|
||||
return h;
|
||||
}),
|
||||
this->holder);
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelInputBase<Nd, Super>::transformInputRealToFourier() {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
Console::instance().c_assert(
|
||||
!this->uninitialized, "Model must be initialized");
|
||||
if (!this->tmp_fourier) {
|
||||
this->tmp_fourier = this->mgr->allocate_ptr_complex_array();
|
||||
}
|
||||
auto volatile_real = this->mgr->allocate_array();
|
||||
|
||||
auto plan = this->mgr->create_r2c_plan(
|
||||
volatile_real.get_array().data(), this->tmp_fourier->get_array().data());
|
||||
|
||||
needDestroyInput();
|
||||
try {
|
||||
this->mgr->execute_r2c(
|
||||
plan, boost::get<ArrayRef *>(this->holder)->data(),
|
||||
this->tmp_fourier->get_array().data());
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Runtime error thrown: " + std::string(e.what()));
|
||||
}
|
||||
this->mgr->destroy_plan(plan);
|
||||
|
||||
if (scaler != 1) {
|
||||
ctx.format(" -> Scaler %g", scaler);
|
||||
auto w_c = fwrap(this->tmp_fourier->get_array());
|
||||
w_c = w_c * scaler;
|
||||
}
|
||||
|
||||
//this->hermitic_fixer(this->tmp_fourier->get_array());
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelInputBase<Nd, Super>::transformInputFourierToReal() {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
Console::instance().c_assert(
|
||||
!this->uninitialized, "Model must be initialized");
|
||||
if (!this->tmp_real) {
|
||||
this->tmp_real = this->mgr->allocate_ptr_array();
|
||||
}
|
||||
auto volatile_fourier = this->mgr->allocate_complex_array();
|
||||
|
||||
auto plan = this->mgr->create_c2r_plan(
|
||||
volatile_fourier.get_array().data(), this->tmp_real->get_array().data());
|
||||
|
||||
needDestroyInput();
|
||||
try {
|
||||
this->mgr->execute_c2r(
|
||||
plan, boost::get<CArrayRef *>(this->holder)->data(),
|
||||
this->tmp_real->get_array().data());
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Runtime error thrown: " + std::string(e.what()));
|
||||
}
|
||||
|
||||
this->mgr->destroy_plan(plan);
|
||||
|
||||
if (scaler != 1) {
|
||||
ctx.format(" -> Scaler %g", scaler);
|
||||
auto w_r = fwrap(this->tmp_real->get_array());
|
||||
w_r = w_r * scaler;
|
||||
}
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelOutputBase<Nd, Super>::transformOutputFourierToReal() {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
Console::instance().c_assert(
|
||||
!this->uninitialized, "Model must be initialized");
|
||||
auto volatile_fourier = this->mgr->allocate_ptr_complex_array();
|
||||
|
||||
try {
|
||||
auto &array = *boost::get<ArrayRef *>(this->holder);
|
||||
auto plan = this->mgr->create_c2r_plan(
|
||||
volatile_fourier->get_array().data(), array.data());
|
||||
|
||||
this->mgr->execute_c2r(
|
||||
plan, this->tmp_fourier->get_array().data(), array.data());
|
||||
this->mgr->destroy_plan(plan);
|
||||
|
||||
if (scaler != 1) {
|
||||
ctx.format(" -> Scaler %g", scaler);
|
||||
auto w_h = fwrap(array);
|
||||
w_h = w_h * scaler;
|
||||
}
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Runtime error thrown: " + std::string(e.what()));
|
||||
}
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelOutputBase<Nd, Super>::transformOutputRealToFourier() {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
Console::instance().c_assert(
|
||||
!this->uninitialized, "Model must be initialized");
|
||||
auto volatile_real = this->mgr->allocate_ptr_array();
|
||||
try {
|
||||
auto &c_array = *boost::get<CArrayRef *>(this->holder);
|
||||
auto plan = this->mgr->create_r2c_plan(
|
||||
volatile_real->get_array().data(), c_array.data());
|
||||
|
||||
this->mgr->execute_r2c(
|
||||
plan, this->tmp_real->get_array().data(), c_array.data());
|
||||
|
||||
this->mgr->destroy_plan(plan);
|
||||
|
||||
if (scaler != 1) {
|
||||
ctx.format(" -> Scaler %g", scaler);
|
||||
auto w_ch = fwrap(c_array);
|
||||
w_ch = w_ch * scaler;
|
||||
}
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Runtime error thrown: " + std::string(e.what()));
|
||||
}
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelOutputBase<Nd, Super>::triggerTransform() {
|
||||
if (this->alreadyTransformed || this->uninitialized)
|
||||
return;
|
||||
if (this->ioIsTransformed) {
|
||||
switch (this->current) {
|
||||
case PREFERRED_FOURIER:
|
||||
transformOutputRealToFourier();
|
||||
break;
|
||||
case PREFERRED_REAL:
|
||||
transformOutputFourierToReal();
|
||||
break;
|
||||
default:
|
||||
Console::instance().c_assert(false, "Invalid IO");
|
||||
break;
|
||||
}
|
||||
}
|
||||
alreadyTransformed = true;
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
ModelOutputBase<Nd, Super>::~ModelOutputBase() {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
triggerTransform();
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelOutputBase<Nd, Super>::setRequestedIO(PreferredIO opt) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
Console::instance().c_assert(
|
||||
!this->uninitialized, "Model must be initialized");
|
||||
this->active = opt;
|
||||
if (opt == this->current)
|
||||
return;
|
||||
Console::instance().c_assert(
|
||||
!this->ioIsTransformed, "Transformation already requested.");
|
||||
switch (this->current) {
|
||||
case PREFERRED_FOURIER:
|
||||
// then opt is REAL
|
||||
ctx.print("Want real ");
|
||||
this->tmp_real = this->mgr->allocate_ptr_array();
|
||||
break;
|
||||
case PREFERRED_REAL:
|
||||
ctx.print("Want fourier");
|
||||
this->tmp_fourier = this->mgr->allocate_ptr_complex_array();
|
||||
break;
|
||||
default:
|
||||
Console::instance().c_assert(false, "Invalid IO");
|
||||
break;
|
||||
}
|
||||
this->ioIsTransformed = true;
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
void ModelOutputBase<Nd, Super>::copyFrom(ModelOutputBase<Nd, Super> &other) {
|
||||
Console::instance().c_assert(
|
||||
!this->uninitialized, "Model must be initialized");
|
||||
//other.triggerTransform();
|
||||
Console::instance().c_assert(
|
||||
this->active == other.active,
|
||||
"this->active and other.active are different");
|
||||
switch (other.active) {
|
||||
case PREFERRED_FOURIER:
|
||||
fwrap(this->getFourierOutput()) = other.getFourierOutput();
|
||||
break;
|
||||
case PREFERRED_REAL:
|
||||
fwrap(this->getRealOutput()) = other.getRealOutput();
|
||||
break;
|
||||
default:
|
||||
Console::instance().c_assert(false, "Invalid IO");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
ModelInput<Nd> ModelInput<Nd, Super>::shallowClone() {
|
||||
return boost::apply_visitor(
|
||||
[this](auto const *v) {
|
||||
return ModelInput<Nd>(
|
||||
this->mgr, this->box, *v, this->hold_original, true, this->scaler);
|
||||
},
|
||||
this->holder);
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
ModelInputAdjoint<Nd> ModelInputAdjoint<Nd, Super>::shallowClone() {
|
||||
return boost::apply_visitor(
|
||||
[this](auto const *v) {
|
||||
return ModelInputAdjoint<Nd>(
|
||||
this->mgr, this->box, *v, this->hold_original, true, this->scaler);
|
||||
},
|
||||
this->holder);
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
ModelOutput<Nd> ModelOutput<Nd, Super>::shallowClone() {
|
||||
return boost::apply_visitor(
|
||||
overload(
|
||||
[this](auto const *v) {
|
||||
error_helper<ErrorBadState>("Output cannot be const");
|
||||
return ModelOutput<Nd>();
|
||||
},
|
||||
[this](auto *v) {
|
||||
return ModelOutput<Nd>(
|
||||
this->mgr, this->box, *v, this->hold_original);
|
||||
}),
|
||||
this->holder);
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
ModelOutputAdjoint<Nd> ModelOutputAdjoint<Nd, Super>::shallowClone() {
|
||||
return boost::apply_visitor(
|
||||
overload(
|
||||
[this](auto const *v) {
|
||||
error_helper<ErrorBadState>("Output cannot be const");
|
||||
return ModelOutputAdjoint<Nd>();
|
||||
},
|
||||
[this](auto *v) {
|
||||
return ModelOutputAdjoint<Nd>(
|
||||
this->mgr, this->box, *v, this->hold_original);
|
||||
}),
|
||||
this->holder);
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
ModelOutput<Nd> ModelOutput<Nd, Super>::makeTempLike() {
|
||||
return boost::apply_visitor(
|
||||
overload(
|
||||
[this](CArrayRef const *v) {
|
||||
auto tmp_p = this->mgr->allocate_ptr_complex_array();
|
||||
auto &tmp = tmp_p->get_array();
|
||||
return ModelOutput<Nd>(this->mgr, this->box, tmp, std::move(tmp_p));
|
||||
},
|
||||
[this](ArrayRef const *v) {
|
||||
auto tmp_p = this->mgr->allocate_ptr_array();
|
||||
auto &tmp = tmp_p->get_array();
|
||||
return ModelOutput<Nd>(this->mgr, this->box, tmp, std::move(tmp_p));
|
||||
}),
|
||||
this->holder);
|
||||
}
|
||||
|
||||
template <size_t Nd, typename Super>
|
||||
ModelOutputAdjoint<Nd> ModelOutputAdjoint<Nd, Super>::makeTempLike() {
|
||||
return boost::apply_visitor(
|
||||
overload(
|
||||
[this](CArrayRef const *v) {
|
||||
auto tmp_p = this->mgr->allocate_ptr_complex_array();
|
||||
auto &tmp = tmp_p->get_array();
|
||||
return ModelOutputAdjoint<Nd>(
|
||||
this->mgr, this->box, tmp, std::move(tmp_p));
|
||||
},
|
||||
[this](ArrayRef const *v) {
|
||||
auto tmp_p = this->mgr->allocate_ptr_array();
|
||||
auto &tmp = tmp_p->get_array();
|
||||
return ModelOutputAdjoint<Nd>(
|
||||
this->mgr, this->box, tmp, std::move(tmp_p));
|
||||
}),
|
||||
this->holder);
|
||||
}
|
||||
|
||||
#define FORCE_IO_SET(q) \
|
||||
template class LibLSS::detail_input::ModelInputBase<q>; \
|
||||
template class LibLSS::detail_output::ModelOutputBase<q>; \
|
||||
template class LibLSS::detail_input::ModelInput<q>; \
|
||||
template class LibLSS::detail_input::ModelInputAdjoint<q>; \
|
||||
template class LibLSS::detail_output::ModelOutput<q>; \
|
||||
template class LibLSS::detail_output::ModelOutputAdjoint<q>;
|
||||
|
||||
// 2d does not work yet because of FFTW_Manager
|
||||
//FORCE_IO_SET(2);
|
||||
FORCE_IO_SET(3);
|
||||
|
||||
// ARES TAG: authors_num = 1
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
||||
// ARES TAG: year(0) = 2020
|
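Editor's note (illustrative, not part of the import): the transforms in model_io.cpp are lazy. A forward element receives whatever representation its caller holds and calls setRequestedIO for the one it wants; the r2c/c2r plans are only created and executed when the two representations differ. A minimal sketch of that handshake inside a hypothetical element:

// Sketch only: an element that may be handed a real-space input but computes in
// Fourier space. All calls below are the ModelInput/ModelOutput API shown above.
#include "libLSS/physics/model_io.hpp"
#include "libLSS/tools/fusewrapper.hpp"

void exampleElement(LibLSS::ModelInput<3> input, LibLSS::ModelOutput<3> output) {
  using namespace LibLSS;

  // If the caller provided a real-space array, this triggers
  // ModelInputBase::transformInputRealToFourier (an r2c FFT on a protected copy).
  input.setRequestedIO(PREFERRED_FOURIER);
  output.setRequestedIO(PREFERRED_FOURIER);

  // Const access never mutates the caller's buffer.
  auto const &delta_hat = input.getFourierConst();

  // A real element would apply its kernel here; this one just copies through.
  fwrap(output.getFourierOutput()) = fwrap(delta_hat);
}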
41
extra/hades/libLSS/physics/model_io.hpp
Normal file
41
extra/hades/libLSS/physics/model_io.hpp
Normal file
|
@ -0,0 +1,41 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/model_io.hpp
    Copyright (C) 2019-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_PHYSICS_MODELIO_HPP
# define __LIBLSS_PHYSICS_MODELIO_HPP

# include <memory>
# include "libLSS/physics/cosmo.hpp"
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/tools/mpi_fftw_helper.hpp"
# include "libLSS/tools/errors.hpp"
# include "libLSS/samplers/core/types_samplers.hpp"
# include <boost/variant.hpp>

# include "libLSS/physics/model_io/box.hpp"
# include "libLSS/physics/model_io/base.hpp"
# include "libLSS/physics/model_io/input.hpp"
# include "libLSS/physics/model_io/output.hpp"

namespace LibLSS {

  using detail_input::ModelInput;
  using detail_input::ModelInputAdjoint;
  using detail_input::ModelInputBase;
  using detail_output::ModelOutput;
  using detail_output::ModelOutputAdjoint;
  using detail_output::ModelOutputBase;

} // namespace LibLSS

#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019-2020
173
extra/hades/libLSS/physics/model_io/base.hpp
Normal file
173
extra/hades/libLSS/physics/model_io/base.hpp
Normal file
|
@ -0,0 +1,173 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/model_io/base.hpp
|
||||
Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#pragma once
|
||||
#ifndef __LIBLSS_PHYSICS_MODELIO_BASE_HPP
|
||||
# define __LIBLSS_PHYSICS_MODELIO_BASE_HPP
|
||||
# include "libLSS/samplers/core/types_samplers.hpp"
|
||||
|
||||
namespace LibLSS {
|
||||
|
||||
enum PreferredIO { PREFERRED_NONE, PREFERRED_FOURIER, PREFERRED_REAL };
|
||||
|
||||
namespace detail_model {
|
||||
|
||||
template <typename T>
|
||||
struct _getPreferredType {};
|
||||
|
||||
template <>
|
||||
struct _getPreferredType<CArrayType::RefArrayType> {
|
||||
constexpr static const auto value = PREFERRED_FOURIER;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct _getPreferredType<CArrayType::ArrayType> {
|
||||
constexpr static const auto value = PREFERRED_FOURIER;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct _getPreferredType<ArrayType::RefArrayType> {
|
||||
constexpr static const auto value = PREFERRED_REAL;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct _getPreferredType<ArrayType::ArrayType> {
|
||||
constexpr static const auto value = PREFERRED_REAL;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
using getPreferredType = _getPreferredType<std::remove_cv_t<T>>;
|
||||
|
||||
/**
|
||||
* @brief Holder to capture different basis of representation of BORG vectors
|
||||
*/
|
||||
template <size_t Nd>
|
||||
struct ModelIO {
|
||||
protected:
|
||||
public:
|
||||
enum { Ndims = Nd };
|
||||
typedef ArrayType::ArrayType Array;
|
||||
typedef ArrayType::RefArrayType ArrayRef;
|
||||
typedef CArrayType::ArrayType CArray;
|
||||
typedef CArrayType::RefArrayType CArrayRef;
|
||||
typedef NBoxModel<Nd> BoxModelIO;
|
||||
typedef std::shared_ptr<void> Holder;
|
||||
|
||||
typedef FFTW_Manager<double, Nd> Mgr;
|
||||
typedef std::shared_ptr<Mgr> Mgr_p;
|
||||
typedef boost::variant<
|
||||
CArrayRef *, CArrayRef const *, ArrayRef *, ArrayRef const *>
|
||||
HolderType;
|
||||
|
||||
Mgr_p mgr;
|
||||
BoxModelIO box;
|
||||
PreferredIO current, active;
|
||||
HolderType holder;
|
||||
enum Direction { INPUT, OUTPUT };
|
||||
|
||||
typedef typename Mgr::U_ArrayReal U_ArrayReal;
|
||||
typedef typename Mgr::U_ArrayFourier U_ArrayFourier;
|
||||
|
||||
std::unique_ptr<U_ArrayReal> tmp_real;
|
||||
std::unique_ptr<U_ArrayFourier> tmp_fourier;
|
||||
|
||||
Direction direction;
|
||||
bool ioIsTransformed;
|
||||
bool uninitialized;
|
||||
|
||||
Holder hold_original;
|
||||
|
||||
/**
|
||||
* @brief Construct a new uninitialized Model IO object
|
||||
*
|
||||
*/
|
||||
ModelIO() : uninitialized(true), current(PREFERRED_NONE), active(PREFERRED_NONE) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new Model IO object, with Fourier default representation
|
||||
*
|
||||
* @param t
|
||||
*/
|
||||
template <typename T>
|
||||
ModelIO(Mgr_p mgr_, BoxModelIO const &box_, T &t, Holder original)
|
||||
: mgr(mgr_), box(box_), holder(&t),
|
||||
current(getPreferredType<T>::value), ioIsTransformed(false),
|
||||
uninitialized(false), hold_original(original) {
|
||||
active = current;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Construct a new Model IO object, with Fourier default representation
|
||||
*
|
||||
* @param t
|
||||
*/
|
||||
template <typename T>
|
||||
ModelIO(Mgr_p mgr_, BoxModelIO const &box_, T &t)
|
||||
: mgr(mgr_), box(box_), holder(&t),
|
||||
current(getPreferredType<T>::value), ioIsTransformed(false),
|
||||
uninitialized(false) {
|
||||
active = current;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Obtain the primary IO buffer from the callee.
|
||||
* It is a variant type.
|
||||
*/
|
||||
HolderType getHolder() { return holder; }
|
||||
|
||||
/**
|
||||
* @brief Clear the associated memory.
|
||||
*
|
||||
* WARNING: use of the object beyond this point is not advised.
|
||||
*/
|
||||
void clear() { hold_original.reset(); }
|
||||
|
||||
/**
|
||||
* @brief bool operator to check whether the IO object is empty/uninitialized.
|
||||
*
|
||||
* @return bool true if initialized, false otherwise.
|
||||
*/
|
||||
operator bool() const { return !uninitialized; }
|
||||
|
||||
protected:
|
||||
/**
|
||||
* @brief Transfer ownership of the IO
|
||||
*
|
||||
* @param other
|
||||
* @return ModelIO&& Return *this
|
||||
*/
|
||||
void transfer(ModelIO &&other) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
mgr = std::move(other.mgr);
|
||||
box = other.box;
|
||||
current = other.current;
|
||||
holder = std::move(other.holder);
|
||||
tmp_real = std::move(other.tmp_real);
|
||||
tmp_fourier = std::move(other.tmp_fourier);
|
||||
direction = other.direction;
|
||||
ioIsTransformed = other.ioIsTransformed;
|
||||
uninitialized = other.uninitialized;
|
||||
other.uninitialized = true;
|
||||
hold_original = std::move(other.hold_original);
|
||||
active = other.active;
|
||||
}
|
||||
|
||||
void hermitic_fixer(CArrayRef &data);
|
||||
};
|
||||
} // namespace detail_model
|
||||
|
||||
using detail_model::ModelIO;
|
||||
|
||||
} // namespace LibLSS
|
||||
|
||||
#endif
|
||||
// ARES TAG: num_authors = 1
|
||||
// ARES TAG: author(0) = Guilhem Lavaux
|
||||
// ARES TAG: year(0) = 2020
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
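Editor's note (illustrative, not part of the import): ModelIO::holder is a boost::variant over mutable/const pointers to the real and complex array types, and model_io.cpp drives it with boost::apply_visitor plus the overload() helper. A reduced, standalone illustration of that dispatch pattern, independent of the libLSS types:

// Standalone sketch of the variant-holder visitation used on ModelIO::holder.
#include <boost/variant.hpp>
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

using RealArray = std::vector<double>;
using ComplexArray = std::vector<std::pair<double, double>>;
using Holder = boost::variant<
    RealArray *, RealArray const *, ComplexArray *, ComplexArray const *>;

struct SizeVisitor : boost::static_visitor<std::size_t> {
  // One templated overload handles all four pointer alternatives.
  template <typename T>
  std::size_t operator()(T *v) const { return v->size(); }
};

int main() {
  RealArray field(64 * 64 * 64);
  Holder h = &field;
  std::cout << boost::apply_visitor(SizeVisitor(), h) << std::endl; // 262144
}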
83
extra/hades/libLSS/physics/model_io/box.hpp
Normal file
83
extra/hades/libLSS/physics/model_io/box.hpp
Normal file
|
@ -0,0 +1,83 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/model_io/box.hpp
    Copyright (C) 2019-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_PHYSICS_MODELIO_BOX_HPP
# define __LIBLSS_PHYSICS_MODELIO_BOX_HPP

# include <memory>
# include "libLSS/physics/cosmo.hpp"
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/tools/mpi_fftw_helper.hpp"
# include "libLSS/tools/errors.hpp"
# include "libLSS/samplers/core/types_samplers.hpp"
# include <boost/variant.hpp>

namespace LibLSS {

  template <size_t Nd>
  struct NBoxModel {};

  template <>
  struct NBoxModel<2> {
    double xmin0, xmin1;
    double L0, L1;
    long N0, N1;

    void fill(std::array<ssize_t, 2> &N) const {
      N[0] = N0;
      N[1] = N1;
    }

    double volume() const { return L0 * L1; }
    long numElements() const { return N0 * N1; }

    bool operator==(NBoxModel<2> const &other) const {
      return xmin0 == other.xmin0 && xmin1 == other.xmin1 && L0 == other.L0 &&
             L1 == other.L1 && N0 == other.N0 && N1 == other.N1;
    }
    bool operator!=(NBoxModel<2> const &other) const {
      return !operator==(other);
    }
  };

  template <>
  struct NBoxModel<3> {
    double xmin0, xmin1, xmin2;
    double L0, L1, L2;
    long N0, N1, N2;

    void fill(std::array<ssize_t, 3> &N) const {
      N[0] = N0;
      N[1] = N1;
      N[2] = N2;
    }

    double volume() const { return L0 * L1 * L2; }
    long numElements() const { return N0 * N1 * N2; }

    bool operator==(NBoxModel<3> const &other) const {
      return xmin0 == other.xmin0 && xmin1 == other.xmin1 &&
             xmin2 == other.xmin2 && L0 == other.L0 && L1 == other.L1 &&
             L2 == other.L2 && N0 == other.N0 && N1 == other.N1 &&
             N2 == other.N2;
    }

    bool operator!=(NBoxModel<3> const &other) const {
      return !operator==(other);
    }
  };

  typedef NBoxModel<3> BoxModel;

} // namespace LibLSS
#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019-2020
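Editor's note (illustrative, not part of the import): NBoxModel is a plain aggregate, so in practice it is filled field by field. Placeholder numbers:

// Sketch only: a 250 Mpc/h cube sampled on a 64^3 grid (placeholder values).
LibLSS::BoxModel box;
box.xmin0 = box.xmin1 = box.xmin2 = 0.0;
box.L0 = box.L1 = box.L2 = 250.0;
box.N0 = box.N1 = box.N2 = 64;

double cellVolume = box.volume() / box.numElements(); // ~59.6 (Mpc/h)^3 per cell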
233
extra/hades/libLSS/physics/model_io/input.hpp
Normal file
233
extra/hades/libLSS/physics/model_io/input.hpp
Normal file
|
@ -0,0 +1,233 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/model_io/input.hpp
|
||||
Copyright (C) 2019-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
namespace LibLSS {
|
||||
|
||||
namespace detail_input {
|
||||
template <typename Ref>
|
||||
struct ref_retriever {
|
||||
|
||||
template <typename U>
|
||||
using detect = typename std::enable_if<
|
||||
std::is_same<U *, Ref *>::value ||
|
||||
std::is_same<U *, Ref const *>::value,
|
||||
Ref const *>::type;
|
||||
template <typename U>
|
||||
using not_detect = typename std::enable_if<
|
||||
!(std::is_same<U *, Ref *>::value ||
|
||||
std::is_same<U *, Ref const *>::value),
|
||||
Ref const *>::type;
|
||||
|
||||
template <typename U>
|
||||
detect<U> operator()(U *u) {
|
||||
return u;
|
||||
}
|
||||
template <typename U>
|
||||
not_detect<U> operator()(U *u) {
|
||||
throw boost::bad_get();
|
||||
}
|
||||
};
|
||||
|
||||
template <size_t Nd>
|
||||
using BoxModelIO = NBoxModel<Nd>;
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
struct _normalization {};
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::CArrayRef, Nd> {
|
||||
static double fwd(BoxModelIO<Nd> const &b) { return 1.0 / b.volume(); }
|
||||
static double adj(BoxModelIO<Nd> const &b) {
|
||||
return b.volume() / b.numElements();
|
||||
}
|
||||
};
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::CArray, Nd>
|
||||
: _normalization<typename ModelIO<Nd>::CArrayRef, Nd> {};
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::ArrayRef, Nd> {
|
||||
static double fwd(BoxModelIO<Nd> const &b) {
|
||||
return b.volume() / b.numElements();
|
||||
}
|
||||
static double adj(BoxModelIO<Nd> const &b) { return 1.0 / b.volume(); }
|
||||
};
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::Array, Nd>
|
||||
: _normalization<typename ModelIO<Nd>::ArrayRef, Nd> {};
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
using normalization = _normalization<std::remove_cv_t<T>, Nd>;
|
||||
|
||||
template <size_t Nd, typename Super = ModelIO<Nd>>
|
||||
class ModelInputBase : public Super {
|
||||
protected:
|
||||
typedef Super super_t;
|
||||
typedef typename super_t::Mgr_p Mgr_p;
|
||||
typedef typename super_t::CArrayRef CArrayRef;
|
||||
typedef typename super_t::ArrayRef ArrayRef;
|
||||
typedef typename super_t::BoxModelIO BoxModelIO;
|
||||
typedef typename super_t::Holder Holder;
|
||||
double scaler;
|
||||
bool protect;
|
||||
|
||||
public:
|
||||
template <typename T>
|
||||
static inline typename std::add_const<T>::type &rdonly(T &t) {
|
||||
return t;
|
||||
}
|
||||
|
||||
ModelInputBase() : super_t() {}
|
||||
|
||||
template <typename T>
|
||||
ModelInputBase(
|
||||
Mgr_p mgr_, BoxModelIO const &box_, T &t, Holder original_,
|
||||
double scaler_)
|
||||
: super_t(mgr_, box_, t, original_), scaler(scaler_) {}
|
||||
|
||||
template <typename T>
|
||||
ModelInputBase(Mgr_p mgr_, BoxModelIO const &box_, T &t, double scaler_)
|
||||
: super_t(mgr_, box_, t), scaler(scaler_) {}
|
||||
|
||||
void needDestroyInput();
|
||||
void setRequestedIO(PreferredIO opt);
|
||||
|
||||
ArrayRef const &getRealConst() const {
|
||||
if (this->ioIsTransformed)
|
||||
return this->tmp_real->get_array();
|
||||
return *boost::apply_visitor(ref_retriever<ArrayRef>(), this->holder);
|
||||
}
|
||||
|
||||
CArrayRef const &getFourierConst() const {
|
||||
if (this->ioIsTransformed)
|
||||
return this->tmp_fourier->get_array();
|
||||
return *boost::apply_visitor(ref_retriever<CArrayRef>(), this->holder);
|
||||
}
|
||||
|
||||
ArrayRef &getReal() {
|
||||
if (this->ioIsTransformed)
|
||||
return this->tmp_real->get_array();
|
||||
try {
|
||||
return *boost::get<ArrayRef *>(this->holder);
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Invalid RW access to input: " + std::string(e.what()));
|
||||
}
|
||||
}
|
||||
|
||||
CArrayRef &getFourier() {
|
||||
if (this->ioIsTransformed)
|
||||
return this->tmp_fourier->get_array();
|
||||
try {
|
||||
return *boost::get<CArrayRef *>(this->holder);
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Invalid RW access to input: " + std::string(e.what()));
|
||||
}
|
||||
}
|
||||
|
||||
void transformInputRealToFourier();
|
||||
void transformInputFourierToReal();
|
||||
|
||||
double scaleFactor() const { return scaler; }
|
||||
|
||||
super_t &operator=(super_t const &) = delete;
|
||||
|
||||
protected:
|
||||
void transfer(ModelInputBase<Nd> &&other) {
|
||||
super_t::transfer(std::move(other));
|
||||
scaler = other.scaler;
|
||||
}
|
||||
};
|
||||
|
||||
template <size_t Nd, typename Super = ModelInputBase<Nd>>
|
||||
class ModelInput : public Super {
|
||||
protected:
|
||||
typedef Super super_t;
|
||||
typedef typename super_t::Mgr_p Mgr_p;
|
||||
typedef typename super_t::CArrayRef CArrayRef;
|
||||
typedef typename super_t::ArrayRef ArrayRef;
|
||||
typedef typename super_t::BoxModelIO BoxModelIO;
|
||||
typedef typename super_t::Holder Holder;
|
||||
|
||||
public:
|
||||
ModelInput() : super_t() {}
|
||||
ModelInput(ModelInput<Nd> &&other) { this->operator=(std::move(other)); }
|
||||
|
||||
template <typename T>
|
||||
ModelInput(
|
||||
Mgr_p mgr_, BoxModelIO const &box_, T &t, Holder original_,
|
||||
bool prenormed = false, double scale = 1.0)
|
||||
: super_t(
|
||||
mgr_, box_, t, original_,
|
||||
scale * (prenormed ? 1 : normalization<T, Nd>::fwd(box_))) {}
|
||||
|
||||
template <typename T>
|
||||
ModelInput(
|
||||
Mgr_p mgr_, BoxModelIO const &box_, T &t, bool prenormed = false,
|
||||
double scale = 1.0)
|
||||
: super_t(
|
||||
mgr_, box_, t,
|
||||
scale * (prenormed ? 1 : normalization<T, Nd>::fwd(box_))) {}
|
||||
|
||||
ModelInput &&operator=(ModelInput &&other) {
|
||||
super_t::transfer(std::move(other));
|
||||
return std::move(*this);
|
||||
}
|
||||
|
||||
ModelInput<Nd> shallowClone();
|
||||
};
|
||||
|
||||
template <size_t Nd, typename Super = ModelInputBase<Nd>>
|
||||
class ModelInputAdjoint : public Super {
|
||||
protected:
|
||||
typedef Super super_t;
|
||||
typedef typename super_t::Mgr_p Mgr_p;
|
||||
typedef typename super_t::CArrayRef CArrayRef;
|
||||
typedef typename super_t::ArrayRef ArrayRef;
|
||||
typedef typename super_t::BoxModelIO BoxModelIO;
|
||||
typedef typename super_t::Holder Holder;
|
||||
|
||||
public:
|
||||
ModelInputAdjoint() : super_t() {}
|
||||
ModelInputAdjoint(ModelInputAdjoint<Nd> &&other) {
|
||||
this->operator=(std::move(other));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ModelInputAdjoint(
|
||||
Mgr_p mgr_, BoxModelIO const &box_, T &t, bool prenormed = false,
|
||||
double scale = 1.0)
|
||||
: super_t(
|
||||
mgr_, box_, t,
|
||||
scale * (prenormed ? 1 : normalization<T, Nd>::adj(box_))) {}
|
||||
|
||||
template <typename T>
|
||||
ModelInputAdjoint(
|
||||
Mgr_p mgr_, BoxModelIO const &box_, T &t, Holder original_,
|
||||
bool prenormed = false, double scale = 1.0)
|
||||
: super_t(
|
||||
mgr_, box_, t, original_,
|
||||
scale * (prenormed ? 1 : normalization<T, Nd>::adj(box_))) {}
|
||||
|
||||
ModelInputAdjoint<Nd> &&operator=(ModelInputAdjoint<Nd> &&other) {
|
||||
super_t::transfer(std::move(other));
|
||||
return std::move(*this);
|
||||
}
|
||||
|
||||
ModelInputAdjoint<Nd> shallowClone();
|
||||
};
|
||||
} // namespace detail_input
|
||||
} // namespace LibLSS
|
||||
|
||||
// ARES TAG: authors_num = 1
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
||||
// ARES TAG: year(0) = 2019-2020
|
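Editor's note (illustrative, not part of the import): the _normalization traits above encode the forward Fourier convention. A ModelInput built over a real-space array is pre-scaled by V/N so the subsequent r2c sum approximates the continuous transform, a Fourier-space input is scaled by 1/V, and ModelInputAdjoint uses the reciprocal factors; passing prenormed = true skips the scaling entirely. Worked out for the 250 Mpc/h, 64^3 placeholder box used earlier:

// Sketch only: the four scale factors implied by the traits above.
double const V = 250. * 250. * 250.; // box.volume()
double const N3 = 64. * 64. * 64.;   // box.numElements()

double fwd_real = V / N3;    // ModelInput over ArrayRef         -> ~59.6
double fwd_fourier = 1. / V; // ModelInput over CArrayRef        -> 6.4e-8
double adj_real = 1. / V;    // ModelInputAdjoint over ArrayRef
double adj_fourier = V / N3; // ModelInputAdjoint over CArrayRef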
261
extra/hades/libLSS/physics/model_io/output.hpp
Normal file
261
extra/hades/libLSS/physics/model_io/output.hpp
Normal file
|
@ -0,0 +1,261 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/model_io/output.hpp
|
||||
Copyright (C) 2019-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
namespace LibLSS {
|
||||
namespace detail_output {
|
||||
template <size_t Nd>
|
||||
using BoxModelIO = NBoxModel<Nd>;
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
struct _normalization;
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::CArrayRef, Nd> {
|
||||
static double fwd(BoxModelIO<Nd> const &b) {
|
||||
return b.volume() / b.numElements();
|
||||
}
|
||||
static double adj(BoxModelIO<Nd> const &b) { return 1 / b.volume(); }
|
||||
};
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::CArray, Nd>
|
||||
: _normalization<typename ModelIO<Nd>::CArrayRef, Nd> {};
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::ArrayRef, Nd> {
|
||||
static double fwd(BoxModelIO<Nd> const &b) { return 1 / b.volume(); }
|
||||
static double adj(BoxModelIO<Nd> const &b) {
|
||||
return b.volume() / b.numElements();
|
||||
}
|
||||
};
|
||||
|
||||
template <size_t Nd>
|
||||
struct _normalization<typename ModelIO<Nd>::Array, Nd>
|
||||
: _normalization<typename ModelIO<Nd>::ArrayRef, Nd> {};
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
using normalization = _normalization<
|
||||
typename std::remove_const<
|
||||
typename std::remove_reference<T>::type>::type,
|
||||
Nd>;
|
||||
|
||||
template <size_t Nd, typename Super = ModelIO<Nd>>
|
||||
class ModelOutputBase : public Super {
|
||||
protected:
|
||||
typedef Super super_t;
|
||||
typedef typename super_t::Mgr_p Mgr_p;
|
||||
typedef typename super_t::CArrayRef CArrayRef;
|
||||
typedef typename super_t::ArrayRef ArrayRef;
|
||||
typedef typename super_t::BoxModelIO BoxModelIO;
|
||||
typedef typename super_t::Holder Holder;
|
||||
double scaler;
|
||||
bool alreadyTransformed;
|
||||
|
||||
template <typename T>
|
||||
using is_not_a_const =
|
||||
typename std::enable_if<!std::is_const<T>::value, void>::type;
|
||||
|
||||
template <typename T>
|
||||
using is_a_const =
|
||||
typename std::enable_if<!std::is_const<T>::value, void>::type;
|
||||
|
||||
public:
|
||||
ModelOutputBase() : super_t(), alreadyTransformed(false) {}
|
||||
|
||||
template <typename T, typename = is_not_a_const<T>>
|
||||
ModelOutputBase(Mgr_p mgr_, BoxModel const &box_, T &t, double scaler_)
|
||||
: super_t(mgr_, box_, t), scaler(scaler_), alreadyTransformed(false) {
|
||||
}
|
||||
|
||||
template <typename T, typename = is_not_a_const<T>>
|
||||
ModelOutputBase(
|
||||
Mgr_p mgr_, BoxModel const &box_, T &t, Holder original_,
|
||||
double scaler_)
|
||||
: super_t(mgr_, box_, t, original_), scaler(scaler_),
|
||||
alreadyTransformed(false) {}
|
||||
|
||||
~ModelOutputBase();
|
||||
|
||||
void setRequestedIO(PreferredIO opt);
|
||||
|
||||
inline ArrayRef &getRealOutput() {
|
||||
if (this->active != PREFERRED_REAL)
|
||||
error_helper<ErrorBadState>("Requesting (REAL) wrong output");
|
||||
try {
|
||||
return this->ioIsTransformed ? this->tmp_real->get_array()
|
||||
: *boost::get<ArrayRef *>(this->holder);
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Bad access to output: " + std::string(e.what()));
|
||||
}
|
||||
}
|
||||
|
||||
inline CArrayRef &getFourierOutput() {
|
||||
if (this->active != PREFERRED_FOURIER)
|
||||
error_helper<ErrorBadState>("Requesting (FOURIER) wrong output");
|
||||
try {
|
||||
return this->ioIsTransformed ? this->tmp_fourier->get_array()
|
||||
: *boost::get<CArrayRef *>(this->holder);
|
||||
} catch (boost::bad_get const &e) {
|
||||
error_helper<ErrorBadState>(
|
||||
"Bad access to output: " + std::string(e.what()));
|
||||
}
|
||||
}
|
||||
|
||||
void copyFrom(ModelOutputBase<Nd, Super> &other);
|
||||
|
||||
void transformOutputRealToFourier();
|
||||
void transformOutputFourierToReal();
|
||||
|
||||
protected:
|
||||
void triggerTransform();
|
||||
|
||||
void transfer(ModelOutputBase &&other) {
|
||||
scaler = other.scaler;
|
||||
super_t::transfer(std::move(other));
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to handle output arrays from forward models.
|
||||
*/
|
||||
template <size_t Nd, typename Super = ModelOutputBase<Nd>>
|
||||
class ModelOutput : public Super {
|
||||
protected:
|
||||
typedef Super super_t;
|
||||
typedef typename super_t::Mgr_p Mgr_p;
|
||||
typedef typename super_t::CArrayRef CArrayRef;
|
||||
typedef typename super_t::ArrayRef ArrayRef;
|
||||
typedef typename super_t::BoxModelIO BoxModelIO;
|
||||
typedef typename super_t::Holder Holder;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct an empty output.
|
||||
*
|
||||
* The object is marked as uninitialized.
|
||||
*/
|
||||
ModelOutput() : super_t() {}
|
||||
|
||||
/**
|
||||
* @brief Construct an output
|
||||
*
|
||||
* @param Mgr_p FFT manager associated
|
||||
* @param BoxModelIO Object describing the physical size of the output.
|
||||
* @param T an array that must come from a boost::multi_array/boost::multi_array_ref
|
||||
*/
|
||||
template <typename T>
|
||||
ModelOutput(Mgr_p mgr_, BoxModelIO const &box_, T &t)
|
||||
: super_t(mgr_, box_, t, normalization<T, Nd>::fwd(box_)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct an output
|
||||
*
|
||||
* @param Mgr_p FFT manager associated
|
||||
* @param BoxModelIO Object describing the physical size of the output.
|
||||
* @param T an array that must come from a boost::multi_array/boost::multi_array_ref
|
||||
* @param Holder a shared_ptr object that can be used to prevent the memory from being deallocated.
|
||||
*/
|
||||
template <typename T>
|
||||
ModelOutput(Mgr_p mgr_, BoxModelIO const &box_, T &t, Holder original_)
|
||||
: super_t(mgr_, box_, t, original_, normalization<T, Nd>::fwd(box_)) {
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Move constructor
|
||||
*/
|
||||
ModelOutput(ModelOutput<Nd> &&other) {
|
||||
this->operator=(std::move(other));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Move assignment
|
||||
*/
|
||||
ModelOutput &&operator=(ModelOutput &&other) {
|
||||
super_t::transfer(std::move(other));
|
||||
return std::move(*this);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Construct a model output with a memory backing that has the same property as the original
|
||||
*
|
||||
* The memory is allocated but *not* initialized.
|
||||
*/
|
||||
ModelOutput<Nd> makeTempLike();
|
||||
|
||||
ModelOutput<Nd> shallowClone();
|
||||
};
|
||||
|
||||
template <size_t Nd, typename Super = ModelOutputBase<Nd>>
|
||||
class ModelOutputAdjoint : public Super {
|
||||
protected:
|
||||
typedef Super super_t;
|
||||
typedef typename super_t::Mgr_p Mgr_p;
|
||||
typedef typename super_t::CArrayRef CArrayRef;
|
||||
typedef typename super_t::ArrayRef ArrayRef;
|
||||
typedef typename super_t::BoxModelIO BoxModelIO;
|
||||
typedef typename super_t::Holder Holder;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct an empty adjoint gradient output.
|
||||
*
|
||||
* The object is marked as uninitialized.
|
||||
*/
|
||||
ModelOutputAdjoint() : super_t() {}
|
||||
|
||||
/**
|
||||
* @brief Construct an adjoint gradient output
|
||||
*
|
||||
* @param Mgr_p FFT manager associated
|
||||
* @param BoxModelIO Object describing the physical size of the output.
|
||||
* @param T an array that must come from a boost::multi_array/boost::multi_array_ref
|
||||
*/
|
||||
template <typename T>
|
||||
ModelOutputAdjoint(Mgr_p mgr_, BoxModelIO const &box_, T &t)
|
||||
: super_t(mgr_, box_, t, normalization<T, Nd>::adj(box_)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct an adjoint gradient output
|
||||
*
|
||||
* @param Mgr_p FFT manager associated
|
||||
* @param BoxModelIO Object describing the physical size of the output.
|
||||
* @param T an array that must come from a boost::multi_array/boost::multi_array_ref
|
||||
* @param Holder a shared_ptr object that can be used to prevent the memory from being deallocated.
|
||||
*/
|
||||
template <typename T>
|
||||
ModelOutputAdjoint(
|
||||
Mgr_p mgr_, BoxModelIO const &box_, T &t, Holder original_)
|
||||
: super_t(mgr_, box_, t, original_, normalization<T, Nd>::adj(box_)) {
|
||||
}
|
||||
|
||||
ModelOutputAdjoint(ModelOutputAdjoint &&other) {
|
||||
this->operator=(std::move(other));
|
||||
}
|
||||
|
||||
ModelOutputAdjoint &&operator=(ModelOutputAdjoint &&other) {
|
||||
super_t::transfer(std::move(other));
|
||||
return std::move(*this);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Construct a model output with a memory backing that has the same property as the original
|
||||
*
|
||||
* The memory is allocated but *not* initialized.
|
||||
*/
|
||||
ModelOutputAdjoint<Nd> makeTempLike();
|
||||
|
||||
ModelOutputAdjoint<Nd> shallowClone();
|
||||
};
|
||||
} // namespace detail_output
|
||||
} // namespace LibLSS
|
||||
|
||||
// ARES TAG: authors_num = 1
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
||||
// ARES TAG: year(0) = 2019-2020
|
190
extra/hades/libLSS/physics/sum.cpp
Normal file
190
extra/hades/libLSS/physics/sum.cpp
Normal file
|
@ -0,0 +1,190 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/sum.cpp
|
||||
Copyright (C) 2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include <boost/multi_array.hpp>
|
||||
#include "libLSS/physics/cosmo.hpp"
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include <list>
|
||||
#include <boost/variant.hpp>
|
||||
#include "libLSS/tools/errors.hpp"
|
||||
#include "libLSS/physics/sum.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
|
||||
SumForwardModel::SumForwardModel(MPI_Communication *comm, const BoxModel &box)
|
||||
: BORGForwardModel(comm, box) {}
|
||||
|
||||
SumForwardModel::SumForwardModel(
|
||||
MPI_Communication *comm, const BoxModel &box, const BoxModel &outbox)
|
||||
: BORGForwardModel(comm, box, outbox) {}
|
||||
|
||||
SumForwardModel::~SumForwardModel() {}
|
||||
|
||||
void SumForwardModel::forwardModel_v2(ModelInput<3> delta_init) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
for (auto &model : model_list) {
|
||||
model->forwardModel_v2(delta_init.shallowClone());
|
||||
}
|
||||
}
|
||||
|
||||
void SumForwardModel::getDensityFinal(ModelOutput<3> output) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
{
|
||||
auto out_io = (*model_list.begin())->getPreferredOutput();
|
||||
if (out_io != PREFERRED_NONE)
|
||||
output.setRequestedIO(out_io);
|
||||
}
|
||||
|
||||
ModelOutput<3> tmp;
|
||||
|
||||
double num = 1.0; //model_list.size();
|
||||
auto accum = [num](auto &a, auto &b) {
|
||||
fwrap(a) = fwrap(a) + (1.0 / num) * fwrap(b);
|
||||
};
|
||||
|
||||
if (output.active == PREFERRED_REAL) {
|
||||
auto tmp_p = out_mgr->allocate_ptr_array();
|
||||
auto& tmp_a = tmp_p->get_array();
|
||||
tmp = std::move(ModelOutput<3>(out_mgr, get_box_model_output(), tmp_a, std::move(tmp_p)));
|
||||
fwrap(output.getRealOutput()) = 0.0;
|
||||
} else {
|
||||
auto tmp_p = out_mgr->allocate_ptr_complex_array();
|
||||
auto& tmp_a = tmp_p->get_array();
|
||||
tmp = std::move(ModelOutput<3>(out_mgr, get_box_model_output(), tmp_a, std::move(tmp_p)));
|
||||
fwrap(output.getFourierOutput()) = 0.0;
|
||||
}
|
||||
|
||||
for (auto &model : model_list) {
|
||||
model->getDensityFinal(tmp.shallowClone());
|
||||
|
||||
switch (tmp.active) {
|
||||
case PREFERRED_REAL:
|
||||
accum(output.getRealOutput(), tmp.getRealOutput());
|
||||
break;
|
||||
case PREFERRED_FOURIER:
|
||||
accum(output.getFourierOutput(), tmp.getFourierOutput());
|
||||
break;
|
||||
default:
|
||||
error_helper<ErrorBadState>("Unknown IO type.");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SumForwardModel::adjointModel_v2(ModelInputAdjoint<3> gradient_delta) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
for (auto &model : model_list) {
|
||||
model->adjointModel_v2(gradient_delta.shallowClone());
|
||||
}
|
||||
}
|
||||
|
||||
void SumForwardModel::getAdjointModelOutput(ModelOutputAdjoint<3> ag_output) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
|
||||
{
|
||||
auto in_io = (*model_list.begin())->getPreferredInput();
|
||||
if (in_io != PREFERRED_NONE)
|
||||
ag_output.setRequestedIO(in_io);
|
||||
}
|
||||
|
||||
ModelOutputAdjoint<3> tmp;
|
||||
double num = 1.0; //model_list.size();
|
||||
auto accum = [num](auto &a, auto &b) {
|
||||
fwrap(a) = fwrap(a) + (1.0 / num) * fwrap(b);
|
||||
};
|
||||
|
||||
if (ag_output.active == PREFERRED_REAL) {
|
||||
auto tmp_p = out_mgr->allocate_ptr_array();
|
||||
auto& tmp_a = tmp_p->get_array();
|
||||
tmp = std::move(ModelOutputAdjoint<3>(out_mgr, get_box_model_output(), tmp_a, std::move(tmp_p)));
|
||||
fwrap(ag_output.getRealOutput()) = 0.0;
|
||||
} else {
|
||||
auto tmp_p = out_mgr->allocate_ptr_complex_array();
|
||||
auto& tmp_a = tmp_p->get_array();
|
||||
tmp = std::move(ModelOutputAdjoint<3>(out_mgr, get_box_model_output(), tmp_a, std::move(tmp_p)));
|
||||
fwrap(ag_output.getFourierOutput()) = 0.0;
|
||||
}
|
||||
|
||||
for (auto &model : model_list) {
|
||||
model->getAdjointModelOutput(tmp.shallowClone());
|
||||
|
||||
switch (tmp.active) {
|
||||
case PREFERRED_REAL:
|
||||
accum(ag_output.getRealOutput(), tmp.getRealOutput());
|
||||
break;
|
||||
case PREFERRED_FOURIER:
|
||||
accum(ag_output.getFourierOutput(), tmp.getFourierOutput());
|
||||
break;
|
||||
default:
|
||||
error_helper<ErrorBadState>("Unknown IO type.");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SumForwardModel::releaseParticles() {
|
||||
// Fill up with the chain
|
||||
for (auto model : model_list) {
|
||||
model->releaseParticles();
|
||||
}
|
||||
}
|
||||
|
||||
void SumForwardModel::addModel(std::shared_ptr<BORGForwardModel> model) {
|
||||
if (get_box_model() != model->get_box_model()) {
|
||||
error_helper<ErrorParams>(
|
||||
"Invalid model configuration with IO of the chain.");
|
||||
}
|
||||
model_list.push_back(model);
|
||||
}
|
||||
|
||||
void SumForwardModel::setAdjointRequired(bool required) {
|
||||
for (auto& model : model_list) {
|
||||
model->setAdjointRequired(required);
|
||||
}
|
||||
}
|
||||
|
||||
void SumForwardModel::updateCosmo() {
|
||||
// Fill up with the chain
|
||||
for (auto& model : model_list) {
|
||||
model->setCosmoParams(cosmo_params);
|
||||
}
|
||||
}
|
||||
|
||||
void SumForwardModel::setModelParams(ModelDictionnary const ¶ms) {
|
||||
for (auto& model : model_list) {
|
||||
model->setModelParams(params);
|
||||
}
|
||||
}
|
||||
|
||||
boost::any SumForwardModel::getModelParam(std::string const& name, std::string const& param) {
|
||||
if (name == modelName) {
|
||||
return boost::any();
|
||||
}
|
||||
|
||||
for (auto& model : model_list) {
|
||||
auto ret = model->getModelParam(name, param);
|
||||
if (!ret.empty())
|
||||
return ret;
|
||||
}
|
||||
return boost::any();
|
||||
}
|
||||
|
||||
void SumForwardModel::clearAdjointGradient() {
|
||||
for (auto model : model_list) {
|
||||
model->clearAdjointGradient();
|
||||
}
|
||||
}
|
||||
|
||||
// ARES TAG: authors_num = 1
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
||||
// ARES TAG: year(0) = 2020
|
68
extra/hades/libLSS/physics/sum.hpp
Normal file
68
extra/hades/libLSS/physics/sum.hpp
Normal file
|
@ -0,0 +1,68 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/physics/sum.hpp
    Copyright (C) 2018-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2018-2019 Doogesh Kodi Ramanah <ramanah@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_SUM_FORWARD_MODEL_HPP
# define __LIBLSS_SUM_FORWARD_MODEL_HPP

# include <boost/multi_array.hpp>
# include "libLSS/physics/cosmo.hpp"
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/physics/forward_model.hpp"
# include <list>
# include <boost/variant.hpp>

namespace LibLSS {

  class SumForwardModel : public BORGForwardModel {
  public:
    SumForwardModel(MPI_Communication *comm, const BoxModel &box);

    SumForwardModel(
        MPI_Communication *comm, const BoxModel &box, const BoxModel &outbox);

    ~SumForwardModel() override;

    void forwardModel_v2(ModelInput<3> delta_init) override;

    void getDensityFinal(ModelOutput<3> output) override;

    void clear_chain();

    // adjointModel automatically releases particles. Beware!

    void adjointModel_v2(ModelInputAdjoint<3> gradient_delta) override;
    void getAdjointModelOutput(ModelOutputAdjoint<3> ag_output) override;
    void releaseParticles() override;

    void addModel(std::shared_ptr<BORGForwardModel> model);

    void setAdjointRequired(bool required) override;
    void clearAdjointGradient() override;

    void setModelParams(ModelDictionnary const &params) override;

    boost::any
    getModelParam(std::string const &name, std::string const &param) override;

  protected:
    std::list<std::shared_ptr<BORGForwardModel>> model_list;

    void updateCosmo() override;
  };
} // namespace LibLSS

#endif

// ARES TAG: authors_num = 2
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018-2020
// ARES TAG: name(1) = Doogesh Kodi Ramanah
// ARES TAG: email(1) = ramanah@iap.fr
// ARES TAG: year(1) = 2018-2019
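Editor's note (illustrative, not part of the import): a SumForwardModel accumulates the density of every registered element over the same box (the per-element 1/num averaging is currently disabled in sum.cpp, so it is a plain sum). A minimal composition sketch; comm, box, mgr, modelA, modelB, s_hat and delta_out are assumed to already exist with the obvious types:

// Sketch only: sum the outputs of two forward elements sharing one box.
auto sum = std::make_shared<LibLSS::SumForwardModel>(comm, box);
sum->addModel(modelA);
sum->addModel(modelB);

sum->forwardModel_v2(LibLSS::ModelInput<3>(mgr, box, s_hat));
sum->getDensityFinal(LibLSS::ModelOutput<3>(mgr, box, delta_out));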
99
extra/hades/libLSS/samplers/bias_model_params.cpp
Normal file
99
extra/hades/libLSS/samplers/bias_model_params.cpp
Normal file
|
@ -0,0 +1,99 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/bias_model_params.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/tools/fftw_allocator.hpp"
|
||||
#include "libLSS/samplers/core/markov.hpp"
|
||||
#include "libLSS/samplers/core/random_number.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include <vector>
|
||||
#include "libLSS/samplers/bias_model_params.hpp"
|
||||
#include "libLSS/samplers/rgen/slice_sweep.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
|
||||
BiasModelParamsSampler::BiasModelParamsSampler(
|
||||
MPI_Communication *comm_,
|
||||
std::shared_ptr<GridDensityLikelihoodBase<3>> likelihood_,
|
||||
std::shared_ptr<BORGForwardModel> model_, int numBias_,
|
||||
std::string const &prefix_)
|
||||
: MarkovSampler(), comm(comm_), likelihood(likelihood_), model(model_),
|
||||
numBias(numBias_), biasElement(0), prefix(prefix_) {}
|
||||
|
||||
BiasModelParamsSampler::~BiasModelParamsSampler() {}
|
||||
|
||||
void BiasModelParamsSampler::initialize(MarkovState &state) { restore(state); }
|
||||
|
||||
void BiasModelParamsSampler::restore(MarkovState &state) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
if (!model) {
|
||||
error_helper<ErrorBadState>("Model for BiasModelParams is invalid");
|
||||
}
|
||||
state.newElement(
|
||||
prefix + "model_params_bias",
|
||||
biasElement = new ArrayType1d(boost::extents[numBias]), true);
|
||||
auto default_values = boost::any_cast<LibLSS::multi_array<double, 1>>(
|
||||
model->getModelParam("bias", "biasParameters"));
|
||||
fwrap(*biasElement->array) = default_values;
|
||||
biasElement->subscribeLoaded([this]() {
|
||||
model->setModelParams({{"biasParameters", *biasElement->array}});
|
||||
});
|
||||
}
|
||||
|
||||
void BiasModelParamsSampler::sample(MarkovState &state) {
|
||||
LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
|
||||
auto const &x_density = *state.get<CArrayType>("s_hat_field")->array;
|
||||
double const ares_heat = state.getScalar<double>("ares_heat");
|
||||
|
||||
RandomGen *rng = state.get<RandomGen>("random_generator");
|
||||
|
||||
if (pre_hook)
|
||||
pre_hook();
|
||||
|
||||
likelihood->updateMetaParameters(state);
|
||||
|
||||
auto &biasParams = *biasElement->array;
|
||||
|
||||
for (size_t i = 0; i < numBias; i++) {
|
||||
if (bias_to_freeze.find(i) != bias_to_freeze.end())
|
||||
continue;
|
||||
|
||||
auto tmpParams = biasParams;
|
||||
ctx.format2<LOG_VERBOSE>("Sampling bias model %d", i);
|
||||
biasParams[i] = slice_sweep_double(
|
||||
comm, rng->get(),
|
||||
[&](double x) {
|
||||
ModelDictionnary this_param_map;
|
||||
tmpParams[i] = x;
|
||||
this_param_map["biasParameters"] = tmpParams;
|
||||
try {
|
||||
model->setModelParams(this_param_map);
|
||||
} catch (outOfBoundParam const &) {
|
||||
return -std::numeric_limits<double>::infinity();
|
||||
}
|
||||
double log_L = -likelihood->logLikelihood(x_density, false);
|
||||
ctx.format2<LOG_VERBOSE>("log_L = %g", log_L);
|
||||
return ares_heat * log_L;
|
||||
},
|
||||
biasParams[i], 1.);
|
||||
|
||||
comm->broadcast_t(&biasParams[i], 1, 0);
|
||||
}
|
||||
|
||||
ModelDictionnary this_param_map;
|
||||
this_param_map["biasParameters"] = biasParams;
|
||||
model->setModelParams(this_param_map);
|
||||
likelihood->logLikelihood(x_density, false);
|
||||
likelihood->commitAuxiliaryFields(state);
|
||||
|
||||
if (post_hook)
|
||||
post_hook();
|
||||
}

65
extra/hades/libLSS/samplers/bias_model_params.hpp
Normal file
@ -0,0 +1,65 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/bias_model_params.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_HADES_BIAS_MODEL_PARAMS_SAMPLER_HPP
# define __LIBLSS_HADES_BIAS_MODEL_PARAMS_SAMPLER_HPP

# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/mcmc/global_state.hpp"
# include "libLSS/tools/fftw_allocator.hpp"
# include "libLSS/samplers/core/markov.hpp"
# include "libLSS/samplers/core/random_number.hpp"
# include "libLSS/samplers/core/types_samplers.hpp"
# include "libLSS/physics/forward_model.hpp"
# include "libLSS/samplers/core/gridLikelihoodBase.hpp"
# include <vector>

namespace LibLSS {

  class BiasModelParamsSampler : public MarkovSampler {
  protected:
    MPI_Communication *comm;
    std::vector<std::string> paramsToSample;
    std::shared_ptr<BORGForwardModel> model;
    std::shared_ptr<GridDensityLikelihoodBase<3>> likelihood;
    ModelDictionnary init_state;
    int numBias;
    ArrayType1d *biasElement;
    std::set<int> bias_to_freeze;
    typedef std::function<void()> HookType;

    HookType pre_hook, post_hook;
    std::string prefix;

  public:
    BiasModelParamsSampler(
        MPI_Communication *comm_,
        std::shared_ptr<GridDensityLikelihoodBase<3>> likelihood_,
        std::shared_ptr<BORGForwardModel> model_, int numBias_,
        std::string const &prefix);
    virtual ~BiasModelParamsSampler();

    void freezeSet(std::set<int> bias_to_freeze) {
      this->bias_to_freeze = bias_to_freeze;
    }

    void initialize(MarkovState &state) override;
    void restore(MarkovState &state) override;
    void sample(MarkovState &state) override;

    void setLimiterHooks(HookType pre_hook_, HookType post_hook_) {
      pre_hook = pre_hook_;
      post_hook = post_hook_;
    }
  };

} // namespace LibLSS

#endif

60
extra/hades/libLSS/samplers/core/generate_random_field.cpp
Normal file
@ -0,0 +1,60 @@
#include "libLSS/samplers/core/generate_random_field.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/tools/fusewrapper.hpp"
|
||||
#include "libLSS/samplers/core/random_number.hpp"
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
|
||||
void LibLSS::generateRandomField(MPI_Communication *comm, MarkovState &state) {
|
||||
auto &s_hat = *state.get<CArrayType>("s_hat_field")->array;
|
||||
auto &s = *state.get<ArrayType>("s_field")->array;
|
||||
auto &rgen = state.get<RandomGen>("random_generator")->get();
|
||||
std::array<long, 3> N;
|
||||
std::array<double, 3> L;
|
||||
|
||||
state.getScalarArray<long, 3>("N", N);
|
||||
state.getScalarArray<double, 3>("L", L);
|
||||
|
||||
FFTW_Manager<double, 3> mgr(N[0], N[1], N[2], comm);
|
||||
double volume = array::product(L);
|
||||
double volNorm = volume / array::product(N);
|
||||
double invN3 = 1.0 / array::product(N);
|
||||
double sqrt_invN3 = std::sqrt(invN3);
|
||||
|
||||
// auto s_hat_p = base_mgr->allocate_complex_array();
|
||||
// auto &s_hat = s_hat_p.get_array();
|
||||
auto s_hat_w = fwrap(s_hat);
|
||||
auto s_w = fwrap(s);
|
||||
auto tmp_real_field_p = mgr.allocate_array();
|
||||
auto &tmp_real_field = tmp_real_field_p.get_array();
|
||||
auto tmp_complex_field_p = mgr.allocate_complex_array();
|
||||
auto &tmp_complex_field = tmp_complex_field_p.get_array();
|
||||
|
||||
auto synthesis_plan =
|
||||
mgr.create_c2r_plan(tmp_complex_field.data(), tmp_real_field.data());
|
||||
auto analysis_plan =
|
||||
mgr.create_r2c_plan(tmp_real_field.data(), tmp_complex_field.data());
|
||||
|
||||
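  // Draw unit-variance white noise on the real grid, transform it to Fourier
  // space, and rescale by 1/sqrt(N^3) so that s_hat_field carries the
  // discrete white-noise normalization.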
  fwrap(tmp_real_field) = rgen.gaussian(fwrap(b_fused_idx<double, 3>(
      [](size_t, size_t, size_t) -> double { return 1; },
      mgr.extents_real_strict())));

  mgr.execute_r2c(
      analysis_plan, tmp_real_field.data(), tmp_complex_field.data());

  // Convolve with sqrt(P(k))
  s_hat_w = fwrap(tmp_complex_field) *
            b_fused_idx<double, 3>(
                [sqrt_invN3](size_t i, size_t j, size_t k) -> double {
                  return sqrt_invN3;
                });
  {
    // Get back s_field now
    fwrap(tmp_complex_field) = s_hat_w * (1 / volume);
    mgr.execute_c2r(
        synthesis_plan, tmp_complex_field.data(), tmp_real_field.data());
  }
  fwrap(s[mgr.strict_range()]) = fwrap(tmp_real_field[mgr.strict_range()]);
}

13
extra/hades/libLSS/samplers/core/generate_random_field.hpp
Normal file
@ -0,0 +1,13 @@
#ifndef __LIBLSS_GENERATE_RANDOM_FIELD_HPP
#define __LIBLSS_GENERATE_RANDOM_FIELD_HPP

#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/mcmc/global_state.hpp"

namespace LibLSS {

  void generateRandomField(MPI_Communication *comm, MarkovState &state);

}

#endif

111
extra/hades/libLSS/samplers/core/gridLikelihoodBase.cpp
Normal file
@ -0,0 +1,111 @@
#include <functional>
#include "libLSS/tools/console.hpp"
#include "libLSS/samplers/core/gridLikelihoodBase.hpp"
#include <utility>

using namespace LibLSS;

namespace ph = std::placeholders;

template <typename T, std::size_t Dims, std::size_t... Is>
std::unique_ptr<FFTW_Manager<T, Dims>> makeMgr(
    MPI_Communication *comm, std::array<size_t, Dims> const &d,
    std::index_sequence<Is...>) {
  typedef FFTW_Manager<T, Dims> Mgr;
  return std::unique_ptr<Mgr>(new Mgr(d[Is]..., comm));
}

template <int Dims>
GridDensityLikelihoodBase<Dims>::GridDensityLikelihoodBase(
    MPI_Communication *comm_, GridSizes const &N_, GridLengths const &L_)
    : LikelihoodBase(), comm(comm_), N(N_), L(L_), volume(array::product(L_)) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);

  // FIXME: Is that the only preserved symmetry ?
  // This one is enforced because average(s_field) == 0 at all time
  // So the gradient of the likelihood may not create any mean or risk
  // imbalance.
  // FIXME: This is only valid for 3D. It has to be generalized to N-d
  //special_cases.push_back(std::make_tuple(Index{0, 0, 0}, 0));
  special_cases.push_back(std::make_tuple(
      Index{ssize_t(N[0] / 2), ssize_t(N[1] / 2), ssize_t(N[2] / 2)}, 0));
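  // The remaining self-conjugate modes (each index either 0 or the Nyquist
  // frequency) carry no independent imaginary part; they are registered with
  // weight 0.5, compensating the global hermiticity factor of 2 applied in
  // the gradient computation below.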
  for (size_t i = 1; i < 7; i++) {
    auto j = Index{0, 0, 0};

    j[0] = (i & 1) * (N[0] / 2);
    j[1] = ((i & 2) >> 1) * (N[1] / 2);
    j[2] = ((i & 4) >> 2) * (N[2] / 2);
    special_cases.push_back(std::make_tuple(j, 0.5));
  }

  mgr = makeMgr<double, Dims>(comm_, N_, std::make_index_sequence<Dims>());
  auto tmp_real_field = mgr->allocate_array();
  auto tmp_complex_field = mgr->allocate_complex_array();
  analysis_plan = mgr->create_r2c_plan(
      tmp_real_field.get_array().data(), tmp_complex_field.get_array().data());
}

template <int Dims>
GridDensityLikelihoodBase<Dims>::~GridDensityLikelihoodBase() {}

template <>
void GridDensityLikelihoodBase<3>::computeFourierSpace_GradientPsi(
    ArrayRef &real_gradient, CArrayRef &grad_array, bool accumulate,
    double scaling) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
  double normalizer =
      1 /
      volume; // Normalization is like synthesis here, we consider the transpose synthesis
  auto tmp_complex_field_p = mgr->allocate_complex_array();
  auto plane_set = std::array<size_t, 2>{0, N[2] / 2};
  using boost::multi_array_types::index_range;

  // BEWARE: real_gradient is destroyed!
  mgr->execute_r2c(
      analysis_plan, real_gradient.data(),
      tmp_complex_field_p.get_array().data());

  auto &c_field = tmp_complex_field_p.get_array();
  auto e = fwrap(c_field) * (2 * normalizer); // Factor 2 owed to hermiticity
  auto output = fwrap(grad_array);
  size_t startN0 = mgr->startN0;
  size_t endN0 = mgr->startN0 + mgr->localN0;

  // FIXME: Why do we do this already ?
  ctx.print(" Handle special cases");
  for (auto &h : special_cases) {
    ssize_t i0 = std::get<0>(h)[0];
    if (i0 >= startN0 && i0 < endN0)
      c_field(std::get<0>(h)) *= std::get<1>(h);
  }

  for (auto p : plane_set) {
    size_t i0_start = std::max(startN0, N[0] / 2 + 1);
    size_t i0_end = std::min(endN0, N[0]);
    ctx.print(
        "Fix plane " + std::to_string(p) + " " + std::to_string(i0_start) +
        " => " + std::to_string(i0_end));
    if (i0_start < i0_end)
      fwrap(c_field[boost::indices[index_range(i0_start, i0_end)][index_range()]
                                  [p]]) = 0;
    for (auto q : {size_t(0), N[0] / 2}) {
      ctx.print("Fix plane (2) " + std::to_string(q));
      if (mgr->on_core(q)) {
        fwrap(c_field[boost::indices[q][index_range(N[1] / 2 + 1, N[1])][p]]) =
            0;
      }
    }
  }

  if (mgr->on_core(0))
    c_field[0][0][0] = 0;

  if (accumulate)
    output = output + scaling * e;
  else
    output = e * scaling;
}

namespace LibLSS {
  template class GridDensityLikelihoodBase<3>;
}

117
extra/hades/libLSS/samplers/core/gridLikelihoodBase.hpp
Normal file
@ -0,0 +1,117 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/core/gridLikelihoodBase.hpp
    Copyright (C) 2018 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_SAMPLERS_CORE_GRID_LIKELIHOOD_BASE_HPP
# define __LIBLSS_SAMPLERS_CORE_GRID_LIKELIHOOD_BASE_HPP

# include <array>
# include <tuple>
# include <vector>
# include "libLSS/mpi/generic_mpi.hpp"
# include "libLSS/samplers/core/types_samplers.hpp"
# include "libLSS/tools/mpi_fftw_helper.hpp"
# include "libLSS/mcmc/global_state.hpp"
# include "libLSS/physics/forward_model.hpp"
# include "libLSS/samplers/core/likelihood.hpp"

namespace LibLSS {

  template <int Dims>
  class GridDensityLikelihoodBase : virtual public LikelihoodBase {
  public:
    typedef std::array<size_t, Dims> GridSizes;
    typedef std::array<double, Dims> GridLengths;
    typedef std::array<ssize_t, Dims> Index;

    typedef boost::multi_array_ref<double, Dims> ArrayRef;
    typedef boost::multi_array_ref<std::complex<double>, Dims> CArrayRef;

    GridDensityLikelihoodBase(
        MPI_Communication *comm, GridSizes const &N, GridLengths const &L);
    virtual ~GridDensityLikelihoodBase();

    void commitAuxiliaryFields(MarkovState &state) override {}

    virtual void
    generateMockData(CArrayRef const &parameters, MarkovState &state) = 0;

    /*
     * This is the opposite of the log likelihood, Fourier representation.
     * The same rules apply as for the real space variant. The modes must be
     * in units of volume, as per the continuous Fourier transform.
     */
    virtual double
    logLikelihood(CArrayRef const &parameters, bool gradientIsNext = false) = 0;

    /*
     * This computes the opposite of the log likelihood. If gradientIsNext is
     * true the model has to prepare itself for a gradientLikelihood call.
     * Otherwise it can free temporary memory used to compute the forward model.
     * This variant takes the image in real space representation. The input is
     * preserved as indicated by the const.
     */
    virtual double
    logLikelihood(ArrayRef const &parameters, bool gradientIsNext = false) = 0;

    /*
     * This is the gradient of the opposite of the log likelihood
     * (Fourier representation variant).
     * The same rules apply as for the real space variant.
     */
    virtual void gradientLikelihood(
        CArrayRef const &parameters, CArrayRef &gradient_parameters,
        bool accumulate = false, double scaling = 1.0) = 0;

    /*
     * This is the gradient of the opposite of the log likelihood. It
     * returns the gradient in real space representation.
     * You must have called logLikelihood with gradientIsNext=true first.
     * If accumulate is set to true, the gradient will be summed with existing values.
     * 'scaling' indicates by how much the gradient must be scaled before accumulation.
     */
    virtual void gradientLikelihood(
        ArrayRef const &parameters, ArrayRef &gradient_parameters,
        bool accumulate = false, double scaling = 1.0) = 0;
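
    /*
     * A minimal call sequence for a concrete subclass (illustrative sketch;
     * `lh`, `s` and `grad` are placeholder names, not part of this interface):
     *
     *   double H = lh.logLikelihood(s, true); // prepare for the gradient call
     *   lh.gradientLikelihood(s, grad);       // gradient w.r.t. s, real space
     */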

  protected:
    typedef std::tuple<std::array<ssize_t, Dims>, double> SpecialType;
    typedef FFTW_Manager<double, Dims> Mgr;
    std::vector<SpecialType> special_cases;
    GridSizes N;
    GridLengths L;
    long Ncat;
    MPI_Communication *comm;
    double volume;
    std::shared_ptr<Mgr> mgr;
    typename Mgr::plan_type analysis_plan;

    void computeFourierSpace_GradientPsi(
        ArrayRef &real_gradient, CArrayRef &grad_array, bool accumulate,
        double scaling);

  public:
    std::shared_ptr<Mgr> getManager() { return mgr; }
  };

  class ForwardModelBasedLikelihood : public GridDensityLikelihoodBase<3> {
  public:
    ForwardModelBasedLikelihood(
        MPI_Communication *comm, GridSizes const &N, GridLengths const &L)
        : GridDensityLikelihoodBase<3>(comm, N, L) {}

    virtual std::shared_ptr<BORGForwardModel> getForwardModel() = 0;
  };

} // namespace LibLSS

#endif

// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2018

47
extra/hades/libLSS/samplers/core/likelihood.hpp
Normal file
@ -0,0 +1,47 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/core/likelihood.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_CORE_LIKELIHOOD_BASE_HPP
# define __LIBLSS_CORE_LIKELIHOOD_BASE_HPP

# include "libLSS/physics/cosmo.hpp"
# include "libLSS/mcmc/global_state.hpp"
# include "libLSS/mcmc/state_element.hpp"

namespace LibLSS {

  /**
   * @brief A type mapping parameter names to their state elements in the MarkovChain
   *
   */
  typedef std::map<std::string, std::shared_ptr<StateElement>> ParameterSpace;

  /**
   * @brief This is the fundamental likelihood class.
   *
   * It does not itself express log likelihood evaluation, but provides entry
   * points to initialize the likelihood, obtain the cosmology and manipulate
   * a MarkovState.
   *
   */
  class LikelihoodBase {
  public:
    LikelihoodBase() = default;
    virtual ~LikelihoodBase() {}

    virtual void initializeLikelihood(MarkovState &state) = 0;
    virtual void updateMetaParameters(MarkovState &state) = 0;
    virtual void setupDefaultParameters(MarkovState &state, int catalog) = 0;
    virtual void updateCosmology(CosmologicalParameters const &params) = 0;
    virtual void commitAuxiliaryFields(MarkovState &state) = 0;
  };

} // namespace LibLSS

#endif

16
extra/hades/libLSS/samplers/core/simpleLikelihood.cpp
Normal file
@ -0,0 +1,16 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/core/simpleLikelihood.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include "libLSS/samplers/core/simpleLikelihood.hpp"

using namespace LibLSS;

SimpleLikelihood::SimpleLikelihood(LikelihoodInfo const &info) {}

SimpleLikelihood::~SimpleLikelihood() {}

74
extra/hades/libLSS/samplers/core/simpleLikelihood.hpp
Normal file
@ -0,0 +1,74 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/core/simpleLikelihood.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __HADES_SIMPLE_LIKELIHOOD_HPP
# define __HADES_SIMPLE_LIKELIHOOD_HPP

# include "libLSS/physics/likelihoods/base.hpp"
# include "libLSS/samplers/core/types_samplers.hpp"
# include "libLSS/samplers/core/likelihood.hpp"

namespace LibLSS {

  /**
   * @brief Likelihood with a simple structure, parallelization-wise.
   *
   * It assumes that all nodes obtain the same copy of the parameters and
   * collectively produce a result that may depend on other parameters local
   * to each MPI node. For a single-task run, the only difference to
   * GridDensityLikelihoodBase is the shape of the parameter space. For
   * parallel jobs, however, the behaviour can become very different.
   *
   * Note that the proposed_params arguments are unnamed, so the behaviour
   * depends heavily on the semantics of those parameters. Some coordination
   * with the user of such a likelihood may be required.
   */
  class SimpleLikelihood : virtual public LikelihoodBase {
  public:
    SimpleLikelihood(LikelihoodInfo const &info);
    virtual ~SimpleLikelihood();

    /**
     * @brief Compute the log likelihood assuming the proposed parameters.
     *
     * @param global_params a global parameter state that does not change with the current transition kernel
     * @param proposed_params the set of parameters for which a new likelihood evaluation is requested
     * @return double the result
     */
    virtual double logLikelihoodSimple(
        ParameterSpace const &global_params,
        LibLSS::const_multi_array_ref<double, 1> const &proposed_params) = 0;

    /**
     * @brief Compute the gradient of the log-likelihood w.r.t. the proposed_params
     *
     * @param global_params a global parameter state that does not change with the current transition kernel
     * @param proposed_params the set of parameters for which a new likelihood evaluation is requested
     * @param gradient the gradient w.r.t. proposed_params
     */
    virtual void gradientLikelihoodSimple(
        ParameterSpace const &global_params,
        LibLSS::const_multi_array_ref<double, 1> const &proposed_params,
        LibLSS::multi_array_ref<double, 1> &gradient) = 0;

    /**
     * @brief Returns the number of dimensions required by this likelihood.
     *
     * Note: as SimpleLikelihood is global, that number must be the same on
     * all MPI nodes.
     *
     * @return unsigned int
     */
    virtual unsigned int numDimensions() const = 0;
  };
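
  /*
   * A minimal sketch of a concrete SimpleLikelihood (illustrative only: the
   * class name and the 1-D Gaussian model are assumptions, not part of libLSS):
   *
   *   class ToyGaussianLikelihood : public SimpleLikelihood {
   *   public:
   *     using SimpleLikelihood::SimpleLikelihood;
   *     unsigned int numDimensions() const override { return 1; }
   *     double logLikelihoodSimple(
   *         ParameterSpace const &,
   *         LibLSS::const_multi_array_ref<double, 1> const &p) override {
   *       return -0.5 * p[0] * p[0]; // unit-variance Gaussian in p[0]
   *     }
   *     void gradientLikelihoodSimple(
   *         ParameterSpace const &,
   *         LibLSS::const_multi_array_ref<double, 1> const &p,
   *         LibLSS::multi_array_ref<double, 1> &g) override { g[0] = -p[0]; }
   *     // ...the LikelihoodBase entry points must be implemented as well.
   *   };
   */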

} // namespace LibLSS

#endif

164
extra/hades/libLSS/samplers/core/splitLikelihood.cpp
Normal file
@ -0,0 +1,164 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/core/splitLikelihood.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include "libLSS/tools/console.hpp"
#include "libLSS/samplers/core/splitLikelihood.hpp"
#include "libLSS/tools/overload.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include <boost/variant.hpp>
#include <functional>

using namespace LibLSS;

SplitAndReshapeLikelihood::SplitAndReshapeLikelihood(MPI_Communication *comm_)
    : comm(comm_) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
}

SplitAndReshapeLikelihood::~SplitAndReshapeLikelihood() {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
}

void SplitAndReshapeLikelihood::initializeLikelihood(MarkovState &state) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  for (auto &l : parameterLikelihood) {
    boost::apply_visitor(
        [&state](std::shared_ptr<LikelihoodBase> &likelihood) {
          likelihood->initializeLikelihood(state);
        },
        std::get<1>(l));
  }
}

void SplitAndReshapeLikelihood::updateMetaParameters(MarkovState &state) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  for (auto &l : parameterLikelihood) {
    boost::apply_visitor(
        [&state](std::shared_ptr<LikelihoodBase> &likelihood) {
          likelihood->updateMetaParameters(state);
        },
        std::get<1>(l));
  }
}
void SplitAndReshapeLikelihood::setupDefaultParameters(
    MarkovState &state, int catalog) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  for (auto &l : parameterLikelihood) {
    boost::apply_visitor(
        [&state, catalog](std::shared_ptr<LikelihoodBase> &likelihood) {
          likelihood->setupDefaultParameters(state, catalog);
        },
        std::get<1>(l));
  }
}

void SplitAndReshapeLikelihood::updateCosmology(
    CosmologicalParameters const &params) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  for (auto &l : parameterLikelihood) {
    boost::apply_visitor(
        [&params](std::shared_ptr<LikelihoodBase> &likelihood) {
          likelihood->updateCosmology(params);
        },
        std::get<1>(l));
  }
}

void SplitAndReshapeLikelihood::commitAuxiliaryFields(MarkovState &state) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  for (auto &l : parameterLikelihood) {
    boost::apply_visitor(
        [&state](std::shared_ptr<LikelihoodBase> &likelihood) {
          likelihood->commitAuxiliaryFields(state);
        },
        std::get<1>(l));
  }
}

/*
 * ---------------------------------------------------------------------
 * getTotalNumberOfParameters
 *
 * getTotalNumberOfParameters needs an explicit implementation for each
 * likelihood variant.
 */

static unsigned int computeNumberOfParametersGrid(
    std::shared_ptr<GridDensityLikelihoodBase<3>> grid) {
  auto mgr = grid->getManager();

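  // One FFTW slab per task: localN0 x N1 x N2_HC complex coefficients,
  // i.e. twice that many real degrees of freedom.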
  return mgr->localN0 * mgr->N1 * mgr->N2_HC * 2;
}

static unsigned int computeNumberOfParametersSimple(
    MPI_Communication *comm, std::shared_ptr<SimpleLikelihood> simple) {
  // The node with rank 0 will be in charge of generating the required
  // random numbers/proposals.
  return comm->rank() == 0 ? simple->numDimensions() : 0;
}

unsigned int SplitAndReshapeLikelihood::getTotalNumberOfParameters() {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);

  unsigned int total = 0;
  for (auto &l : parameterLikelihood) {
    total += boost::apply_visitor(
        overload(
            computeNumberOfParametersGrid,
            std::bind(
                &computeNumberOfParametersSimple, comm, std::placeholders::_1)),
        std::get<1>(l));
  }
  return total;
}

// ---------------------------------------------------------------------

void SplitAndReshapeLikelihood::addNamedParameter(
    std::string parameter, LikelihoodVariant likelihood) {
  parameterLikelihood.push_back(std::make_tuple(parameter, likelihood));
}

/*
 * ---------------------------------------------------------------------
 * logLikelihood needs separate treatment for each variant
 *
 */

static double logLikelihoodSimple(
    MPI_Communication *comm, ParameterSpace &global,
    std::shared_ptr<SimpleLikelihood> simple, unsigned int start,
    boost::const_multi_array_ref<double, 1> const &params) {
  unsigned int num = simple->numDimensions();
  boost::multi_array<double, 1> paramsSubSet(boost::extents[num]);
  typedef boost::multi_array_types::index_range range;

  fwrap(paramsSubSet) = fwrap(params[range(start, start + num)]);
  double loc_L = simple->logLikelihoodSimple(global, paramsSubSet);
  double L = 0;

  comm->all_reduce_t(&loc_L, &L, 1, MPI_SUM);
  return L;
}

double SplitAndReshapeLikelihood::logLikelihood(
    LibLSS::const_multi_array_ref<double, 1> &params) {
  LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
  unsigned int start = 0;
  ParameterSpace global;
  double L = 0;

  for (auto &l : parameterLikelihood) {
    L += boost::apply_visitor(
        overload(std::bind(&logLikelihoodSimple, comm, global, start, params)),
        std::get<1>(l));
  }

  return L;
}

103
extra/hades/libLSS/samplers/core/splitLikelihood.hpp
Normal file
@ -0,0 +1,103 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/core/splitLikelihood.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#pragma once
#ifndef __LIBLSS_HADES_SPLIT_LIKELIHOOD_HPP
# define __LIBLSS_HADES_SPLIT_LIKELIHOOD_HPP

# include "libLSS/samplers/core/likelihood.hpp"
# include "libLSS/samplers/core/gridLikelihoodBase.hpp"
# include "libLSS/samplers/core/simpleLikelihood.hpp"
# include <boost/variant.hpp>
# include <list>
# include <memory>
# include <tuple>

namespace LibLSS {

  /**
   * @brief Likelihood that splits the full parameter space into named blocks,
   * each evaluated by its own sub-likelihood.
   */
  class SplitAndReshapeLikelihood : public LikelihoodBase {
  protected:
    typedef boost::variant<
        std::shared_ptr<GridDensityLikelihoodBase<3>>,
        std::shared_ptr<SimpleLikelihood>>
        LikelihoodVariant;

    MPI_Communication *comm;
    std::list<std::tuple<std::string, LikelihoodVariant>> parameterLikelihood;

  public:
    SplitAndReshapeLikelihood(MPI_Communication *comm_);
    ~SplitAndReshapeLikelihood();

    void initializeLikelihood(MarkovState &state) override;
    void updateMetaParameters(MarkovState &state) override;
    void setupDefaultParameters(MarkovState &state, int catalog) override;
    void updateCosmology(CosmologicalParameters const &params) override;
    void commitAuxiliaryFields(MarkovState &state) override;

    /**
     * @brief Declare that the given parameter must be evaluated with a specific log-likelihood object.
     * The parameter needs to exist later in the MarkovState.
     *
     * Depending on the likelihood variant, the behaviour will be different:
     *   - GridDensityLikelihoodBase<3> is for likelihoods that are grid based
     *     and thus split over MPI nodes with FFTW slabbing.
     *   - SimpleLikelihood is for likelihoods whose parameters are identical
     *     and replicated on all MPI nodes.
     *
     * @param parameter
     * @param likelihood
     */
    void addNamedParameter(std::string parameter, LikelihoodVariant likelihood);
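
    /*
     * Typical wiring (illustrative sketch; the parameter names and the two
     * sub-likelihood objects are placeholders, not defined by libLSS):
     *
     *   auto split = std::make_shared<SplitAndReshapeLikelihood>(comm);
     *   split->addNamedParameter("s_hat_field", gridLikelihood);    // slabbed
     *   split->addNamedParameter("model_params", simpleLikelihood); // replicated
     *   unsigned int localDim = split->getTotalNumberOfParameters();
     */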

    /**
     * @brief Get the total number of parameters that are resident on the
     * current MPI node.
     *
     * The sampler/optimizer will need to allocate only this portion.
     *
     * @return unsigned int
     */
    unsigned int getTotalNumberOfParameters();

    /**
     * @brief Compute the log likelihood using the provided array.
     *
     * WARNING!! The parameters provided here depend heavily on the inner structure
     * of the split likelihood parameter space. Some parameters need to be
     * replicated over MPI nodes and some others not.
     *
     * @param params
     * @return double
     */
    double logLikelihood(LibLSS::const_multi_array_ref<double, 1> &params);

    /**
     * @brief Compute the gradient of the log likelihood using the provided array to evaluate it.
     *
     * WARNING!! The parameters provided here depend heavily on the inner structure
     * of the split likelihood parameter space. Some parameters need to be
     * replicated over MPI nodes and some others not.
     *
     * @param params
     * @return double
     */
    double gradientLikelihood(
        LibLSS::const_multi_array_ref<double, 1> &params,
        LibLSS::multi_array_ref<double, 1> &gradient_params);
  };

} // namespace LibLSS

#endif

344
extra/hades/libLSS/samplers/hades/base_likelihood.cpp
Normal file
@ -0,0 +1,344 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/hades/base_likelihood.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include "libLSS/cconfig.h"
#include <CosmoTool/algo.hpp>
#include <cmath>
#include <Eigen/Core>
#include <boost/format.hpp>
#include <boost/bind.hpp>
#include <CosmoTool/fourier/fft/fftw_calls.hpp>
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/tools/mpi_fftw_helper.hpp"
#include "libLSS/tools/array_tools.hpp"
#include "libLSS/samplers/rgen/hmc/hmc_density_sampler.hpp"
#include "libLSS/samplers/hades/base_likelihood.hpp"
#include "libLSS/tools/fusewrapper.hpp"
#include "libLSS/samplers/rgen/slice_sweep.hpp"

using namespace LibLSS;
using boost::extents;
using boost::format;

using CosmoTool::square;

using boost::c_storage_order;
typedef boost::multi_array_types::extent_range range;

static const int ROOT_RANK = 0;

namespace LI = LibLSS::Likelihood;

HadesBaseDensityLikelihood::HadesBaseDensityLikelihood(
    LikelihoodInfo &info, size_t numBiasParams_)
    : super_t(LI::getMPI(info), LI::gridResolution(info), LI::gridSide(info)),
      SimpleLikelihood(info), corners(LI::gridCorners(info)),
      numBiasParams(numBiasParams_), volume(array::product(L)) {}

HadesBaseDensityLikelihood::~HadesBaseDensityLikelihood() {}

void HadesBaseDensityLikelihood::updateCosmology(
    CosmologicalParameters const &cosmo_params) {
  cosmology = std::unique_ptr<Cosmology>(new Cosmology(cosmo_params));
  model->setCosmoParams(cosmo_params);
}

void HadesBaseDensityLikelihood::updateMetaParameters(MarkovState &state) {
  LIBLSS_AUTO_CONTEXT(LOG_VERBOSE, ctx);
  auto cosmo_params = state.getScalar<CosmologicalParameters>("cosmology");

  Ncat = state.getScalar<long>("NCAT");

  //initialize model uncertainty
  model = state.get<BorgModelElement>("BORG_model")->obj;

  ai = state.getScalar<double>("borg_a_initial");

  // Update forward model for maybe new cosmo params
  updateCosmology(cosmo_params);

  auto e_Ncat = boost::extents[Ncat];
  nmean.resize(e_Ncat);
  biasRef.resize(Ncat);
  data.resize(Ncat);
  sel_field.resize(Ncat);
  bias.resize(Ncat);

  for (int c = 0; c < Ncat; c++) {
    biasRef[c] = state.getScalar<bool>(format("galaxy_bias_ref_%d") % c);
    nmean[c] = state.getScalar<double>(format("galaxy_nmean_%d") % c);
    auto &stateBias =
        *state.get<ArrayType1d>(format("galaxy_bias_%d") % c)->array;
    if (stateBias.size() < numBiasParams) {
      stateBias.resize(boost::extents[numBiasParams]);
      Console::instance().print<LOG_WARNING>(
          "I will pick default bias parameters as the present array is too short.");
      setupDefaultParameters(state, c);
    }

    bias[c] = std::make_unique<BiasArray>(boost::extents[stateBias.size()]);

    fwrap(*bias[c]) = fwrap(stateBias);
    ctx.print(format(" b0=%g") % (*bias[c])[0]);
    auto data_name = str(format("galaxy_data_%d") % c);
    if (state.exists(data_name)) {
      data[c] = state.get<ArrayType>(data_name)->array;
      sel_field[c] =
          state.get<ArrayType>(format("galaxy_synthetic_sel_window_%d") % c)
              ->array;
    }
  }
}

double HadesBaseDensityLikelihood::logLikelihood(
    ArrayRef const &s_array, bool final_call) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
  using CosmoTool::square;
  typedef ArrayType::ArrayType::element ElementType;
  double L = 0;
  Cosmology &cosmo = *cosmology.get();

  ctx.print("Run forward");
  // Simulate forward model
  auto box = model->get_box_model();
  auto out_density_p = mgr->allocate_array();
  auto &out_density = out_density_p.get_array();
  model->setAdjointRequired(false);
  model->forwardModel_v2(ModelInput<3>(mgr, box, s_array));
  model->getDensityFinal(ModelOutput<3>(mgr, box, out_density));

  L = logLikelihoodSpecific(out_density);
  comm->all_reduce_t(MPI_IN_PLACE, &L, 1, MPI_SUM);

  return L;
}

double HadesBaseDensityLikelihood::logLikelihood(
    CArrayRef const &s_array, bool final_call) {
  using CosmoTool::square;
  typedef ArrayType::ArrayType::element ElementType;
  double L = 0;
  auto &out_density = final_density_field->get_array();

  auto box = model->get_box_model();
  model->setAdjointRequired(false);
  model->forwardModel_v2(ModelInput<3>(mgr, box, s_array));
  model->getDensityFinal(ModelOutput<3>(mgr, box, out_density));

  L = logLikelihoodSpecific(out_density);
  comm->all_reduce_t(MPI_IN_PLACE, &L, 1, MPI_SUM);

  return L;
}

void HadesBaseDensityLikelihood::gradientLikelihood_internal(
    ModelInput<3> input_field, ModelOutputAdjoint<3> grad_array) {
  using CosmoTool::square;
  typedef ArrayType::ArrayType::element ElementType;
  double Epoisson = 0;

  auto box = model->get_box_model();
  auto &out_density = final_density_field->get_array();

  model->setAdjointRequired(true);
  model->forwardModel_v2(std::move(input_field));
  model->getDensityFinal(ModelOutput<3>(mgr, box, out_density));

  auto tmp_grad_p = model->out_mgr->allocate_array();
  auto &tmp_grad = tmp_grad_p.get_array();
  gradientLikelihoodSpecific(out_density, tmp_grad);

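  // Push the likelihood gradient back through the forward model (adjoint
  // run) to obtain the gradient with respect to the input field.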
  model->adjointModel_v2(ModelInputAdjoint<3>(mgr, box, tmp_grad));
  model->getAdjointModelOutput(std::move(grad_array));
}

unsigned int HadesBaseDensityLikelihood::numDimensions() const { return 0; }

double HadesBaseDensityLikelihood::logLikelihoodSimple(
    ParameterSpace const &global,
    LibLSS::const_multi_array_ref<double, 1> const &params) {
  return 0;
}

void HadesBaseDensityLikelihood::gradientLikelihoodSimple(
    ParameterSpace const &global,
    LibLSS::const_multi_array_ref<double, 1> const &params,
    LibLSS::multi_array_ref<double, 1> &gradient) {}

void HadesBaseDensityLikelihood::gradientLikelihood(
    ArrayRef const &s_array, ArrayRef &grad_array, bool accumulate,
    double scaling) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);

  auto box = model->get_box_model();

  if (!accumulate) {
    gradientLikelihood_internal(
        ModelInput<3>(mgr, box, s_array),
        ModelOutputAdjoint<3>(mgr, box, grad_array));
    fwrap(grad_array) = fwrap(grad_array) * scaling;
  } else {
    auto real_gradient_p = mgr->allocate_array();
    auto &real_gradient = real_gradient_p.get_array();

    gradientLikelihood_internal(
        ModelInput<3>(mgr, box, s_array),
        ModelOutputAdjoint<3>(mgr, box, real_gradient));
    fwrap(grad_array) = fwrap(grad_array) + scaling * fwrap(real_gradient);
  }
}

void HadesBaseDensityLikelihood::gradientLikelihood(
    CArrayRef const &parameters, CArrayRef &grad_array, bool accumulate,
    double scaling) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
  // auto real_gradient_p = mgr->allocate_array();
  // auto &real_gradient = real_gradient_p.get_array();
  auto box = model->get_box_model();

  {
    if (!accumulate) {
      gradientLikelihood_internal(
          ModelInput<3>(mgr, box, parameters),
          ModelOutputAdjoint<3>(mgr, box, grad_array));
      fwrap(grad_array) = fwrap(grad_array) * scaling;
    } else {
      auto tmp_complex_field = mgr->allocate_complex_array();
      gradientLikelihood_internal(
          ModelInput<3>(mgr, box, parameters),
          ModelOutputAdjoint<3>(mgr, box, tmp_complex_field.get_array()));

      fwrap(grad_array) =
          fwrap(grad_array) + fwrap(tmp_complex_field.get_array()) * scaling;
    }
  }
}

void HadesBaseDensityLikelihood::generateMockData(
    CArrayRef const &parameters, MarkovState &state) {
  LIBLSS_AUTO_CONTEXT(LOG_INFO, ctx);

  auto &out_density = final_density_field->get_array();
  auto box = model->get_box_model();

  model->setAdjointRequired(false);
  model->forwardModel_v2(ModelInput<3>(mgr, box, parameters));
  model->getDensityFinal(ModelOutput<3>(
      model->out_mgr, model->get_box_model_output(), out_density));

  generateMockSpecific(out_density, state);
  commitAuxiliaryFields(state);
}

void HadesBaseDensityLikelihood::initializeLikelihood(MarkovState &state) {
  Ncat = state.getScalar<long>("NCAT");
  model = state.get<BorgModelElement>("BORG_model")->obj;
  borg_final_density = new ArrayType(model->out_mgr->extents_real_strict());
  final_density_field = model->out_mgr->allocate_ptr_array();

  std::array<ssize_t, 3> out_N, local_N;
  model->get_box_model_output().fill(out_N);
  borg_final_density->setRealDims(out_N);

  std::copy(
      borg_final_density->array->shape(),
      borg_final_density->array->shape() + 3, local_N.begin());

  for (size_t c = 0; c < Ncat; c++) {
    auto &stateBias =
        *state.get<ArrayType1d>(format("galaxy_bias_%d") % c)->array;
    if (stateBias.size() < numBiasParams) {
      stateBias.resize(boost::extents[numBiasParams]);
      Console::instance().print<LOG_WARNING>(
          "I will pick default bias parameters as the present array is too short.");
      setupDefaultParameters(state, c);
    }

    auto &data = *state.get<ArrayType>(format("galaxy_data_%d") % c)->array;
    // Check data has correct shape
    if (std::mismatch(local_N.begin(), local_N.end(), data.shape()).first !=
        local_N.end())
      error_helper<ErrorParams>(boost::str(
          boost::format(
              "Invalid output size (%dx%dx%d) vs data size (%dx%dx%d)") %
          local_N[0] % local_N[1] % local_N[2] % data.shape()[0] %
          data.shape()[1] % data.shape()[2]));
  }

  state.newElement("BORG_final_density", borg_final_density, true);
}

void HadesBaseDensityLikelihood::commitAuxiliaryFields(MarkovState &state) {
  array::scaleAndCopyArray3d(
      *borg_final_density->array, final_density_field->get_array(), 1, true);
}

void HadesBaseDensityLikelihood::updateNmean(int catalog, double nmean_) {
  nmean[catalog] = nmean_;
}

void HadesBaseDensityLikelihood::updateBiasParameters(
    int catalog, BiasArray const &params) {
  fwrap(*bias[catalog]) = fwrap(params);
}

void HadesMetaSampler::initialize(MarkovState &state) {
  Ncat = state.getScalar<long>("NCAT");
}

void HadesMetaSampler::restore(MarkovState &state) {
  Ncat = state.getScalar<long>("NCAT");
}

void HadesMetaSampler::sample(MarkovState &state) {
  LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
  RandomGen *rng = state.get<RandomGen>("random_generator");

  if (state.getScalar<bool>("bias_sampler_blocked"))
    return;

  auto const &density = *state.get<ArrayType>("BORG_final_density")->array;
  double const ares_heat = state.getScalar<double>("ares_heat");

  likelihood->updateMetaParameters(state);

  for (int c = 0; c < Ncat; c++) {
    bool biasRef = state.getScalar<bool>(format("galaxy_bias_ref_%d") % c);
    auto const &sel_array =
        *state.get<SelArrayType>(format("galaxy_sel_window_%d") % c)->array;
    double &nmean = state.getScalar<double>(format("galaxy_nmean_%d") % c);
    auto &bias = *state.get<ArrayType1d>(format("galaxy_bias_%d") % c)->array;

    if (biasRef)
      continue;

    ctx.print(format("considering catalog %d") % c);

    nmean = slice_sweep_double(
        comm, rng->get(),
        [&](double x) -> double {
          likelihood->updateNmean(c, x);
          return -ares_heat * likelihood->logLikelihoodSpecific(density);
        },
        nmean, 0.1);

    likelihood->updateNmean(c, nmean);

    for (int ib = 0; ib < bias.size(); ib++) {
      bias[ib] = slice_sweep(
          comm, rng->get(),
          [&](double x) -> double {
            boost::multi_array<double, 1> loc_bias = bias;
            loc_bias[ib] = x;
            likelihood->updateBiasParameters(c, loc_bias);
            return -ares_heat * likelihood->logLikelihoodSpecific(density);
          },
          bias[ib], 0.1);
      likelihood->updateBiasParameters(c, bias);
    }
  }
}

165
extra/hades/libLSS/samplers/hades/base_likelihood.hpp
Normal file
@ -0,0 +1,165 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/hades/base_likelihood.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_HADES_BASE_LIKELIHOOD_HPP
#define __LIBLSS_HADES_BASE_LIKELIHOOD_HPP

#include "libLSS/cconfig.h"
#include <CosmoTool/fourier/fft/fftw_calls.hpp>
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/mcmc/global_state.hpp"
#include "libLSS/tools/fftw_allocator.hpp"
#include "libLSS/samplers/core/markov.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/core/types_samplers.hpp"
#include "libLSS/samplers/core/gridLikelihoodBase.hpp"
#include "libLSS/samplers/core/likelihood.hpp"
#include "libLSS/samplers/core/simpleLikelihood.hpp"
#include "libLSS/physics/forward_model.hpp"
#include "libLSS/physics/likelihoods/base.hpp"

namespace LibLSS {

  /**
   * This is a base class for simple bias models. It inherits both from the
   * forward-model based likelihood and the simple likelihood, as two kinds of
   * gradients are produced: the ForwardModelBasedLikelihood part produces
   * gradients w.r.t. the density field, whereas the SimpleLikelihood part
   * produces gradients w.r.t. the bias parameters.
   */
  class HadesBaseDensityLikelihood : public ForwardModelBasedLikelihood,
                                     public SimpleLikelihood {
  public:
    //typedef GridDensityLikelihoodBase<3> super_t;
    typedef ForwardModelBasedLikelihood super_t;

  protected:
    GridLengths corners;
    ArrayType1d *vobs;
    ArrayType *borg_final_density;

    std::shared_ptr<BORGForwardModel> model;

    std::unique_ptr<Cosmology> cosmology;
    std::shared_ptr<Mgr::U_ArrayReal> final_density_field;
    double ai;
    double volume;

    boost::multi_array<double, 1> nmean;
    std::vector<std::shared_ptr<ArrayType::ArrayType>> data, sel_field;

    typedef ArrayType1d::ArrayType BiasArray;
    std::vector<std::unique_ptr<BiasArray>> bias;
    std::vector<bool> biasRef;
    size_t numBiasParams;

    void gradientLikelihood_internal(
        ModelInput<3> parameters, ModelOutputAdjoint<3> gradient_parameters);

  public:
    HadesBaseDensityLikelihood(LikelihoodInfo &info, size_t numBiasParams);
    virtual ~HadesBaseDensityLikelihood();

    void
    generateMockData(CArrayRef const &parameters, MarkovState &state) override;

    std::shared_ptr<BORGForwardModel> getForwardModel() override {
      return model;
    }

    void initializeLikelihood(MarkovState &state) override;
    void updateMetaParameters(MarkovState &state) override;
    void updateCosmology(CosmologicalParameters const &params) override;
    void updateBiasParameters(int catalog, BiasArray const &params);
    void updateNmean(int catalog, double nmean);
    void commitAuxiliaryFields(MarkovState &state) override;

    /**
     * This is for computing a log likelihood with a provided dark matter density field
     */
    virtual double logLikelihoodSpecific(ArrayRef const &density) = 0;

    /**
     * This is for computing the gradient of the log likelihood with a provided
     * dark matter density field.
     * The output gradient must be stored in "grad_density".
     */
    virtual void gradientLikelihoodSpecific(
        ArrayRef const &density, ArrayRef &grad_density) = 0;

    virtual void
    generateMockSpecific(ArrayRef const &density, MarkovState &state) = 0;

    /*
     * This computes the opposite of the log likelihood. If gradientIsNext is
     * true the model has to prepare itself for a gradientLikelihood call.
     * Otherwise it can free temporary memory used to compute the forward model.
     * This variant takes the image in real space representation. The input is
     * preserved as indicated by the const.
     */
    double logLikelihood(
        ArrayRef const &parameters, bool gradientIsNext = false) override;

    /*
     * This is the gradient of the opposite of the log likelihood. It
     * returns the gradient in real space representation.
     * You must have called logLikelihood with gradientIsNext=true first.
     * If accumulate is set to true, the gradient will be summed with existing values.
     * 'scaling' indicates by how much the gradient must be scaled before accumulation.
     */
    void gradientLikelihood(
        ArrayRef const &parameters, ArrayRef &gradient_parameters,
        bool accumulate, double scaling) override;

    /*
     * This is the opposite of the log likelihood, Fourier representation. The
     * same rules apply as for the real space variant. The modes must be
     * in units of volume, as per the continuous Fourier transform.
     */
    double logLikelihood(
        CArrayRef const &parameters, bool gradientIsNext = false) override;

    /*
     * This is the gradient of the opposite of the log likelihood
     * (Fourier representation variant).
     * The same rules apply as for the real space variant.
     */
    void gradientLikelihood(
        CArrayRef const &parameters, CArrayRef &gradient_parameters,
        bool accumulate, double scaling) override;

    double logLikelihoodSimple(
        ParameterSpace const &global,
        LibLSS::const_multi_array_ref<double, 1> const &params) override;

    void gradientLikelihoodSimple(
        ParameterSpace const &global,
        LibLSS::const_multi_array_ref<double, 1> const &params,
        LibLSS::multi_array_ref<double, 1> &gradient) override;

    unsigned int numDimensions() const override;
  };

  class HadesMetaSampler : public MarkovSampler {
  protected:
    typedef std::shared_ptr<HadesBaseDensityLikelihood> likelihood_t;
    int Ncat;
    MPI_Communication *comm;
    likelihood_t likelihood;

  public:
    HadesMetaSampler(MPI_Communication *comm_, likelihood_t likelihood_)
        : comm(comm_), likelihood(likelihood_) {}
    virtual ~HadesMetaSampler() {}

    void initialize(MarkovState &state) override;
    void restore(MarkovState &state) override;
    void sample(MarkovState &state) override;
  };

}; // namespace LibLSS

#endif

172
extra/hades/libLSS/samplers/hades/hades_linear_likelihood.cpp
Normal file
@ -0,0 +1,172 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/hades/hades_linear_likelihood.cpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#include <CosmoTool/algo.hpp>
#include <cmath>
#include <Eigen/Core>
#include <boost/format.hpp>
#include <boost/bind.hpp>
#include <CosmoTool/fourier/fft/fftw_calls.hpp>
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/tools/mpi_fftw_helper.hpp"
#include "libLSS/tools/array_tools.hpp"
#include "libLSS/samplers/hades/hades_linear_likelihood.hpp"
#include "libLSS/tools/hermiticity_fixup.hpp"

using namespace LibLSS;
using boost::extents;
using boost::format;

using CosmoTool::square;

using boost::c_storage_order;
typedef boost::multi_array_types::extent_range range;

typedef Eigen::Map<Eigen::ArrayXd, Eigen::Aligned> MappedArray;

static const int ROOT_RANK = 0;

HadesLinearDensityLikelihood::HadesLinearDensityLikelihood(LikelihoodInfo &info)
    : super_t(info, 1) {}

HadesLinearDensityLikelihood::~HadesLinearDensityLikelihood() {}

double
HadesLinearDensityLikelihood::logLikelihoodSpecific(ArrayRef const &delta) {
  double L = 0;
  size_t const startN0 = model->out_mgr->startN0;
  size_t const endN0 = startN0 + model->out_mgr->localN0;
  size_t const N1 = model->out_mgr->N1;
  size_t const N2 = model->out_mgr->N2;

  for (int c = 0; c < Ncat; c++) {
    auto &sel_array = *(sel_field[c]);
    double nmean_c = nmean[c];
    double bias_c = (*(bias[c]))[0];
    auto &data_c = *(data[c]);

#pragma omp parallel for schedule(static) collapse(3) reduction(+ : L)
    for (size_t n0 = startN0; n0 < endN0; n0++) {
      for (size_t n1 = 0; n1 < N1; n1++) {
        for (size_t n2 = 0; n2 < N2; n2++) {
          double selection = sel_array[n0][n1][n2];

          if (selection > 0) {
            double Nobs = data_c[n0][n1][n2];
            double d_galaxy = bias_c * delta[n0][n1][n2];

            assert(!isnan(Nobs));
            assert(!isnan(d_galaxy));
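            // Gaussian data model: each selected voxel contributes
            //   [selection * nmean * (1 + b * delta) - Nobs]^2 / (selection * nmean)
            // to 2L; the overall factor 1/2 is applied after the loops.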
            L += square(selection * nmean_c * (1 + d_galaxy) - Nobs) /
                 (selection * nmean_c);
            if (std::isnan(L)) {
              error_helper<ErrorBadState>("NaN in Likelihood");
            }
            if (std::isinf(L)) {
              error_helper<ErrorBadState>(
                  format("Inf in hamiltonian at n0=%d n1=%d n2=%d d_g=%lg "
                         "Nobs=%lg") %
                  n0 % n1 % n2 % d_galaxy % Nobs);
            }
          }
        }
      }
    }
  }

  L *= 0.5;

  return L;
}

void HadesLinearDensityLikelihood::gradientLikelihoodSpecific(
    ArrayRef const &delta, ArrayRef &grad_array) {

  size_t const startN0 = model->out_mgr->startN0;
  size_t const endN0 = startN0 + model->out_mgr->localN0;
  size_t const N1 = model->out_mgr->N1;
  size_t const N2 = model->out_mgr->N2;

  fwrap(grad_array) = 0;

  for (int c = 0; c < Ncat; c++) {
    auto &sel_array = *(sel_field[c]);
    auto &data_c = *(data[c]);
    double bias_c = (*bias[c])[0];
    double nmean_c = nmean[c];

#pragma omp parallel for collapse(3)
    for (size_t n0 = startN0; n0 < endN0; n0++) {
      for (size_t n1 = 0; n1 < N1; n1++) {
        for (size_t n2 = 0; n2 < N2; n2++) {
          double deltaElement = delta[n0][n1][n2];
          double d_galaxy = bias_c * deltaElement;
          double d_galaxy_prime = bias_c;
          double response = sel_array[n0][n1][n2];
          double Nobs = data_c[n0][n1][n2];

          if (response == 0)
            continue;

          grad_array[n0][n1][n2] +=
              (nmean_c * response * (1 + d_galaxy) - Nobs) * d_galaxy_prime;
          assert(!isnan(grad_array[n0][n1][n2]));
        }
      }
    }
  }
}

void HadesLinearDensityLikelihood::generateMockSpecific(
    ArrayRef const &delta, MarkovState &state) {
  LIBLSS_AUTO_CONTEXT(LOG_INFO, ctx);
  RandomGen *rgen = state.get<RandomGen>("random_generator");
  size_t const startN0 = model->out_mgr->startN0;
  size_t const endN0 = startN0 + model->out_mgr->localN0;
  size_t const N1 = model->out_mgr->N1;
  size_t const N2 = model->out_mgr->N2;

  for (int c = 0; c < Ncat; c++) {
    ctx.print(format("Generating mock data %d") % c);
    auto &sel_array = *sel_field[c];
    auto &g_field = *state.get<ArrayType>(format("galaxy_data_%d") % c)->array;
    double nmean_c = nmean[c];
    double bias_c = (*bias[c])[0];

#pragma omp parallel for schedule(static) collapse(3)
    for (size_t n0 = startN0; n0 < endN0; n0++) {
      for (size_t n1 = 0; n1 < N1; n1++) {
        for (size_t n2 = 0; n2 < N2; n2++) {
          double R = nmean_c * sel_array[n0][n1][n2];
          double gmean = R * (1 + bias_c * delta[n0][n1][n2]);
          assert(!isnan(gmean));
          assert(!isnan(R));
          assert(R >= 0);
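          // Mock counts are drawn from a Gaussian with mean gmean and
          // variance R, matching the data model used in logLikelihoodSpecific.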
          g_field[n0][n1][n2] = rgen->get().gaussian() * sqrt(R) + gmean;
          assert(!isnan(g_field[n0][n1][n2]));
        }
      }
    }
  }
}

void HadesLinearDensityLikelihood::initializeLikelihood(MarkovState &state) {
  LIBLSS_AUTO_CONTEXT(LOG_INFO, ctx);
  super_t::initializeLikelihood(state);
}

void HadesLinearDensityLikelihood::setupDefaultParameters(
    MarkovState &state, int catalog) {
  auto &bias_c =
      *state.formatGet<ArrayType1d>("galaxy_bias_%d", catalog)->array;
  auto &nmean_c = state.formatGetScalar<double>("galaxy_nmean_%d", catalog);

  bias_c[0] = 1.0;
  nmean_c = 1;
}

46
extra/hades/libLSS/samplers/hades/hades_linear_likelihood.hpp
Normal file
@ -0,0 +1,46 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/hades/hades_linear_likelihood.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_HADES_LINEAR_DENSITY_HPP
#define __LIBLSS_HADES_LINEAR_DENSITY_HPP

#include <CosmoTool/fourier/fft/fftw_calls.hpp>
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/mcmc/global_state.hpp"
#include "libLSS/tools/fftw_allocator.hpp"
#include "libLSS/samplers/core/markov.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/core/types_samplers.hpp"
#include "libLSS/samplers/core/gridLikelihoodBase.hpp"
#include "libLSS/physics/forward_model.hpp"
#include "libLSS/samplers/hades/base_likelihood.hpp"

namespace LibLSS {

  class HadesLinearDensityLikelihood : public HadesBaseDensityLikelihood {
  public:
    typedef HadesBaseDensityLikelihood super_t;
    typedef HadesBaseDensityLikelihood::super_t grid_t;

  public:
    HadesLinearDensityLikelihood(LikelihoodInfo &info);
    virtual ~HadesLinearDensityLikelihood();

    virtual void setupDefaultParameters(MarkovState &state, int catalog);
    virtual void
    generateMockSpecific(ArrayRef const &parameters, MarkovState &state);
    virtual double logLikelihoodSpecific(ArrayRef const &parameters);
    virtual void gradientLikelihoodSpecific(
        ArrayRef const &parameters, ArrayRef &gradient_parameters);
    virtual void initializeLikelihood(MarkovState &state);
  };

}; // namespace LibLSS

#endif
|
81
extra/hades/libLSS/samplers/model_params.cpp
Normal file
|
@@ -0,0 +1,81 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/model_params.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/tools/fftw_allocator.hpp"
|
||||
#include "libLSS/samplers/core/markov.hpp"
|
||||
#include "libLSS/samplers/core/random_number.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include <vector>
|
||||
#include "libLSS/samplers/model_params.hpp"
|
||||
#include "libLSS/samplers/rgen/slice_sweep.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
|
||||
ModelParamsSampler::ModelParamsSampler(
|
||||
MPI_Communication *comm_, std::string const& prefix_, std::vector<std::string> const ¶ms,
|
||||
std::shared_ptr<GridDensityLikelihoodBase<3>> likelihood_,
|
||||
std::shared_ptr<BORGForwardModel> model_, ModelDictionnary init)
|
||||
: MarkovSampler(), comm(comm_), paramsToSample(params),
|
||||
likelihood(likelihood_), model(model_), init_state(init), prefix(prefix_) {}
|
||||
|
||||
ModelParamsSampler::~ModelParamsSampler() {}
|
||||
|
||||
void ModelParamsSampler::initialize(MarkovState &state) { restore(state); }
|
||||
|
||||
void ModelParamsSampler::restore(MarkovState &state) {
|
||||
for (auto const &p : paramsToSample) {
|
||||
std::string pName = std::string("model_params_") + prefix + p;
|
||||
state
|
||||
.newScalar<double>(pName, boost::any_cast<double>(init_state[p]), true)
|
||||
->subscribeLoaded([this, p, pName, &state]() {
|
||||
model->setModelParams({{p, state.getScalar<double>(pName)}});
|
||||
});
|
||||
}
|
||||
model->setModelParams(init_state);
|
||||
}
|
||||
|
||||
void ModelParamsSampler::sample(MarkovState &state) {
|
||||
LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
|
||||
auto const &x_density = *state.get<CArrayType>("s_hat_field")->array;
|
||||
double const ares_heat = state.getScalar<double>("ares_heat");
|
||||
|
||||
RandomGen *rng = state.get<RandomGen>("random_generator");
|
||||
|
||||
likelihood->updateMetaParameters(state);
|
||||
|
||||
for (auto const &p : paramsToSample) {
|
||||
double &value = state.getScalar<double>(std::string("model_params_") + prefix + p);
|
||||
ctx.format2<LOG_VERBOSE>("Sampling model parameter '%s'", p);
|
||||
value = slice_sweep_double(
|
||||
comm, rng->get(),
|
||||
[&](double x) {
|
||||
ctx.format2<LOG_VERBOSE>("try x[%s] = %g", p, x);
|
||||
if (x < 0)
|
||||
return -std::numeric_limits<double>::infinity();
|
||||
ModelDictionnary this_param_map;
|
||||
this_param_map[p] = x;
|
||||
model->setModelParams(this_param_map);
|
||||
double log_L = -likelihood->logLikelihood(x_density, false);
|
||||
ctx.format2<LOG_VERBOSE>("log_L = %g", log_L);
|
||||
return ares_heat * log_L;
|
||||
},
|
||||
value, 1.);
|
||||
|
||||
comm->broadcast_t(&value, 1, 0);
|
||||
|
||||
ModelDictionnary this_param_map;
|
||||
this_param_map[p] = value;
|
||||
model->setModelParams(this_param_map);
|
||||
}
|
||||
likelihood->logLikelihood(x_density, false);
|
||||
likelihood->commitAuxiliaryFields(state);
|
||||
}
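Each model parameter is redrawn with slice_sweep_double, which sweeps the tempered conditional log-posterior defined by the lambda above (the lambda pushes the trial value into the forward model and returns ares_heat times the log-likelihood, with -infinity for negative trial values). As a rough, self-contained illustration of what a stepping-out slice sampler of this kind does (the helper below is a hypothetical sketch with assumed names and step width, not the LibLSS implementation):

// Hypothetical stand-alone sketch of a stepping-out slice sampler; the real
// slice_sweep_double in libLSS may differ (step width, bounds, MPI handling).
#include <cmath>
#include <functional>
#include <random>

double slice_sample_once(std::function<double(double)> const &log_post,
                         double x0, double width, std::mt19937 &rng) {
  std::uniform_real_distribution<double> U(0.0, 1.0);

  // Auxiliary level drawn under the log-density at the current point.
  double const log_y = log_post(x0) + std::log(U(rng));

  // Step out an interval [L, R] bracketing the slice {x : log_post(x) > log_y}.
  double L = x0 - width * U(rng);
  double R = L + width;
  for (int m = 0; m < 100 && log_post(L) > log_y; ++m)
    L -= width;
  for (int m = 0; m < 100 && log_post(R) > log_y; ++m)
    R += width;

  // Shrink towards x0 until a point inside the slice is found.
  for (;;) {
    double const x1 = L + (R - L) * U(rng);
    if (log_post(x1) > log_y)
      return x1;
    (x1 < x0 ? L : R) = x1;
  }
}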
|
51
extra/hades/libLSS/samplers/model_params.hpp
Normal file
|
@@ -0,0 +1,51 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/model_params.hpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#pragma once
|
||||
#ifndef __LIBLSS_HADES_MODEL_PARAMS_SAMPLER_HPP
|
||||
# define __LIBLSS_HADES_MODEL_PARAMS_SAMPLER_HPP
|
||||
|
||||
# include "libLSS/mpi/generic_mpi.hpp"
|
||||
# include "libLSS/mcmc/global_state.hpp"
|
||||
# include "libLSS/tools/fftw_allocator.hpp"
|
||||
# include "libLSS/samplers/core/markov.hpp"
|
||||
# include "libLSS/samplers/core/random_number.hpp"
|
||||
# include "libLSS/samplers/core/types_samplers.hpp"
|
||||
# include "libLSS/physics/forward_model.hpp"
|
||||
# include "libLSS/samplers/core/gridLikelihoodBase.hpp"
|
||||
# include <vector>
|
||||
|
||||
namespace LibLSS {
|
||||
|
||||
class ModelParamsSampler : public MarkovSampler {
|
||||
protected:
|
||||
MPI_Communication *comm;
|
||||
std::vector<std::string> paramsToSample;
|
||||
std::shared_ptr<BORGForwardModel> model;
|
||||
std::shared_ptr<GridDensityLikelihoodBase<3>> likelihood;
|
||||
ModelDictionnary init_state;
|
||||
std::string prefix;
|
||||
|
||||
public:
|
||||
ModelParamsSampler(
|
||||
MPI_Communication *comm_, std::string const& prefix, std::vector<std::string> const ¶ms,
|
||||
std::shared_ptr<GridDensityLikelihoodBase<3>> likelihood_,
|
||||
std::shared_ptr<BORGForwardModel> model_,
|
||||
ModelDictionnary init = ModelDictionnary());
|
||||
virtual ~ModelParamsSampler();
|
||||
|
||||
void initialize(MarkovState &state) override;
|
||||
void restore(MarkovState &state) override;
|
||||
void sample(MarkovState &state) override;
|
||||
};
|
||||
|
||||
} // namespace LibLSS
|
||||
|
||||
#endif
|
||||
|
25
extra/hades/libLSS/samplers/rgen/density_sampler.hpp
Normal file
|
@@ -0,0 +1,25 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/rgen/density_sampler.hpp
    Copyright (C) 2019 Guilhem Lavaux <guilhem.lavaux@iap.fr>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_SAMPLERS_DENSITY_SAMPLER_HPP
#define __LIBLSS_SAMPLERS_DENSITY_SAMPLER_HPP

namespace LibLSS {

  class GenericDensitySampler: public MarkovSampler {
  public:
    virtual void generateMockData(MarkovState& state) = 0;
  };

}

#endif
// ARES TAG: authors_num = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
// ARES TAG: year(0) = 2019
|
@@ -0,0 +1,188 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/rgen/frozen/frozen_phase_density_sampler.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include <CosmoTool/algo.hpp>
|
||||
#include <cmath>
|
||||
#include <Eigen/Core>
|
||||
#include <boost/format.hpp>
|
||||
#include <functional>
|
||||
#include <CosmoTool/fourier/fft/fftw_calls.hpp>
|
||||
#include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
#include "libLSS/tools/array_tools.hpp"
|
||||
#include "libLSS/samplers/rgen/frozen/frozen_phase_density_sampler.hpp"
|
||||
#include "libLSS/tools/uninitialized_type.hpp"
|
||||
#include "libLSS/tools/hermiticity_fixup.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <H5Cpp.h>
|
||||
|
||||
static const bool ULTRA_VERBOSE = false;
|
||||
static const bool HMC_PERF_TEST = true;
|
||||
static const bool FIXED_INTEGRATION_PATH = false;
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::extents;
|
||||
using boost::format;
|
||||
|
||||
using CosmoTool::square;
|
||||
|
||||
using boost::c_storage_order;
|
||||
typedef boost::multi_array_types::extent_range range;
|
||||
|
||||
typedef Eigen::Map<Eigen::ArrayXd, Eigen::Aligned> MappedArray;
|
||||
|
||||
static const int ROOT_RANK = 0;
|
||||
|
||||
namespace ph = std::placeholders;
|
||||
|
||||
FrozenPhaseDensitySampler::FrozenPhaseDensitySampler(
|
||||
MPI_Communication *comm, Likelihood_t likelihood)
|
||||
: analysis_plan(0), synthesis_plan(0), comm(comm) {
|
||||
this->likelihood = likelihood;
|
||||
}
|
||||
|
||||
void FrozenPhaseDensitySampler::generateMockData(MarkovState &state) {
|
||||
likelihood->updateMetaParameters(state);
|
||||
|
||||
auto &rgen = state.get<RandomGen>("random_generator")->get();
|
||||
|
||||
{
|
||||
auto tmp_real = base_mgr->allocate_array();
|
||||
double const inv_sqN = 1.0 / std::sqrt(N0 * N1 * N2);
|
||||
|
||||
// Generate a bunch of gaussian distributed random number (variance 1)
|
||||
fwrap(tmp_real.get_array()) =
|
||||
rgen.gaussian(ones<double, 3>(base_mgr->extents_real()) * inv_sqN);
|
||||
|
||||
base_mgr->execute_r2c(
|
||||
analysis_plan, tmp_real.get_array().data(), x_hat_field->array->data());
|
||||
|
||||
fwrap(*s_hat_field->array) = fwrap(*x_hat_field->array);
|
||||
}
|
||||
auto tmp_complex = base_mgr->allocate_complex_array();
|
||||
fwrap(tmp_complex.get_array()) = fwrap(*s_hat_field->array) / volume;
|
||||
|
||||
base_mgr->execute_c2r(
|
||||
synthesis_plan, tmp_complex.get_array().data(), s_field->array->data());
|
||||
|
||||
likelihood->generateMockData(*s_hat_field->array, state);
|
||||
}
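The mock generator above draws real-space white noise with standard deviation 1/\sqrt{N_0 N_1 N_2} before the unnormalized real-to-complex transform, which leaves each Fourier mode with unit variance; a one-line check of that scaling (standard FFT algebra, not code taken from the source):

  \hat{s}_k = \sum_x s_x\, e^{-i k\cdot x}, \qquad
  \langle |\hat{s}_k|^2 \rangle = \sum_x \langle s_x^2 \rangle = N \cdot \frac{1}{N} = 1, \qquad N = N_0 N_1 N_2 .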
|
||||
|
||||
void FrozenPhaseDensitySampler::initialize(MarkovState &state) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
restore(state);
|
||||
|
||||
// Now load phases
|
||||
if (phaseFilename) {
|
||||
H5::H5File f(*phaseFilename, H5F_ACC_RDONLY);
|
||||
|
||||
ctx.print("Read-in phase data");
|
||||
{
|
||||
U_Array<double, 3> tmp_x(
|
||||
base_mgr
|
||||
->extents_real_strict()); // We need to allocate this temporary array to adapt shape.
|
||||
CosmoTool::hdf5_read_array(f, dataName, tmp_x.get_array(), false, true);
|
||||
|
||||
// FS: updated according to GL, 6/23/20
|
||||
fwrap(array::slice_array(*x_field->array, base_mgr->strict_range())) = fwrap(tmp_x.get_array());
|
||||
}
|
||||
|
||||
auto tmp_field = base_mgr->allocate_array();
|
||||
fwrap(tmp_field.get_array()) = fwrap(*x_field->array) * volNorm;
|
||||
|
||||
ctx.print("Fourier transform");
|
||||
base_mgr->execute_r2c(
|
||||
analysis_plan, tmp_field.get_array().data(),
|
||||
x_hat_field->array->data());
|
||||
|
||||
fwrap(*s_hat_field->array) = fwrap(*x_hat_field->array);
|
||||
// WARNING: s_field/s_hat_field are not consistent at that moment. They will become at the first
|
||||
// call to sample here.
|
||||
}
|
||||
}
|
||||
|
||||
void FrozenPhaseDensitySampler::restore(MarkovState &state) {
|
||||
Console &cons = Console::instance();
|
||||
ConsoleContext<LOG_DEBUG> ctx("Initialize frozen density sampler");
|
||||
|
||||
N0 = state.getScalar<long>("N0");
|
||||
N1 = state.getScalar<long>("N1");
|
||||
N2 = state.getScalar<long>("N2");
|
||||
|
||||
base_mgr = std::unique_ptr<DFT_Manager>(new DFT_Manager(N0, N1, N2, comm));
|
||||
size_t Ntot = N0 * N1 * N2;
|
||||
|
||||
L0 = state.getScalar<double>("L0");
|
||||
L1 = state.getScalar<double>("L1");
|
||||
L2 = state.getScalar<double>("L2");
|
||||
|
||||
startN0 = base_mgr->startN0;
|
||||
localN0 = base_mgr->localN0;
|
||||
endN0 = startN0 + localN0;
|
||||
|
||||
cons.print<LOG_DEBUG>("Allocating s field");
|
||||
s_hat_field =
|
||||
new CArrayType(base_mgr->extents_complex(), base_mgr->allocator_complex);
|
||||
s_hat_field->setRealDims(ArrayDimension(N0, N1, base_mgr->N2_HC));
|
||||
x_hat_field =
|
||||
new CArrayType(base_mgr->extents_complex(), base_mgr->allocator_complex);
|
||||
x_hat_field->setRealDims(ArrayDimension(N0, N1, base_mgr->N2_HC));
|
||||
x_field = new ArrayType(base_mgr->extents_real(), base_mgr->allocator_real);
|
||||
x_field->setRealDims(ArrayDimension(N0, N1, base_mgr->N2real));
|
||||
s_field = new ArrayType(base_mgr->extents_real(), base_mgr->allocator_real);
|
||||
s_field->setRealDims(ArrayDimension(N0, N1, base_mgr->N2real));
|
||||
|
||||
cons.print<LOG_DEBUG>("Allocating momentum field");
|
||||
|
||||
// Pass the ownership to state
|
||||
state.newElement("s_hat_field", s_hat_field, true);
|
||||
state.newElement("s_field", s_field);
|
||||
state.newElement("x_hat_field", x_hat_field);
|
||||
state.newElement("x_field", x_field);
|
||||
|
||||
fwrap(*x_hat_field->array) = 0;
|
||||
fwrap(*x_field->array) = 0;
|
||||
|
||||
volume = L0 * L1 * L2;
|
||||
volNorm = volume / Ntot;
|
||||
|
||||
state.newScalar("hmc_force_save_final", true);
|
||||
|
||||
auto tmp_field = base_mgr->allocate_array();
|
||||
synthesis_plan = base_mgr->create_c2r_plan(
|
||||
x_hat_field->array->data(), tmp_field.get_array().data());
|
||||
analysis_plan = base_mgr->create_r2c_plan(
|
||||
tmp_field.get_array().data(), x_hat_field->array->data());
|
||||
|
||||
likelihood->initializeLikelihood(state);
|
||||
}
|
||||
|
||||
FrozenPhaseDensitySampler::~FrozenPhaseDensitySampler() {
|
||||
if (base_mgr) {
|
||||
MFCalls::destroy_plan(analysis_plan);
|
||||
MFCalls::destroy_plan(synthesis_plan);
|
||||
}
|
||||
}
|
||||
|
||||
void FrozenPhaseDensitySampler::sample(MarkovState &state) {
|
||||
ConsoleContext<LOG_INFO_SINGLE> ctx("hades density field sampler");
|
||||
|
||||
fwrap(*s_hat_field->array) =
|
||||
fwrap(*state.get<CArrayType>("x_hat_field")->array);
|
||||
|
||||
auto tmp_complex = base_mgr->allocate_complex_array();
|
||||
fwrap(tmp_complex.get_array()) = fwrap(*s_hat_field->array) / volume;
|
||||
|
||||
base_mgr->execute_c2r(
|
||||
synthesis_plan, tmp_complex.get_array().data(), s_field->array->data());
|
||||
|
||||
likelihood->updateMetaParameters(state);
|
||||
likelihood->logLikelihood(*s_hat_field->array, false);
|
||||
likelihood->commitAuxiliaryFields(state);
|
||||
}
|
|
@@ -0,0 +1,80 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/rgen/frozen/frozen_phase_density_sampler.hpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#ifndef __LIBLSS_FROZEN_DENSITY_HPP
|
||||
#define __LIBLSS_FROZEN_DENSITY_HPP
|
||||
|
||||
#include <boost/optional.hpp>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <CosmoTool/fourier/fft/fftw_calls.hpp>
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/tools/fftw_allocator.hpp"
|
||||
#include "libLSS/samplers/core/markov.hpp"
|
||||
#include "libLSS/samplers/core/random_number.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/tools/symplectic_integrator.hpp"
|
||||
#include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
#include "libLSS/samplers/core/gridLikelihoodBase.hpp"
|
||||
#include "libLSS/samplers/rgen/density_sampler.hpp"
|
||||
|
||||
namespace LibLSS {
|
||||
|
||||
class FrozenPhaseDensitySampler : public GenericDensitySampler {
|
||||
public:
|
||||
typedef ArrayType::ArrayType Array;
|
||||
typedef ArrayType::RefArrayType ArrayRef;
|
||||
typedef CArrayType::ArrayType CArray;
|
||||
typedef CArrayType::RefArrayType CArrayRef;
|
||||
typedef IArrayType::ArrayType IArray;
|
||||
|
||||
typedef std::shared_ptr<GridDensityLikelihoodBase<3>> Likelihood_t;
|
||||
|
||||
protected:
|
||||
typedef FFTW_Manager_3d<double> DFT_Manager;
|
||||
|
||||
MPI_Communication *comm;
|
||||
std::shared_ptr<DFT_Manager> base_mgr;
|
||||
Likelihood_t likelihood;
|
||||
|
||||
FCalls::plan_type analysis_plan, synthesis_plan;
|
||||
size_t N0, N1, N2;
|
||||
size_t startN0, localN0, endN0;
|
||||
double L0, L1, L2, volume, volNorm;
|
||||
|
||||
ArrayType *x_field, *s_field;
|
||||
CArrayType *x_hat_field, *s_hat_field;
|
||||
|
||||
boost::optional<std::string> phaseFilename;
|
||||
std::string dataName;
|
||||
|
||||
auto sqrt_Pk(MarkovState &state);
|
||||
|
||||
public:
|
||||
FrozenPhaseDensitySampler(MPI_Communication *comm, Likelihood_t likelihood);
|
||||
virtual ~FrozenPhaseDensitySampler();
|
||||
|
||||
virtual void generateMockData(MarkovState &state);
|
||||
void generateRandomField(MarkovState &state);
|
||||
|
||||
void
|
||||
setPhaseFile(std::string const &filename, std::string const &objectName) {
|
||||
phaseFilename = filename;
|
||||
dataName = objectName;
|
||||
}
|
||||
|
||||
void restore(MarkovState &state);
|
||||
void initialize(MarkovState &state);
|
||||
|
||||
virtual void sample(MarkovState &state);
|
||||
};
|
||||
}; // namespace LibLSS
|
||||
|
||||
#endif
|
720
extra/hades/libLSS/samplers/rgen/hmc/hmc_density_sampler.cpp
Normal file
|
@@ -0,0 +1,720 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/rgen/hmc/hmc_density_sampler.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include "libLSS/cconfig.h"
|
||||
#include <CosmoTool/algo.hpp>
|
||||
#include <cmath>
|
||||
#include <Eigen/Core>
|
||||
#include <boost/format.hpp>
|
||||
#include <functional>
|
||||
#include <CosmoTool/fourier/fft/fftw_calls.hpp>
|
||||
#include "libLSS/samplers/core/random_number.hpp"
|
||||
#include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
#include "libLSS/tools/array_tools.hpp"
|
||||
#include "libLSS/samplers/rgen/hmc/hmc_density_sampler.hpp"
|
||||
#include "libLSS/tools/uninitialized_type.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include "libLSS/samplers/core/generate_random_field.hpp"
|
||||
#include "libLSS/samplers/core/powerspec_tools.hpp"
|
||||
|
||||
static const bool ULTRA_VERBOSE = false;
|
||||
static const bool HMC_PERF_TEST = true;
|
||||
static const bool FIXED_INTEGRATION_PATH = false;
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::extents;
|
||||
using boost::format;
|
||||
|
||||
using CosmoTool::square;
|
||||
|
||||
using boost::c_storage_order;
|
||||
typedef boost::multi_array_types::extent_range range;
|
||||
|
||||
typedef Eigen::Map<Eigen::ArrayXd, Eigen::Aligned> MappedArray;
|
||||
|
||||
static const int ROOT_RANK = 0;
|
||||
|
||||
namespace ph = std::placeholders;
|
||||
|
||||
HMCDensitySampler::HMCDensitySampler(
|
||||
MPI_Communication *comm, Likelihood_t likelihood, double k_max_,
|
||||
std::string const &prefix)
|
||||
: momentum_field(0), analysis_plan(0), synthesis_plan(0), attempt_field(0),
|
||||
accept_field(0), bad_sample(0), comm(comm), k_max(k_max_) {
|
||||
this->maxTime = 50;
|
||||
this->maxEpsilon = 0.01;
|
||||
this->likelihood = likelihood;
|
||||
setIntegratorScheme(HMCOption::SI_2A);
|
||||
setupNames(prefix);
|
||||
}
|
||||
|
||||
void HMCDensitySampler::setupNames(std::string const &prefix) {
|
||||
momentum_field_name = prefix + "momentum_field";
|
||||
s_hat_field_name = prefix + "s_hat_field";
|
||||
s_field_name = prefix + "s_field";
|
||||
hades_attempt_count_name = prefix + "hades_attempt_count";
|
||||
hades_accept_count_name = prefix + "hades_accept_count";
|
||||
hmc_bad_sample_name = prefix + "hmc_bad_sample";
|
||||
hmc_force_save_final_name = prefix + "hmc_force_save_final";
|
||||
hmc_Elh_name = prefix + "hmc_Elh";
|
||||
hmc_Eprior_name = prefix + "hmc_Eprior";
|
||||
}
|
||||
|
||||
void HMCDensitySampler::generateMockData(MarkovState &state) {
|
||||
likelihood->updateMetaParameters(state);
|
||||
if (!phaseFilename) {
|
||||
generateRandomField(state);
|
||||
}
|
||||
Console::instance().print<LOG_VERBOSE>(
|
||||
format("Max of s_field = %g") % fwrap(*s_field->array).max());
|
||||
likelihood->generateMockData(*s_hat_field->array, state);
|
||||
}
|
||||
|
||||
void HMCDensitySampler::computeGradientPsi_Likelihood(
|
||||
MarkovState &state, CArrayRef const &s, CArrayRef &grad_array,
|
||||
bool accumulate) {
|
||||
double temp = state.getScalar<double>("ares_heat");
|
||||
if (posttransform) {
|
||||
BoxModel box = posttransform->get_box_model();
|
||||
posttransform->forwardModel_v2(ModelInput<3>(base_mgr, box, s));
|
||||
auto tmp_s = base_mgr->allocate_ptr_complex_array();
|
||||
posttransform->getDensityFinal(
|
||||
ModelOutput<3>(base_mgr, box, tmp_s->get_array()));
|
||||
|
||||
likelihood->gradientLikelihood(*tmp_s, grad_array, accumulate, temp);
|
||||
tmp_s.reset();
|
||||
|
||||
posttransform->adjointModel_v2(
|
||||
ModelInputAdjoint<3>(base_mgr, box, grad_array));
|
||||
posttransform->getAdjointModelOutput(
|
||||
ModelOutputAdjoint<3>(base_mgr, box, grad_array));
|
||||
} else {
|
||||
likelihood->gradientLikelihood(s, grad_array, accumulate, temp);
|
||||
}
|
||||
}
|
||||
|
||||
double HMCDensitySampler::computeHamiltonian_Likelihood(
|
||||
MarkovState &state, CArrayRef const &s_hat, bool final_call) {
|
||||
double temp = state.getScalar<double>("ares_heat");
|
||||
Console::instance().print<LOG_VERBOSE>(
|
||||
format("[LIKELIHOOD] Temperature is %lg") % temp);
|
||||
|
||||
if (posttransform) {
|
||||
BoxModel box = posttransform->get_box_model();
|
||||
posttransform->forwardModel_v2(ModelInput<3>(base_mgr, box, s_hat));
|
||||
auto tmp_s = base_mgr->allocate_ptr_complex_array();
|
||||
posttransform->getDensityFinal(
|
||||
ModelOutput<3>(base_mgr, box, tmp_s->get_array()));
|
||||
return likelihood->logLikelihood(tmp_s->get_array(), !final_call) * temp;
|
||||
} else
|
||||
|
||||
return likelihood->logLikelihood(s_hat, !final_call) * temp;
|
||||
}
|
||||
|
||||
void HMCDensitySampler::restore(MarkovState &state) { initialize(state); }
|
||||
|
||||
void HMCDensitySampler::initialize(MarkovState &state) {
|
||||
Console &cons = Console::instance();
|
||||
ConsoleContext<LOG_INFO_SINGLE> ctx("Initialize hades density sampler");
|
||||
|
||||
N0 = state.getScalar<long>("N0");
|
||||
N1 = state.getScalar<long>("N1");
|
||||
N2 = state.getScalar<long>("N2");
|
||||
|
||||
base_mgr = std::make_shared<DFT_Manager>(N0, N1, N2, comm);
|
||||
size_t Ntot = N0 * N1 * N2;
|
||||
|
||||
L0 = state.getScalar<double>("L0");
|
||||
L1 = state.getScalar<double>("L1");
|
||||
L2 = state.getScalar<double>("L2");
|
||||
Ncat = state.getScalar<long>("NCAT");
|
||||
|
||||
startN0 = base_mgr->startN0;
|
||||
localN0 = base_mgr->localN0;
|
||||
endN0 = startN0 + localN0;
|
||||
|
||||
fixer = std::make_shared<Hermiticity_fixer<double, 3>>(base_mgr);
|
||||
|
||||
ctx.format("Allocating s_hat_field field: %dx%dx%d", N0, N1, base_mgr->N2_HC);
|
||||
s_hat_field =
|
||||
new CArrayType(base_mgr->extents_complex(), base_mgr->allocator_complex);
|
||||
s_hat_field->setRealDims(ArrayDimension(N0, N1, base_mgr->N2_HC));
|
||||
s_field = new ArrayType(extents[range(startN0, startN0 + localN0)][N1][N2]);
|
||||
s_field->setRealDims(ArrayDimension(N0, N1, N2));
|
||||
cons.print<LOG_DEBUG>("Allocating momentum field");
|
||||
momentum_field =
|
||||
new CArrayType(base_mgr->extents_complex(), base_mgr->allocator_complex);
|
||||
momentum_field->setRealDims(ArrayDimension(N0, N1, base_mgr->N2_HC));
|
||||
|
||||
// Pass the ownership to state
|
||||
state.newElement(momentum_field_name, momentum_field);
|
||||
state.newElement(s_hat_field_name, s_hat_field, true);
|
||||
state.newElement(s_field_name, s_field, true);
|
||||
state.newElement(hades_attempt_count_name, attempt_field = new SLong(), true);
|
||||
state.newElement(hades_accept_count_name, accept_field = new SLong(), true);
|
||||
bad_sample = state.newScalar<int>(hmc_bad_sample_name, 0);
|
||||
|
||||
attempt_field->value = 0;
|
||||
accept_field->value = 0;
|
||||
attempt_field->setResetOnSave(0);
|
||||
accept_field->setResetOnSave(0);
|
||||
bad_sample->setResetOnSave(0);
|
||||
|
||||
s_hat_field->eigen().fill(0);
|
||||
s_field->eigen().fill(0);
|
||||
momentum_field->eigen().fill(0);
|
||||
|
||||
volume = L0 * L1 * L2;
|
||||
volNorm = volume / Ntot;
|
||||
|
||||
mass_field =
|
||||
new ArrayType(base_mgr->extents_complex(), base_mgr->allocator_real);
|
||||
mass_field->setRealDims(ArrayDimension(N0, N1, base_mgr->N2_HC));
|
||||
mass_field->eigen().fill(0);
|
||||
state.newElement("hades_mass", mass_field);
|
||||
|
||||
state.newScalar(hmc_force_save_final_name, true);
|
||||
state.newScalar(hmc_Elh_name, 0.0, true);
|
||||
state.newScalar(hmc_Eprior_name, 0.0, true);
|
||||
|
||||
auto tmp_field = base_mgr->allocate_array();
|
||||
synthesis_plan = base_mgr->create_c2r_plan(
|
||||
s_hat_field->array->data(), tmp_field.get_array().data());
|
||||
analysis_plan = base_mgr->create_r2c_plan(
|
||||
tmp_field.get_array().data(), s_hat_field->array->data());
|
||||
|
||||
likelihood->initializeLikelihood(state);
|
||||
|
||||
// Now load phases
|
||||
if (phaseFilename) {
|
||||
H5::H5File f(*phaseFilename, H5F_ACC_RDONLY);
|
||||
|
||||
ctx.print("Read-in phase data");
|
||||
{
|
||||
U_Array<double, 3> tmp_x(
|
||||
base_mgr
|
||||
->extents_real_strict()); // We need to allocate this temporary array to adapt shape.
|
||||
CosmoTool::hdf5_read_array(f, dataName, tmp_x.get_array(), false, true);
|
||||
|
||||
ctx.print("Saving and Rescaling");
|
||||
fwrap(array::slice_array(*s_field->array, base_mgr->strict_range())) =
|
||||
fwrap(tmp_x.get_array());
|
||||
}
|
||||
|
||||
auto tmp_field = base_mgr->allocate_array();
|
||||
fwrap(array::slice_array(tmp_field.get_array(), base_mgr->strict_range())) =
|
||||
fwrap(*s_field->array) * volNorm;
|
||||
|
||||
ctx.print("Fourier transform");
|
||||
base_mgr->execute_r2c(
|
||||
analysis_plan, tmp_field.get_array().data(),
|
||||
s_hat_field->array->data());
|
||||
}
|
||||
}
|
||||
|
||||
HMCDensitySampler::~HMCDensitySampler() {
|
||||
if (base_mgr) {
|
||||
Console::instance().print<LOG_INFO_SINGLE>("Cleaning up HMCDensitySampler");
|
||||
|
||||
MFCalls::destroy_plan(analysis_plan);
|
||||
MFCalls::destroy_plan(synthesis_plan);
|
||||
}
|
||||
}
|
||||
|
||||
auto HMCDensitySampler::free_phase_mask() {
|
||||
double kmax2 = k_max * k_max;
|
||||
|
||||
return fwrap(b_fused_idx<double, 3>([this, kmax2](int a, int b, int c) {
|
||||
double kx = kmode(a, N0, L0);
|
||||
double ky = kmode(b, N1, L1);
|
||||
double kz = kmode(c, N2, L2);
|
||||
|
||||
return (kx * kx + ky * ky + kz * kz) > kmax2;
|
||||
}));
|
||||
}
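free_phase_mask() keeps only the modes with |k| <= k_max by comparing each cell's squared wavenumber against kmax2. A minimal sketch of the frequency convention this relies on, assuming kmode(i, N, L) maps an FFT bin index to the usual signed wavenumber (the helper below is hypothetical; the libLSS kmode may differ in detail):

// Hypothetical stand-in for kmode(): FFT bin index -> signed wavenumber.
#include <cmath>
#include <cstddef>

double kmode_sketch(std::size_t i, std::size_t N, double L) {
  // Bins 0..N/2 carry non-negative frequencies; the remainder wrap to negative ones.
  long n = (i <= N / 2) ? static_cast<long>(i)
                        : static_cast<long>(i) - static_cast<long>(N);
  return 2.0 * M_PI * static_cast<double>(n) / L;
}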
|
||||
|
||||
void HMCDensitySampler::initializeMomenta(MarkovState &state) {
|
||||
CArrayType::ArrayType &momentum_array = *momentum_field->array;
|
||||
RandomGen *rgen = state.get<RandomGen>("random_generator");
|
||||
fwrap(momentum_array) = make_complex(
|
||||
rgen->get().gaussian(
|
||||
constant<double, 3>(M_SQRT2, base_mgr->extents_complex())),
|
||||
rgen->get().gaussian(
|
||||
constant<double, 3>(M_SQRT2, base_mgr->extents_complex())));
|
||||
|
||||
fwrap(momentum_array) = fwrap(momentum_array) * free_phase_mask();
|
||||
}
|
||||
|
||||
#include "hmc_kinetic.tcc"
|
||||
#include "hmc_prior.tcc"
|
||||
|
||||
void HMCDensitySampler::computeGradientPsi(
|
||||
MarkovState &state, CArrayRef const &s, CArrayRef &grad_array) {
|
||||
array::fill(grad_array, 0);
|
||||
computeGradientPsi_Prior(state, s, grad_array);
|
||||
computeGradientPsi_Likelihood(state, s, grad_array, true);
|
||||
fwrap(grad_array) = fwrap(grad_array) * free_phase_mask();
|
||||
}
|
||||
|
||||
HMCDensitySampler::HamiltonianType HMCDensitySampler::computeHamiltonian(
|
||||
MarkovState &state, CArrayRef const &s_hat, bool final_call) {
|
||||
ConsoleContext<LOG_DEBUG> ctx("hamiltonian computation");
|
||||
|
||||
HamiltonianType Ekin = computeHamiltonian_Kinetic();
|
||||
ctx.print(format("Ekin = %lg") % double(Ekin));
|
||||
HamiltonianType Eprior = computeHamiltonian_Prior(state, s_hat);
|
||||
ctx.print(format("Eprior = %lg") % double(Eprior));
|
||||
HamiltonianType Elh = computeHamiltonian_Likelihood(state, s_hat, final_call);
|
||||
ctx.print(format("Elh = %lg") % double(Elh));
|
||||
|
||||
return Ekin + Eprior + Elh;
|
||||
}
|
||||
|
||||
void HMCDensitySampler::setTransforms(
|
||||
Model_t pretransform_, Model_t posttransform_) {
|
||||
pretransform = pretransform_;
|
||||
posttransform = posttransform_;
|
||||
}
|
||||
|
||||
void HMCDensitySampler::setIntegratorScheme(IntegratorScheme scheme) {
|
||||
current_scheme = scheme;
|
||||
symp.setIntegratorScheme(scheme);
|
||||
}
|
||||
|
||||
void HMCDensitySampler::doSympInt(MarkovState &state, CArrayRef &s_hat) {
|
||||
ConsoleContext<LOG_INFO_SINGLE> ctx("Symplectic integration");
|
||||
RandomGen *rgen = state.get<RandomGen>("random_generator");
|
||||
int Ntime;
|
||||
double epsilon;
|
||||
ArrayType::ArrayType &mass = *mass_field->array;
|
||||
CArrayType::ArrayType &momentum_array = *momentum_field->array;
|
||||
|
||||
if (comm->rank() == ROOT_RANK) {
|
||||
lastEpsilon = epsilon = maxEpsilon * rgen->get().uniform();
|
||||
do {
|
||||
lastTime = Ntime = 1 + int(std::floor(rgen->get().uniform() * maxTime));
|
||||
} while (Ntime == 0);
|
||||
}
|
||||
|
||||
if (HMC_PERF_TEST && FIXED_INTEGRATION_PATH) {
|
||||
epsilon = maxEpsilon;
|
||||
Ntime = maxTime;
|
||||
}
|
||||
|
||||
comm->broadcast_t(&epsilon, 1, ROOT_RANK);
|
||||
comm->broadcast_t(&Ntime, 1, ROOT_RANK);
|
||||
|
||||
ctx.print(format("epsilon = %lg, Ntime = %d") % epsilon % Ntime);
|
||||
|
||||
auto gradient_psi_p = base_mgr->allocate_complex_array();
|
||||
auto &gradient_psi = gradient_psi_p.get_array();
|
||||
|
||||
symp.integrate(
|
||||
std::bind(
|
||||
&HMCDensitySampler::computeGradientPsi, this, std::ref(state), ph::_1,
|
||||
ph::_2),
|
||||
mass, epsilon, Ntime, s_hat, momentum_array, gradient_psi);
|
||||
}
|
||||
|
||||
void HMCDensitySampler::updateMass(MarkovState &state) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
ArrayType::ArrayType &mass = *mass_field->array;
|
||||
|
||||
fwrap(mass) = 1 / 2.0 * free_phase_mask();
|
||||
}
|
||||
|
||||
void HMCDensitySampler::sample(MarkovState &state) {
|
||||
RandomGen *rgen = state.get<RandomGen>("random_generator");
|
||||
double HamiltonianInit, HamiltonianFinal, deltaH;
|
||||
ConsoleContext<LOG_INFO_SINGLE> ctx("hades density field sampler");
|
||||
//adjust_field = state.get<IArrayType>("adjust_mode_multiplier");
|
||||
|
||||
auto s_hat_p = base_mgr->allocate_complex_array();
|
||||
auto &s_hat = s_hat_p.get_array();
|
||||
|
||||
if (pretransform) {
|
||||
BoxModel box = pretransform->get_box_model();
|
||||
// We explicitly protect the input here.
|
||||
pretransform->forwardModel_v2(ModelInput<3>(
|
||||
base_mgr, box,
|
||||
(CArrayRef const &)*state.get<CArrayType>("s_hat_field")->array));
|
||||
pretransform->getDensityFinal(ModelOutput<3>(base_mgr, box, s_hat));
|
||||
} else
|
||||
array::copyArray3d(s_hat, *state.get<CArrayType>("s_hat_field")->array);
|
||||
|
||||
updateMass(state);
|
||||
|
||||
likelihood->updateMetaParameters(state);
|
||||
initializeMomenta(state);
|
||||
|
||||
HamiltonianType init_Ekin = computeHamiltonian_Kinetic();
|
||||
HamiltonianType init_Eprior = computeHamiltonian_Prior(state, s_hat);
|
||||
HamiltonianType init_Elh = computeHamiltonian_Likelihood(state, s_hat, false);
|
||||
HamiltonianInit = init_Ekin + init_Eprior +
|
||||
init_Elh; //computeHamiltonian(state, s_hat, false);
|
||||
// If we are the very first step, save the result of the forward model for the other samplers.
|
||||
bool &force_save = state.getScalar<bool>("hmc_force_save_final");
|
||||
if (state.get<SLong>("MCMC_STEP")->value == 0 || force_save) {
|
||||
likelihood->commitAuxiliaryFields(state);
|
||||
force_save = false;
|
||||
}
|
||||
|
||||
if (state.get<SBool>("hades_sampler_blocked")->value)
|
||||
return;
|
||||
|
||||
boost::chrono::system_clock::time_point time_start;
|
||||
if (HMC_PERF_TEST) {
|
||||
time_start = boost::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
HamiltonianType final_Ekin = 0;
|
||||
HamiltonianType final_Eprior = 0;
|
||||
HamiltonianType final_Elh = 0;
|
||||
|
||||
try {
|
||||
if (std::isnan(HamiltonianInit)) {
|
||||
error_helper<ErrorBadState>("NaN in hamiltonian initial");
|
||||
}
|
||||
doSympInt(state, s_hat);
|
||||
final_Ekin = computeHamiltonian_Kinetic();
|
||||
final_Eprior = computeHamiltonian_Prior(state, s_hat);
|
||||
final_Elh = computeHamiltonian_Likelihood(state, s_hat, true);
|
||||
HamiltonianFinal = final_Ekin + final_Eprior + final_Elh;
|
||||
//if (std::isnan(HamiltonianFinal)) { error_helper<ErrorBadState>("NaN in hamiltonian final"); }
|
||||
|
||||
} catch (const ErrorLoadBalance &) {
|
||||
// Stop everything now
|
||||
state.getScalar<int>(hmc_bad_sample_name)++;
|
||||
if (HMC_PERF_TEST && comm->rank() == ROOT_RANK) {
|
||||
std::ofstream f("hmc_performance.txt", std::ios::app);
|
||||
boost::chrono::duration<double> compute_time =
|
||||
boost::chrono::system_clock::now() - time_start;
|
||||
|
||||
f << format("% 10.5le % 6d % 15.15le % 15.15le %d 0 0") % lastEpsilon %
|
||||
lastTime % 0 % compute_time.count() % int(current_scheme)
|
||||
<< std::endl;
|
||||
}
|
||||
return;
|
||||
}
|
||||
double log_u;
|
||||
|
||||
ctx.format(
|
||||
"init_Ekin=%g, init_Eprior=%g, init_Elh=%g, final_Ekin=%g, "
|
||||
"final_Eprior=%g, final_Elh=%g",
|
||||
init_Ekin, init_Eprior, init_Elh, final_Ekin, final_Eprior, final_Elh);
|
||||
|
||||
deltaH = HamiltonianFinal - HamiltonianInit;
|
||||
if (comm->rank() == ROOT_RANK)
|
||||
log_u = log(rgen->get().uniform());
|
||||
|
||||
comm->broadcast_t(&log_u, 1, ROOT_RANK);
|
||||
|
||||
if (attempt_field)
|
||||
attempt_field->value++;
|
||||
ctx.print2<LOG_INFO_SINGLE>(
|
||||
format("log_u = %lg, deltaH = %lg") % log_u % deltaH);
|
||||
if (HMC_PERF_TEST && comm->rank() == ROOT_RANK) {
|
||||
std::ofstream f("hmc_performance.txt", std::ios::app);
|
||||
boost::chrono::duration<double> compute_time =
|
||||
boost::chrono::system_clock::now() - time_start;
|
||||
|
||||
f << format("% 10.5le % 6d % 15.15le % 15.15le %d % 15.15le 0 %d") %
|
||||
lastEpsilon % lastTime % deltaH % compute_time.count() %
|
||||
int(current_scheme) % HamiltonianFinal % (log_u <= -deltaH)
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
if (log_u <= -deltaH) {
|
||||
// Accept the move
|
||||
if (comm->rank() == ROOT_RANK)
|
||||
ctx.print("accepting the move");
|
||||
|
||||
likelihood->commitAuxiliaryFields(state);
|
||||
|
||||
state.getScalar<double>(hmc_Elh_name) = final_Elh;
|
||||
state.getScalar<double>(hmc_Eprior_name) = final_Eprior;
|
||||
|
||||
auto tmp_real_field = base_mgr->allocate_array();
|
||||
fixer->forward(s_hat);
|
||||
if (posttransform) {
|
||||
BoxModel box = posttransform->get_box_model();
|
||||
posttransform->forwardModel_v2(ModelInput<3>(base_mgr, box, s_hat));
|
||||
posttransform->getDensityFinal(ModelOutput<3>(
|
||||
base_mgr, box, *state.get<CArrayType>(s_hat_field_name)->array));
|
||||
LibLSS::copy_array(
|
||||
s_hat, *state.get<CArrayType>(s_hat_field_name)->array);
|
||||
} else {
|
||||
LibLSS::copy_array(
|
||||
*state.get<CArrayType>(s_hat_field_name)->array, s_hat);
|
||||
}
|
||||
base_mgr->execute_c2r(
|
||||
synthesis_plan, s_hat.data(), tmp_real_field.get_array().data());
|
||||
|
||||
// This one handles padded and unpadded data through multi_array
|
||||
array::scaleAndCopyArray3d(
|
||||
*state.get<ArrayType>(s_field_name)->array, tmp_real_field.get_array(),
|
||||
1. / volume, true);
|
||||
|
||||
if (accept_field)
|
||||
accept_field->value++;
|
||||
} else {
|
||||
state.getScalar<double>(hmc_Elh_name) = init_Elh;
|
||||
state.getScalar<double>(hmc_Eprior_name) = init_Eprior;
|
||||
}
|
||||
}
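The accept/reject test above, log u <= -\Delta H with u \sim U(0,1), is the standard Metropolis correction at the end of an HMC trajectory:

  P_{\mathrm{accept}} = \min\left(1,\, e^{-\Delta H}\right), \qquad \Delta H = H_{\mathrm{final}} - H_{\mathrm{init}},

so a trajectory that lowers the Hamiltonian is always accepted, and one that raises it is accepted with probability e^{-\Delta H}. The ares_heat factor applied to the likelihood energies tempers this test.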
|
||||
|
||||
void HMCDensitySampler::checkGradientReal(MarkovState &state, int step) {
|
||||
ConsoleContext<LOG_DEBUG> ctx("checkGradientReal");
|
||||
HamiltonianType H0, H1, H2;
|
||||
HamiltonianType H0prior, H1prior, H2prior;
|
||||
HamiltonianType H0poisson, H1poisson, H2poisson;
|
||||
auto s_hat_p = base_mgr->allocate_complex_array();
|
||||
auto &s_hat = s_hat_p.get_array();
|
||||
double epsilon = 0.001;
|
||||
|
||||
ArrayType *gradient_field_lh, *gradient_field_lh_ref;
|
||||
ArrayType *s_field = state.get<ArrayType>(s_field_name);
|
||||
|
||||
if (state.exists("gradient_array_lh_real")) {
|
||||
gradient_field_lh = state.get<ArrayType>("gradient_array_lh_real");
|
||||
gradient_field_lh_ref = state.get<ArrayType>("gradient_array_lh_ref_real");
|
||||
} else {
|
||||
auto real_dim =
|
||||
ArrayDimension(base_mgr->N0, base_mgr->N1, base_mgr->N2real);
|
||||
|
||||
state.newElement(
|
||||
"gradient_array_lh_real",
|
||||
gradient_field_lh =
|
||||
new ArrayType(base_mgr->extents_real(), base_mgr->allocator_real));
|
||||
gradient_field_lh->setRealDims(real_dim);
|
||||
|
||||
state.newElement(
|
||||
"gradient_array_lh_ref_real",
|
||||
gradient_field_lh_ref =
|
||||
new ArrayType(base_mgr->extents_real(), base_mgr->allocator_real));
|
||||
gradient_field_lh_ref->setRealDims(real_dim);
|
||||
}
|
||||
|
||||
auto &gradient_array_lh = *gradient_field_lh->array;
|
||||
auto &gradient_array_lh_ref = *gradient_field_lh_ref->array;
|
||||
auto &s = *s_field->array;
|
||||
|
||||
double log_L0, log_L1, log_L2;
|
||||
likelihood->gradientLikelihood(s, gradient_array_lh, false, 1.0);
|
||||
|
||||
log_L0 = likelihood->logLikelihood(s, false);
|
||||
Progress<LOG_INFO_SINGLE> &progress =
|
||||
Console::instance().start_progress<LOG_INFO_SINGLE>(
|
||||
"doing numerical gradient (real)",
|
||||
base_mgr->N0 * base_mgr->N1 * base_mgr->N2, 5);
|
||||
|
||||
for (int n0 = 0; n0 < N0; n0 += step) {
|
||||
for (int n1 = 0; n1 < N1; n1++) {
|
||||
for (int n2 = 0; n2 < base_mgr->N2; n2++) {
|
||||
double backup;
|
||||
double pert;
|
||||
double n_backup;
|
||||
|
||||
bool oncore = base_mgr->on_core(n0);
|
||||
|
||||
if (n0 == 0 && n1 == 0 && n2 == 0)
|
||||
continue;
|
||||
|
||||
if (oncore) {
|
||||
backup = s[n0][n1][n2];
|
||||
n_backup = abs(backup);
|
||||
if (n_backup == 0)
|
||||
n_backup = 1;
|
||||
pert = backup + n_backup * epsilon;
|
||||
s[n0][n1][n2] = pert;
|
||||
}
|
||||
|
||||
log_L1 = likelihood->logLikelihood(s, false);
|
||||
|
||||
if (oncore) {
|
||||
gradient_array_lh_ref[n0][n1][n2] =
|
||||
(log_L1 - log_L0) / (n_backup * epsilon);
|
||||
s[n0][n1][n2] = backup;
|
||||
}
|
||||
|
||||
long n = ((n0 - startN0) * N1 + n1) * base_mgr->N2 + n2;
|
||||
progress.update(n);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
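checkGradientReal benchmarks the analytic gradient against a one-sided finite difference of the log-likelihood, with the step scaled by the magnitude of the perturbed entry (and by 1 when that entry is zero):

  \left.\partial_{s_{n_0 n_1 n_2}} \log \mathcal{L}\right|_{\mathrm{ref}}
    \approx \frac{\log \mathcal{L}\!\left(s + \epsilon\,|s_{n_0 n_1 n_2}|\, e_{n_0 n_1 n_2}\right) - \log \mathcal{L}(s)}{\epsilon\,|s_{n_0 n_1 n_2}|},
  \qquad \epsilon = 10^{-3}.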
|
||||
|
||||
void HMCDensitySampler::checkGradient(MarkovState &state, int step) {
|
||||
ConsoleContext<LOG_DEBUG> ctx("checkGradient");
|
||||
HamiltonianType H0, H1, H2;
|
||||
HamiltonianType H0prior, H1prior, H2prior;
|
||||
HamiltonianType H0poisson, H1poisson, H2poisson;
|
||||
auto s_hat_p = base_mgr->allocate_complex_array();
|
||||
auto &s_hat = s_hat_p.get_array();
|
||||
double epsilon = 0.01;
|
||||
|
||||
CArrayType *gradient_field, *gradient_field_ref;
|
||||
CArrayType *gradient_field_prior, *gradient_field_prior_ref;
|
||||
CArrayType *gradient_field_poisson, *gradient_field_poisson_ref;
|
||||
CArrayType *s_hat_field = state.get<CArrayType>(s_hat_field_name);
|
||||
|
||||
if (state.exists("gradient_array")) {
|
||||
gradient_field = state.get<CArrayType>("gradient_array");
|
||||
gradient_field_prior = state.get<CArrayType>("gradient_array_prior");
|
||||
gradient_field_poisson = state.get<CArrayType>("gradient_array_lh");
|
||||
gradient_field_ref = state.get<CArrayType>("gradient_array_ref");
|
||||
gradient_field_prior_ref =
|
||||
state.get<CArrayType>("gradient_array_prior_ref");
|
||||
gradient_field_poisson_ref = state.get<CArrayType>("gradient_array_lh_ref");
|
||||
} else {
|
||||
auto complex_dim =
|
||||
ArrayDimension(base_mgr->N0, base_mgr->N1, base_mgr->N2_HC);
|
||||
state.newElement(
|
||||
"gradient_array",
|
||||
gradient_field = new CArrayType(
|
||||
base_mgr->extents_complex(), base_mgr->allocator_complex));
|
||||
gradient_field->setRealDims(complex_dim);
|
||||
|
||||
state.newElement(
|
||||
"gradient_array_ref",
|
||||
gradient_field_ref = new CArrayType(
|
||||
base_mgr->extents_complex(), base_mgr->allocator_complex));
|
||||
gradient_field_ref->setRealDims(complex_dim);
|
||||
|
||||
state.newElement(
|
||||
"gradient_array_prior",
|
||||
gradient_field_prior = new CArrayType(
|
||||
base_mgr->extents_complex(), base_mgr->allocator_complex));
|
||||
gradient_field_prior->setRealDims(complex_dim);
|
||||
|
||||
state.newElement(
|
||||
"gradient_array_prior_ref",
|
||||
gradient_field_prior_ref = new CArrayType(
|
||||
base_mgr->extents_complex(), base_mgr->allocator_complex));
|
||||
gradient_field_prior_ref->setRealDims(complex_dim);
|
||||
|
||||
state.newElement(
|
||||
"gradient_array_lh",
|
||||
gradient_field_poisson = new CArrayType(
|
||||
base_mgr->extents_complex(), base_mgr->allocator_complex));
|
||||
gradient_field_poisson->setRealDims(complex_dim);
|
||||
|
||||
state.newElement(
|
||||
"gradient_array_lh_ref",
|
||||
gradient_field_poisson_ref = new CArrayType(
|
||||
base_mgr->extents_complex(), base_mgr->allocator_complex));
|
||||
gradient_field_poisson_ref->setRealDims(complex_dim);
|
||||
}
|
||||
FFTW_Complex_Array &grad_array = *gradient_field->array;
|
||||
FFTW_Complex_Array &grad_array_ref = *gradient_field_ref->array;
|
||||
FFTW_Complex_Array &grad_array_prior = *gradient_field_prior->array;
|
||||
FFTW_Complex_Array &grad_array_prior_ref = *gradient_field_prior_ref->array;
|
||||
FFTW_Complex_Array &grad_array_poisson = *gradient_field_poisson->array;
|
||||
FFTW_Complex_Array &grad_array_poisson_ref =
|
||||
*gradient_field_poisson_ref->array;
|
||||
//adjust_field = state.get<IArrayType>("adjust_mode_multiplier");
|
||||
|
||||
updateMass(state);
|
||||
if (startN0 == 0 && localN0 > 0)
|
||||
grad_array[0][0][0] = 0;
|
||||
ctx.print("Initialize momenta");
|
||||
initializeMomenta(state);
|
||||
|
||||
ctx.print("Compute for prior");
|
||||
computeGradientPsi_Prior(state, *s_hat_field->array, grad_array_prior);
|
||||
ctx.print("Compute for likelihood");
|
||||
computeGradientPsi_Likelihood(
|
||||
state, *s_hat_field->array, grad_array_poisson, false);
|
||||
ctx.print("Compute for both");
|
||||
computeGradientPsi(state, *s_hat_field->array, grad_array);
|
||||
|
||||
gradient_field_ref->eigen().fill(0);
|
||||
gradient_field_prior_ref->eigen().fill(0);
|
||||
gradient_field_poisson_ref->eigen().fill(0);
|
||||
|
||||
Progress<LOG_INFO_SINGLE> &progress =
|
||||
Console::instance().start_progress<LOG_INFO_SINGLE>(
|
||||
"doing numerical gradient",
|
||||
base_mgr->N0 * base_mgr->N1 * base_mgr->N2_HC, 5);
|
||||
array::copyArray3d(s_hat, *s_hat_field->array);
|
||||
H0 = computeHamiltonian(state, s_hat, false);
|
||||
H0prior = computeHamiltonian_Prior(state, s_hat);
|
||||
H0poisson = computeHamiltonian_Likelihood(state, s_hat, false);
|
||||
for (int n0 = 0; n0 < N0; n0 += step) {
|
||||
for (int n1 = 0; n1 < N1; n1++) {
|
||||
for (int n2 = 0; n2 < base_mgr->N2_HC; n2++) {
|
||||
FFTW_Complex_Array::element backup;
|
||||
std::complex<double> pert_r, pert_i;
|
||||
double n_backup;
|
||||
|
||||
bool oncore = (n0 >= startN0 && n0 < startN0 + localN0);
|
||||
|
||||
if (n0 == 0 && n1 == 0 && n2 == 0)
|
||||
continue;
|
||||
|
||||
if (oncore) {
|
||||
backup = s_hat[n0][n1][n2];
|
||||
n_backup = abs(backup);
|
||||
if (n_backup == 0)
|
||||
n_backup = 1;
|
||||
Console::instance().print<LOG_DEBUG>(
|
||||
format("n_backup=%lg") % n_backup);
|
||||
pert_r = backup + std::complex<double>(n_backup * epsilon, 0);
|
||||
s_hat[n0][n1][n2] = pert_r;
|
||||
}
|
||||
|
||||
H1 = computeHamiltonian(state, s_hat, false);
|
||||
H1prior = computeHamiltonian_Prior(state, s_hat);
|
||||
H1poisson = computeHamiltonian_Likelihood(state, s_hat, false);
|
||||
|
||||
if (oncore) {
|
||||
pert_i = backup + std::complex<double>(0, n_backup * epsilon);
|
||||
s_hat[n0][n1][n2] = pert_i;
|
||||
}
|
||||
|
||||
H2 = computeHamiltonian(state, s_hat, false);
|
||||
H2prior = computeHamiltonian_Prior(state, s_hat);
|
||||
H2poisson = computeHamiltonian_Likelihood(state, s_hat, false);
|
||||
|
||||
if (oncore) {
|
||||
grad_array_ref[n0][n1][n2] =
    std::complex<double>((H1 - H0), (H2 - H0)) / (n_backup * epsilon);
|
||||
grad_array_prior_ref[n0][n1][n2] =
|
||||
std::complex<double>((H1prior - H0prior), (H2prior - H0prior)) /
|
||||
(n_backup * epsilon);
|
||||
grad_array_poisson_ref[n0][n1][n2] =
|
||||
std::complex<double>(
|
||||
(H1poisson - H0poisson), (H2poisson - H0poisson)) /
|
||||
(n_backup * epsilon);
|
||||
s_hat[n0][n1][n2] = backup;
|
||||
}
|
||||
|
||||
long n = ((n0 - startN0) * N1 + n1) * base_mgr->N2_HC + n2;
|
||||
progress.update(n);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
progress.destroy();
|
||||
}
|
||||
|
||||
void HMCDensitySampler::generateRandomField(MarkovState &state) {
|
||||
LibLSS::generateRandomField(comm, state);
|
||||
}
|
||||
|
||||
HMCDensitySampler::HamiltonianType
|
||||
HMCDensitySampler::computeHamiltonian(MarkovState &state, bool gradient_next) {
|
||||
auto s_hat_p = base_mgr->allocate_complex_array();
|
||||
auto &s_hat = s_hat_p.get_array();
|
||||
|
||||
array::copyArray3d(s_hat, *state.get<CArrayType>(s_hat_field_name)->array);
|
||||
|
||||
return computeHamiltonian_Likelihood(state, s_hat, gradient_next);
|
||||
}
|
247
extra/hades/libLSS/samplers/rgen/hmc/hmc_density_sampler.hpp
Normal file
|
@@ -0,0 +1,247 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/rgen/hmc/hmc_density_sampler.hpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#ifndef __LIBLSS_HMC_DENSITY_HPP
|
||||
#define __LIBLSS_HMC_DENSITY_HPP
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <CosmoTool/fourier/fft/fftw_calls.hpp>
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/tools/fftw_allocator.hpp"
|
||||
#include "libLSS/samplers/core/markov.hpp"
|
||||
#include "libLSS/samplers/core/random_number.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/tools/symplectic_integrator.hpp"
|
||||
#include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
#include "libLSS/samplers/core/gridLikelihoodBase.hpp"
|
||||
#include "libLSS/samplers/rgen/density_sampler.hpp"
|
||||
#include "libLSS/tools/hermiticity_fixup.hpp"
|
||||
|
||||
namespace LibLSS {
|
||||
|
||||
namespace HMC_Details {
|
||||
|
||||
template <typename Function>
|
||||
void accumulateHamiltonian(
|
||||
int n0, int n1, int n2, const IArrayType::ArrayType &adjust_array,
|
||||
const Function &f, double &E) {
|
||||
E += f(n0, n1, n2, adjust_array);
|
||||
}
|
||||
|
||||
} // namespace HMC_Details
|
||||
|
||||
namespace HMCOption {
|
||||
using namespace SymplecticOption;
|
||||
};
|
||||
|
||||
class HMCDensitySampler : public GenericDensitySampler {
|
||||
public:
|
||||
typedef ArrayType::ArrayType Array;
|
||||
typedef ArrayType::RefArrayType ArrayRef;
|
||||
typedef CArrayType::ArrayType CArray;
|
||||
typedef CArrayType::RefArrayType CArrayRef;
|
||||
typedef IArrayType::ArrayType IArray;
|
||||
|
||||
typedef double HamiltonianType;
|
||||
|
||||
typedef std::shared_ptr<GridDensityLikelihoodBase<3>> Likelihood_t;
|
||||
typedef std::shared_ptr<BORGForwardModel> Model_t;
|
||||
|
||||
protected:
|
||||
typedef boost::multi_array_ref<IArrayType::ArrayType::element, 1>
|
||||
FlatIntType;
|
||||
typedef FFTW_Manager_3d<double> DFT_Manager;
|
||||
|
||||
MPI_Communication *comm;
|
||||
|
||||
std::shared_ptr<DFT_Manager> base_mgr;
|
||||
Model_t pretransform, posttransform;
|
||||
Likelihood_t likelihood;
|
||||
|
||||
long fourierLocalSize;
|
||||
FCalls::plan_type analysis_plan, synthesis_plan;
|
||||
size_t N0, N1, N2;
|
||||
size_t startN0, localN0, endN0;
|
||||
double L0, L1, L2, volume, volNorm;
|
||||
int Ncat;
|
||||
SLong *attempt_field, *accept_field;
|
||||
ScalarStateElement<int> *bad_sample;
|
||||
|
||||
ArrayType *mass_field, *s_field;
|
||||
IArrayType *adjust_field;
|
||||
CArrayType *momentum_field, *s_hat_field;
|
||||
|
||||
int maxTime, lastTime;
|
||||
double lastEpsilon, maxEpsilon;
|
||||
SymplecticIntegrators symp;
|
||||
|
||||
boost::optional<std::string> phaseFilename;
|
||||
std::string dataName;
|
||||
|
||||
std::string momentum_field_name, s_hat_field_name, s_field_name,
|
||||
hades_attempt_count_name, hades_accept_count_name, hmc_bad_sample_name,
|
||||
hmc_force_save_final_name, hmc_Elh_name, hmc_Eprior_name;
|
||||
|
||||
void setupNames(std::string const &prefix);
|
||||
|
||||
void doSympInt(MarkovState &state, CArrayRef &s_hat);
|
||||
HamiltonianType computeHamiltonian(
|
||||
MarkovState &state, CArrayRef const &s_hat, bool final_call);
|
||||
HamiltonianType
|
||||
computeHamiltonian_Prior(MarkovState &state, CArrayRef const &s_hat);
|
||||
HamiltonianType computeHamiltonian_Kinetic();
|
||||
HamiltonianType computeHamiltonian_Likelihood(
|
||||
MarkovState &state, CArrayRef const &s_hat, bool final_call);
|
||||
void initializeMomenta(MarkovState &state);
|
||||
void computeGradientPsi(
|
||||
MarkovState &state, CArrayRef const &s, CArrayRef &grad_array);
|
||||
void computeGradientPsi_Prior(
|
||||
MarkovState &state, CArrayRef const &s, CArrayRef &grad_array);
|
||||
|
||||
void updateMass(MarkovState &state);
|
||||
|
||||
void computeGradientPsi_Likelihood(
|
||||
MarkovState &state, CArrayRef const &s, CArrayRef &grad_array,
|
||||
bool accumulate);
|
||||
|
||||
typedef boost::multi_array<double, 2> IntegratorCoefficients;
|
||||
typedef HMCOption::IntegratorScheme IntegratorScheme;
|
||||
|
||||
IntegratorScheme current_scheme;
|
||||
std::shared_ptr<Hermiticity_fixer<double, 3>> fixer;
|
||||
double k_max;
|
||||
|
||||
auto free_phase_mask();
|
||||
|
||||
public:
|
||||
HMCDensitySampler(
|
||||
MPI_Communication *comm, Likelihood_t likelihood, double k_max_ = 1000,
|
||||
std::string const &prefix = std::string());
|
||||
virtual ~HMCDensitySampler();
|
||||
|
||||
void
|
||||
setPhaseFile(std::string const &filename, std::string const &objectName) {
|
||||
phaseFilename = filename;
|
||||
dataName = objectName;
|
||||
}
|
||||
|
||||
virtual void generateMockData(MarkovState &state);
|
||||
void generateRandomField(MarkovState &state);
|
||||
|
||||
void setTransforms(Model_t pretransform_, Model_t posttransform_);
|
||||
void setIntegratorScheme(IntegratorScheme scheme);
|
||||
|
||||
double computeHamiltonian(MarkovState &state, bool gradient_next = false);
|
||||
|
||||
void restore(MarkovState &state);
|
||||
void initialize(MarkovState &state);
|
||||
|
||||
virtual void sample(MarkovState &state);
|
||||
|
||||
void checkGradient(MarkovState &state, int step = 10);
|
||||
void checkGradientReal(MarkovState &state, int step = 10);
|
||||
|
||||
void setMaxEpsilon(double eps) { this->maxEpsilon = eps; }
|
||||
void setMaxTimeSteps(int Ntime) { this->maxTime = Ntime; }
|
||||
|
||||
// Codelet generic functions
|
||||
template <typename Function>
|
||||
void codeletGeneral(IArray &adjust_array, Function codelet) {
|
||||
size_t N2_HC = base_mgr->N2_HC;
|
||||
#pragma omp parallel for schedule(static) collapse(3)
|
||||
for (size_t n0 = startN0; n0 < endN0; n0++) {
|
||||
for (size_t n1 = 0; n1 < N1; n1++) {
|
||||
for (size_t n2 = 1; n2 < N2_HC - 1; n2++) {
|
||||
codelet(n0, n1, n2, adjust_array);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
codeletNyquist(0, adjust_array, codelet);
|
||||
codeletNyquist(base_mgr->N2_HC - 1, adjust_array, codelet);
|
||||
}
|
||||
|
||||
template <typename Function>
|
||||
void codeletNyquist(int n2, const IArray &adjust_array, Function codelet) {
|
||||
size_t N0end = std::min(endN0, N0);
|
||||
size_t N0start = std::max(startN0, size_t(0));
|
||||
#pragma omp parallel for schedule(static) collapse(2)
|
||||
for (size_t n0 = N0start; n0 < N0end; n0++) {
|
||||
for (size_t n1 = 0; n1 < N1; n1++) {
|
||||
codelet(n0, n1, n2, adjust_array);
|
||||
}
|
||||
}
|
||||
|
||||
if (startN0 == 0 && localN0 > 0)
|
||||
for (size_t n1 = 0; n1 < N1; n1++)
|
||||
codelet(0, n1, n2, adjust_array);
|
||||
|
||||
if (startN0 <= N0 / 2 && startN0 + localN0 > N0 / 2)
|
||||
for (size_t n1 = 0; n1 < N1; n1++)
|
||||
codelet(N0 / 2, n1, n2, adjust_array);
|
||||
}
|
||||
|
||||
template <typename Function>
|
||||
HamiltonianType codeletNyquistHamiltonian(
|
||||
int n2, const IArray &adjust_array, Function codelet) {
|
||||
size_t N0end = std::min(endN0, N0);
|
||||
size_t N0start = std::max(startN0, size_t(0));
|
||||
HamiltonianType E = 0;
|
||||
|
||||
#pragma omp parallel for schedule(static) collapse(2) reduction(+ : E)
|
||||
for (size_t n0 = N0start; n0 < N0end; n0++) {
|
||||
for (size_t n1 = 0; n1 < N1; n1++) {
|
||||
E += codelet(n0, n1, n2, adjust_array);
|
||||
}
|
||||
}
|
||||
|
||||
if (startN0 == 0 && localN0 > 0)
|
||||
#pragma omp parallel for schedule(static) reduction(+ : E)
|
||||
for (size_t n1 = 0; n1 < N1; n1++)
|
||||
E += codelet(0, n1, n2, adjust_array);
|
||||
|
||||
if (startN0 <= N0 / 2 && startN0 + localN0 > N0 / 2)
|
||||
#pragma omp parallel for schedule(static) reduction(+ : E)
|
||||
for (size_t n1 = 0; n1 < N1; n1++)
|
||||
E += codelet(N0 / 2, n1, n2, adjust_array);
|
||||
|
||||
return E;
|
||||
}
|
||||
|
||||
template <typename Function>
|
||||
HamiltonianType
|
||||
codeletGeneralHamiltonian(const IArray &adjust_array, Function codelet) {
|
||||
HamiltonianType E = 0;
|
||||
namespace ph = std::placeholders;
|
||||
size_t N2_HC = base_mgr->N2_HC;
|
||||
|
||||
#pragma omp parallel for schedule(static) collapse(3) reduction(+ : E)
|
||||
for (size_t n0 = startN0; n0 < startN0 + localN0; n0++) {
|
||||
for (size_t n1 = 0; n1 < N1; n1++) {
|
||||
for (size_t n2 = 1; n2 < N2_HC - 1; n2++) {
|
||||
E += codelet(n0, n1, n2, adjust_array);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
E += codeletNyquistHamiltonian(
|
||||
0, adjust_array, std::bind(codelet, ph::_1, ph::_2, ph::_3, ph::_4));
|
||||
E += codeletNyquistHamiltonian(
|
||||
base_mgr->N2_HC - 1, adjust_array,
|
||||
std::bind(codelet, ph::_1, ph::_2, ph::_3, ph::_4));
|
||||
|
||||
return E;
|
||||
}
|
||||
};
|
||||
|
||||
}; // namespace LibLSS
|
||||
|
||||
#endif
|
13
extra/hades/libLSS/samplers/rgen/hmc/hmc_kinetic.tcc
Normal file
|
@@ -0,0 +1,13 @@
HMCDensitySampler::HamiltonianType
HMCDensitySampler::computeHamiltonian_Kinetic() {
  CArrayType::ArrayType &momentum_array = *momentum_field->array;


  auto m_w = fwrap(momentum_array);
  auto r = std::real(m_w);
  auto i = std::imag(m_w);

  double Ekin = (r*r+i*i).sum() / (2.);
  comm->all_reduce_t(MPI_IN_PLACE, &Ekin, 1, MPI_SUM);
  return Ekin*0.5;
}
17
extra/hades/libLSS/samplers/rgen/hmc/hmc_prior.tcc
Normal file
|
@@ -0,0 +1,17 @@
HMCDensitySampler::HamiltonianType HMCDensitySampler::computeHamiltonian_Prior(
    MarkovState &state, CArrayRef const &s_array) {
  auto s_w = fwrap(s_array);
  auto r = std::real(s_w);
  auto i = std::imag(s_w);
  double Eprior = (r*r+i*i).sum() / (0.5);

  comm->all_reduce_t(MPI_IN_PLACE, &Eprior, 1, MPI_SUM);

  return Eprior*0.5;
}

void HMCDensitySampler::computeGradientPsi_Prior(
    MarkovState &state, CArrayRef const &s, CArrayRef &grad_array) {
  ConsoleContext<LOG_DEBUG> ctx("gradient psi prior");
  fwrap(grad_array) = fwrap(s)*2.0;
}
|
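The prior above treats the white-noise modes as unit-variance Gaussians: the energy is a plain sum of squared real and imaginary parts and its gradient is 2 s, which is exactly what computeGradientPsi_Prior writes. A tiny standalone sketch of that pair (illustrative values only, not libLSS code):

#include <complex>
#include <vector>
#include <cstdio>

int main() {
  std::vector<std::complex<double>> s = {{0.3, -0.7}, {1.2, 0.1}};

  double E = 0;
  std::vector<std::complex<double>> grad(s.size());
  for (std::size_t k = 0; k < s.size(); k++) {
    E += std::norm(s[k]);      // (Re s_k)^2 + (Im s_k)^2
    grad[k] = 2.0 * s[k];      // same rule as fwrap(grad_array) = fwrap(s) * 2.0
  }

  std::printf("E_prior = %g, grad[0] = (%g, %g)\n", E, grad[0].real(), grad[0].imag());
  return 0;
}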
1031
extra/hades/libLSS/samplers/rgen/nuts/nuts_density_sampler.cpp
Normal file
1031
extra/hades/libLSS/samplers/rgen/nuts/nuts_density_sampler.cpp
Normal file
File diff suppressed because it is too large
244
extra/hades/libLSS/samplers/rgen/nuts/nuts_density_sampler.hpp
Normal file
244
extra/hades/libLSS/samplers/rgen/nuts/nuts_density_sampler.hpp
Normal file
|
@ -0,0 +1,244 @@
|
|||
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/samplers/rgen/nuts/nuts_density_sampler.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_NUTS_DENSITY_HPP
#define __LIBLSS_NUTS_DENSITY_HPP

#include <CosmoTool/fourier/fft/fftw_calls.hpp>
#include <boost/bind.hpp> // provides boost::bind and the global _1.._4 placeholders used below
#include "libLSS/mpi/generic_mpi.hpp"
#include "libLSS/mcmc/global_state.hpp"
#include "libLSS/tools/fftw_allocator.hpp"
#include "libLSS/samplers/core/markov.hpp"
#include "libLSS/samplers/core/random_number.hpp"
#include "libLSS/samplers/core/types_samplers.hpp"
#include "libLSS/tools/symplectic_integrator.hpp"

namespace LibLSS {

  namespace NUTS_step {

    // 1) store some constants that are required during recursion
    struct nuts_util {
      // Constants through each recursion
      double log_u;
      double H0;
      int sign;

      // Aggregators through each recursion
      int n_tree;
      double sum_prob;
      bool criterion;
    };
  } // namespace NUTS_step

  namespace NUTS_Details {

    template <typename Function>
    void accumulateHamiltonian(
        int n0, int n1, int n2, const IArrayType::ArrayType &adjust_array,
        const Function &f, double &E) {
      E += f(n0, n1, n2, adjust_array);
    }

  } // namespace NUTS_Details

  namespace NUTSOption {
    using namespace SymplecticOption;
  };

  class NUTSDensitySampler : public MarkovSampler {
  public:
    typedef ArrayType::ArrayType Array;
    typedef ArrayType::RefArrayType ArrayRef;
    typedef CArrayType::ArrayType CArray;
    typedef CArrayType::RefArrayType CArrayRef;
    typedef IArrayType::ArrayType IArray;

    typedef double HamiltonianType;

  protected:
    typedef boost::multi_array_ref<IArrayType::ArrayType::element, 1>
        FlatIntType;

    MPI_Communication *comm;

    long fourierLocalSize;
    FCalls::plan_type analysis_plan, synthesis_plan;
    FlatIntType *flat_key;
    long N0, N1, N2, N2real, Ntot, Ntot_k, N2_HC;
    long startN0, localN0, localNtot, localNtot_k;
    double L0, L1, L2, volume, volNorm;
    int Ncat;
    SLong *attempt_field, *accept_field;

    FFTW_Allocator<double> allocator_real;
    FFTW_Allocator<std::complex<double>> allocator_complex;

    ArrayType *sqrt_mass_field, *s_field;
    IArrayType *adjust_field;
    CArrayType *momentum_field, *s_hat_field;
    FFTW_Real_Array *tmp_real_field;
    FFTW_Complex_Array *tmp_complex_field;
    boost::multi_array<int, 1> peer;

    int maxTime;
    double maxEpsilon;
    SymplecticIntegrators symp;

    void doSympInt(MarkovState &state, CArray &s_hat);
    void updateMomentum(MarkovState &state, double dt, CArrayRef &force);
    void updatePosition(double dt, CArray &s_hat);
    HamiltonianType computeHamiltonian(
        MarkovState &state, CArray &s_hat, bool final_call);
    HamiltonianType computeHamiltonian_Prior(
        MarkovState &state, CArray &s_hat);
    HamiltonianType computeHamiltonian_Kinetic();
    void initializeMomenta(MarkovState &state);
    void computeGradientPsi(
        MarkovState &state, CArray &s, CArrayRef &grad_array);
    void computeGradientPsi_Prior(
        MarkovState &state, CArray &s, CArrayRef &grad_array);

    void updateMass(MarkovState &state);
    void Hermiticity_fixup(CArrayType::ArrayType &a);
    void Hermiticity_fixup_plane(int Nplane, CArrayType::ArrayType &a);

    virtual void computeGradientPsi_Likelihood(
        MarkovState &state, CArray &s, CArrayRef &grad_array,
        bool accumulate) = 0;
    virtual HamiltonianType computeHamiltonian_Likelihood(
        MarkovState &state, CArray &s_hat, bool final_call) = 0;

    virtual void saveAuxiliaryAcceptedFields(MarkovState &state) {}

    void computeFourierSpace_GradientPsi(
        ArrayRef &real_gradient, CArrayRef &grad_array, bool accumulate);

    typedef boost::multi_array<double, 2> IntegratorCoefficients;
    typedef NUTSOption::IntegratorScheme IntegratorScheme;

    IntegratorCoefficients I_coefs;

  public:
    NUTSDensitySampler(
        MPI_Communication *comm, int maxTimeIntegration, double maxEpsilon);
    virtual ~NUTSDensitySampler();

    virtual void generateMockData(MarkovState &state) = 0;
    void generateRandomField(MarkovState &state);

    void setIntegratorScheme(IntegratorScheme scheme);
    // void setIntegratorScheme(const IntegratorCoefficients& coefs);

    HamiltonianType computeHamiltonian(MarkovState &state);

    void restore_NUTS(MarkovState &state);
    void initialize_NUTS(MarkovState &state);

    virtual void sample(MarkovState &state);

    void checkGradient(MarkovState &state, int step = 10);
    void checkHermiticityFixup(MarkovState &state);

    void setMaxEpsilon(double eps) { this->maxEpsilon = eps; }
    void setMaxTimeSteps(int Ntime) { this->maxTime = Ntime; }

    // Codelet generic functions
    template <typename Function>
    void codeletGeneral(IArray &adjust_array, Function codelet) {
#pragma omp parallel for schedule(static)
      for (int n0 = startN0; n0 < startN0 + localN0; n0++) {
        for (int n1 = 0; n1 < N1; n1++) {
          for (int n2 = 1; n2 < N2_HC - 1; n2++) {
            codelet(n0, n1, n2, adjust_array);
          }
        }
      }

      codeletNyquist(0, adjust_array, codelet);
      codeletNyquist(N2_HC - 1, adjust_array, codelet);
    }

    template <typename Function>
    void codeletNyquist(int n2, const IArray &adjust_array, Function codelet) {
      int N0end = std::min(startN0 + localN0, N0 / 2);
      int N0start = std::max(startN0, 1L);
#pragma omp parallel for schedule(static)
      for (int n0 = N0start; n0 < N0end; n0++) {
        for (int n1 = 0; n1 < N1; n1++) {
          codelet(n0, n1, n2, adjust_array);
        }
      }

      if (startN0 == 0 && localN0 > 0)
        for (int n1 = 0; n1 <= N1 / 2; n1++)
          codelet(0, n1, n2, adjust_array);

      if (startN0 <= N0 / 2 && startN0 + localN0 > N0 / 2)
        for (int n1 = 0; n1 <= N1 / 2; n1++)
          codelet(N0 / 2, n1, n2, adjust_array);
    }

    template <typename Function>
    HamiltonianType codeletNyquistHamiltonian(
        int n2, const IArray &adjust_array, Function codelet) {
      int N0end = std::min(startN0 + localN0, N0 / 2);
      int N0start = std::max(startN0, 1L);
      HamiltonianType E = 0;

#pragma omp parallel for schedule(static) reduction(+ : E)
      for (int n0 = N0start; n0 < N0end; n0++) {
        for (int n1 = 0; n1 < N1; n1++) {
          E += codelet(n0, n1, n2, adjust_array);
        }
      }

      if (startN0 == 0 && localN0 > 0)
#pragma omp parallel for schedule(static) reduction(+ : E)
        for (int n1 = 0; n1 <= N1 / 2; n1++)
          E += codelet(0, n1, n2, adjust_array);

      if (startN0 <= N0 / 2 && startN0 + localN0 > N0 / 2)
#pragma omp parallel for schedule(static) reduction(+ : E)
        for (int n1 = 0; n1 <= N1 / 2; n1++)
          E += codelet(N0 / 2, n1, n2, adjust_array);

      return E;
    }

    template <typename Function>
    HamiltonianType
    codeletGeneralHamiltonian(const IArray &adjust_array, Function codelet) {
      HamiltonianType E = 0;

#pragma omp parallel for schedule(static) reduction(+ : E)
      for (int n0 = startN0; n0 < startN0 + localN0; n0++) {
        for (int n1 = 0; n1 < N1; n1++) {
          for (int n2 = 1; n2 < N2_HC - 1; n2++) {
            E += codelet(n0, n1, n2, adjust_array);
          }
        }
      }

      E += codeletNyquistHamiltonian(
          0, adjust_array, boost::bind(codelet, _1, _2, _3, _4));
      E += codeletNyquistHamiltonian(
          N2_HC - 1, adjust_array, boost::bind(codelet, _1, _2, _3, _4));

      return E;
    }
  };

}; // namespace LibLSS

#endif
|
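The codeletGeneral*/codeletNyquist* templates above only encode the grid traversal; the physics enters through the bound codelet, as the nuts_kinetic.tcc and nuts_prior.tcc files below illustrate. A standalone sketch of the same pattern (not libLSS code; the traversal, the per-cell energy function and the field are invented for illustration):

#include <boost/bind/bind.hpp>
#include <boost/multi_array.hpp>
#include <algorithm>
#include <cstddef>
#include <cstdio>

using boost::placeholders::_1;
using boost::placeholders::_2;
using boost::placeholders::_3;

// Generic traversal: accumulates whatever the bound codelet returns per cell.
template <typename Function>
double accumulate3d(std::size_t N, Function codelet) {
  double E = 0;
  for (std::size_t n0 = 0; n0 < N; n0++)
    for (std::size_t n1 = 0; n1 < N; n1++)
      for (std::size_t n2 = 0; n2 < N; n2++)
        E += codelet(n0, n1, n2);
  return E;
}

// The "physics": a per-cell energy that also reads an external field.
static double cellEnergy(std::size_t n0, std::size_t n1, std::size_t n2,
                         boost::multi_array<double, 3> const &field) {
  double v = field[n0][n1][n2];
  return 0.5 * v * v;
}

int main() {
  const std::size_t N = 4;
  boost::multi_array<double, 3> field(boost::extents[N][N][N]);
  std::fill_n(field.data(), field.num_elements(), 2.0);

  double E = accumulate3d(
      N, boost::bind(&cellEnergy, _1, _2, _3, boost::cref(field)));
  std::printf("E = %g (expected %g)\n", E, 0.5 * 4.0 * N * N * N);
  return 0;
}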
34
extra/hades/libLSS/samplers/rgen/nuts/nuts_kinetic.tcc
Normal file
34
extra/hades/libLSS/samplers/rgen/nuts/nuts_kinetic.tcc
Normal file
|
@ -0,0 +1,34 @@
|
|||
inline HMCDensitySampler::HamiltonianType codeletHamiltonianKinetic(
    int n0, int n1, int n2, const IArrayType::ArrayType &adjust_array,
    const ArrayType::ArrayType &sqM,
    const CArrayType::ArrayType &momentum_array) {
  const CArrayType::ArrayType::element &e = momentum_array[n0][n1][n2];
  int adjust = adjust_array[n0][n1][n2];
  double Amplitude = (sqM[n0][n1][n2]);
  if (Amplitude == 0 || adjust == 0)
    return 0;

  double Ekin = (square(e.real()) + square(e.imag())) / Amplitude;
  if (isnan(Ekin)) {
    error_helper<ErrorBadState>(
        format("NaN in kinetic hamiltonian n0=%d n1=%d n2=%d Mass=%lg") % n0 %
        n1 % n2 % Amplitude);
  }

  return Ekin;
}

HMCDensitySampler::HamiltonianType
HMCDensitySampler::computeHamiltonian_Kinetic() {
  CArrayType::ArrayType &momentum_array = *momentum_field->array;
  ArrayType::ArrayType &sqrt_mass = *sqrt_mass_field->array;
  IArrayType::ArrayType &adjust_array = *adjust_field->array;

  double Ekin = codeletGeneralHamiltonian(
      adjust_array, boost::bind(
                        codeletHamiltonianKinetic, _1, _2, _3, _4,
                        boost::cref(sqrt_mass), boost::cref(momentum_array)));

  Ekin *= 0.5;
  comm->all_reduce_t(MPI_IN_PLACE, &Ekin, 1, MPI_SUM);
  return Ekin;
}
|
78
extra/hades/libLSS/samplers/rgen/nuts/nuts_prior.tcc
Normal file
78
extra/hades/libLSS/samplers/rgen/nuts/nuts_prior.tcc
Normal file
|
@ -0,0 +1,78 @@
|
|||
inline HMCDensitySampler::HamiltonianType codeletHamiltonianPrior(
    int n0, int n1, int n2, const IArrayType::ArrayType &adjust_array,
    const ArrayType1d::ArrayType &pspec,
    const IArrayType::ArrayType &key_array,
    const CArrayType::ArrayType &s_array) {
  const CArrayType::ArrayType::element &e = s_array[n0][n1][n2];
  long powerPosition = key_array[n0][n1][n2];
  double Amplitude = pspec.data()[powerPosition];
  double adjust = adjust_array[n0][n1][n2];

  if (Amplitude == 0)
    return 0;

  double Eprior = adjust * (square(e.real()) + square(e.imag())) / Amplitude;

  if (isnan(Eprior)) {
    error_helper<ErrorBadState>("NaN in hamiltonian");
  }

  return Eprior;
}

inline void codeletGradientPrior(
    int n0, int n1, int n2, const IArrayType::ArrayType &adjust_array,
    double volume, const ArrayType1d::ArrayType &pspec,
    const IArrayType::ArrayType &key_array, const CArrayType::ArrayType &s,
    CArrayType::RefArrayType &grad_array) {
  const CArrayType::ArrayType::element &e = s[n0][n1][n2];
  long powerPosition = key_array[n0][n1][n2];
  double Amplitude = pspec.data()[powerPosition] * volume;
  CArrayType::ArrayType::element &gradient = grad_array[n0][n1][n2];
  double adjust = adjust_array[n0][n1][n2];

  if (Amplitude == 0 || adjust == 0)
    gradient = 0;
  else
    gradient = adjust * e / Amplitude;
}

HMCDensitySampler::HamiltonianType HMCDensitySampler::computeHamiltonian_Prior(
    MarkovState &state, CArray &s_array) {
  ArrayType1d::ArrayType &pspec =
      *state.get<ArrayType1d>("powerspectrum")->array;
  IArrayType::ArrayType &adjust_array =
      *state.get<IArrayType>("adjust_mode_multiplier")->array;
  IArrayType::ArrayType &key_array = *state.get<IArrayType>("k_keys")->array;

  double Eprior = 0;

  Eprior = codeletGeneralHamiltonian(
      adjust_array, boost::bind(
                        codeletHamiltonianPrior, _1, _2, _3, _4,
                        boost::cref(pspec), boost::cref(key_array),
                        boost::cref(s_array)));

  Eprior *= 0.5 / volume;
  comm->all_reduce_t(MPI_IN_PLACE, &Eprior, 1, MPI_SUM);

  return Eprior;
}

void HMCDensitySampler::computeGradientPsi_Prior(
    MarkovState &state, CArray &s, CArrayRef &grad_array) {
  IArrayType::ArrayType &adjust_array = *adjust_field->array;
  IArrayType::ArrayType &key_array = *state.get<IArrayType>("k_keys")->array;
  ArrayType1d::ArrayType &pspec =
      *state.get<ArrayType1d>("powerspectrum")->array;

  for (long n = 0; n < grad_array.num_elements(); n++)
    grad_array.data()[n] = 0;

  codeletGeneral(
      adjust_array, boost::bind(
                        &codeletGradientPrior, _1, _2, _3, _4, volume,
                        boost::cref(pspec), boost::cref(key_array),
                        boost::cref(s), boost::ref(grad_array)));
}
|
18
extra/hades/libLSS/tests/data/gen_gradient_data.py
Normal file
18
extra/hades/libLSS/tests/data/gen_gradient_data.py
Normal file
|
@ -0,0 +1,18 @@
|
|||
#+
#   ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/data/gen_gradient_data.py
#   Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
#   Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
#   Additional contributions from:
#      Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import h5py as h5
import numpy as np

N = 32

numbers = np.random.normal(size=(N, N, N))
with h5.File("gradient_numbers.h5", mode="w") as f:
    f['/random'] = numbers
    f['/random_fft'] = np.fft.rfftn(numbers)
|
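The script writes a Gaussian cube and its NumPy rFFT so that a C++ test can cross-check its own FFTW transform against an independent implementation. A hedged sketch of the reading side, assuming CosmoTool's hdf5_read_array helper (only hdf5_write_array appears elsewhere in this import, so the reader name is an assumption):

#include <CosmoTool/hdf5_array.hpp>
#include <boost/multi_array.hpp>
#include <complex>
#include <cstdio>

int main() {
  const int N = 32; // must match the Python generator above
  boost::multi_array<double, 3> cube(boost::extents[N][N][N]);
  boost::multi_array<std::complex<double>, 3> cube_hat(
      boost::extents[N][N][N / 2 + 1]);

  H5::H5File f("gradient_numbers.h5", H5F_ACC_RDONLY);
  CosmoTool::hdf5_read_array(f, "random", cube);
  CosmoTool::hdf5_read_array(f, "random_fft", cube_hat);

  // A real test would now run an FFTW r2c transform on `cube` and compare it
  // against `cube_hat` element by element.
  std::printf("first element: %g, first mode: (%g, %g)\n", cube[0][0][0],
              cube_hat[0][0][0].real(), cube_hat[0][0][0].imag());
  return 0;
}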
BIN
extra/hades/libLSS/tests/data/gradient_numbers.h5
Normal file
BIN
extra/hades/libLSS/tests/data/gradient_numbers.h5
Normal file
Binary file not shown.
288
extra/hades/libLSS/tests/generic_gradient_benchmark.cpp
Normal file
288
extra/hades/libLSS/tests/generic_gradient_benchmark.cpp
Normal file
|
@ -0,0 +1,288 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/generic_gradient_benchmark.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/samplers/core/powerspec_tools.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
|
||||
#include "libLSS/samplers/rgen/hmc/hmc_density_sampler.hpp"
|
||||
#include "libLSS/tools/array_tools.hpp"
|
||||
#include "libLSS/physics/cosmo.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
#include <CosmoTool/cosmopower.hpp>
|
||||
#include <CosmoTool/algo.hpp>
|
||||
#include <boost/format.hpp>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include "libLSS/tests/setup_hades_test_run.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::format;
|
||||
using CosmoTool::square;
|
||||
using std::endl;
|
||||
using std::ios;
|
||||
using std::ofstream;
|
||||
using std::string;
|
||||
|
||||
typedef boost::multi_array_types::extent_range range;
|
||||
|
||||
typedef RandomNumberThreaded<GSL_RandomNumber> RGenType;
|
||||
|
||||
static constexpr size_t BORG_RESOLUTION = 128; //128;
|
||||
|
||||
#ifndef BORG_SUPERSAMPLING
|
||||
# define BORG_SUPERSAMPLING 1
|
||||
#endif
|
||||
|
||||
#ifndef BORG_FORCESAMPLING
|
||||
# define BORG_FORCESAMPLING 1
|
||||
#endif
|
||||
|
||||
namespace {
|
||||
#if defined(ARES_MPI_FFTW)
|
||||
RegisterStaticInit reg0(fftw_mpi_init, fftw_mpi_cleanup, 9, "MPI/FFTW");
|
||||
#endif
|
||||
RegisterStaticInit reg1(
|
||||
CosmoTool::init_fftw_wisdom, CosmoTool::save_fftw_wisdom, 10,
|
||||
"FFTW/WISDOM");
|
||||
#if !defined(ARES_MPI_FFTW) && \
|
||||
defined( \
|
||||
_OPENMP) // Do not use MPI and Threaded FFTW at the same time for the moment.
|
||||
RegisterStaticInit
|
||||
reg2(fftw_init_threads, fftw_cleanup_threads, 11, "FFTW/THREADS");
|
||||
#endif
|
||||
}; // namespace
|
||||
|
||||
class DummyPowerSpectrum : public PowerSpectrumSampler_Base {
|
||||
public:
|
||||
DummyPowerSpectrum(MPI_Communication *comm)
|
||||
: PowerSpectrumSampler_Base(comm) {}
|
||||
|
||||
virtual void initialize(MarkovState &state) { initialize_base(state); }
|
||||
virtual void restore(MarkovState &state) { restore_base(state); }
|
||||
|
||||
virtual void sample(MarkovState &state) {}
|
||||
};
|
||||
|
||||
void createCosmologicalPowerSpectrum(
|
||||
MarkovState &state, CosmologicalParameters &cosmo_params,
|
||||
double adjust = 1) {
|
||||
double h;
|
||||
CosmoTool::CosmoPower cpower;
|
||||
|
||||
h = cpower.h = cosmo_params.h;
|
||||
cpower.OMEGA_B = cosmo_params.omega_b;
|
||||
cpower.OMEGA_C = cosmo_params.omega_m - cosmo_params.omega_b;
|
||||
cpower.SIGMA8 = cosmo_params.sigma8;
|
||||
cpower.setFunction(CosmoTool::CosmoPower::HU_WIGGLES);
|
||||
cpower.updateCosmology();
|
||||
cpower.normalize();
|
||||
|
||||
ArrayType1d::ArrayType &k = *state.get<ArrayType1d>("k_modes")->array;
|
||||
ArrayType1d::ArrayType &Pk = *state.get<ArrayType1d>("powerspectrum")->array;
|
||||
for (long i = 0; i < k.num_elements(); i++) {
|
||||
Pk[i] = cpower.power(k[i] * h) * h * h * h * adjust;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef MIN_RANK
|
||||
# define MIN_RANK 1
|
||||
#endif
|
||||
|
||||
static const int INIT_RANK = MIN_RANK;
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
MPI_Communication *comm_base = setupMPI(argc, argv);
|
||||
StaticInit::execute();
|
||||
Console &cons = Console::instance();
|
||||
cons.setVerboseLevel<LOG_DEBUG>();
|
||||
int rankThreshold = INIT_RANK;
|
||||
int k = 0, kmax;
|
||||
using boost::chrono::system_clock;
|
||||
|
||||
cons.outputToFile(
|
||||
boost::str(format("gradient_bench_rank_%d.txt") % comm_base->rank()));
|
||||
|
||||
for (kmax = 0; rankThreshold < comm_base->size(); kmax++)
|
||||
rankThreshold *= 2;
|
||||
kmax++;
|
||||
|
||||
rankThreshold = INIT_RANK;
|
||||
|
||||
cons.print<LOG_DEBUG>(format("kmax = %d") % kmax);
|
||||
|
||||
while (k < kmax) {
|
||||
int color = (comm_base->rank() < rankThreshold) ? 0 : 1;
|
||||
// int color;
|
||||
// int groupper = comm_base->size() / rankThreshold;
|
||||
// if (groupper > 1)
|
||||
// color =
|
||||
// ((comm_base->rank() % groupper) == 0) ? 0 : 1;
|
||||
// else
|
||||
// color = comm_base->rank() == 0 ? 0 : 1;
|
||||
cons.format<LOG_DEBUG>("Color is %d", color);
|
||||
MPI_Communication *comm = comm_base->split(color, comm_base->rank());
|
||||
boost::chrono::system_clock::time_point start_context_forward,
|
||||
start_context_adjoint;
|
||||
boost::chrono::duration<double> duration_forward, duration_adjoint;
|
||||
|
||||
if (color == 1) {
|
||||
comm_base->barrier();
|
||||
rankThreshold = std::min(comm_base->size(), 2 * rankThreshold);
|
||||
k++;
|
||||
delete comm;
|
||||
continue;
|
||||
}
|
||||
|
||||
{
|
||||
MarkovState state;
|
||||
RGenType randgen(-1);
|
||||
int M;
|
||||
BoxModel box;
|
||||
BoxModel box2;
|
||||
LikelihoodInfo info;
|
||||
|
||||
randgen.seed(2348098);
|
||||
|
||||
state.newElement(
|
||||
"random_generator", new RandomStateElement<RandomNumber>(&randgen));
|
||||
|
||||
LibLSS_test::setup_hades_test_run(comm, BORG_RESOLUTION, 600, state);
|
||||
LibLSS_test::setup_box(state, box);
|
||||
|
||||
state.newScalar<long>("Ndata0", box.N0 / DOWNGRADE_DATA);
|
||||
state.newScalar<long>("Ndata1", box.N1 / DOWNGRADE_DATA);
|
||||
state.newScalar<long>("Ndata2", box.N2 / DOWNGRADE_DATA);
|
||||
// FIXME!
|
||||
state.newScalar<long>("localNdata0", state.getScalar<long>("startN0"));
|
||||
state.newScalar<long>(
|
||||
"localNdata1",
|
||||
state.getScalar<long>("startN0") + state.getScalar<long>("localN0"));
|
||||
state.newScalar<long>("localNdata2", 0);
|
||||
state.newScalar<long>("localNdata3", box.N1 / DOWNGRADE_DATA);
|
||||
state.newScalar<long>("localNdata4", 0);
|
||||
state.newScalar<long>("localNdata5", box.N2 / DOWNGRADE_DATA);
|
||||
|
||||
LibLSS_test::setup_likelihood_info(state, info, comm);
|
||||
|
||||
box2 = box;
|
||||
box2.N0 *= 2;
|
||||
box2.N1 *= 2;
|
||||
box2.N2 *= 2;
|
||||
|
||||
DummyPowerSpectrum dummy_p(comm);
|
||||
HMCDensitySampler::Likelihood_t likelihood = makeLikelihood(info);
|
||||
|
||||
auto model = buildModel(comm, state, box, box2);
|
||||
BorgModelElement *model_element = new BorgModelElement();
|
||||
|
||||
model_element->obj = model;
|
||||
state.newElement("BORG_model", model_element);
|
||||
|
||||
HMCDensitySampler hmc(comm, likelihood);
|
||||
|
||||
// Initialize (data,s)->t sampler
|
||||
dummy_p.init_markov(state);
|
||||
hmc.init_markov(state);
|
||||
|
||||
createCosmologicalPowerSpectrum(
|
||||
state, state.getScalar<CosmologicalParameters>("cosmology"));
|
||||
|
||||
// Build some mock field
|
||||
{
|
||||
ConsoleContext<LOG_INFO> ctx("Generation performance");
|
||||
timings::set_file_pattern("timing_generation_N_" + to_string(rankThreshold) + "_%d.txt");
|
||||
timings::reset();
|
||||
hmc.generateMockData(state);
|
||||
timings::trigger_dump();
|
||||
timings::set_file_pattern("timing_stats_N_" + to_string(rankThreshold) + "_%d.txt");
|
||||
}
|
||||
|
||||
typedef FFTW_Manager<double, 3> DFT_Manager;
|
||||
std::unique_ptr<DFT_Manager> mgr_p = std::make_unique<DFT_Manager>(
|
||||
BORG_RESOLUTION, BORG_RESOLUTION, BORG_RESOLUTION, comm);
|
||||
auto &mgr = *mgr_p;
|
||||
CArrayType *s_hat_field = state.get<CArrayType>("s_hat_field");
|
||||
auto gradient_field_p = mgr.allocate_complex_array();
|
||||
auto &gradient_field = gradient_field_p.get_array();
|
||||
auto tmp_field_p = mgr.allocate_complex_array();
|
||||
auto delta_out_p = mgr.allocate_array();
|
||||
auto &delta_out = delta_out_p.get_array();
|
||||
|
||||
double volume = box.L0 * box.L1 * box.L2;
|
||||
double ai = state.getScalar<double>("borg_a_initial");
|
||||
Cosmology cosmo(state.getScalar<CosmologicalParameters>("cosmology"));
|
||||
double D_init = cosmo.d_plus(ai) /
|
||||
cosmo.d_plus(1.0); // Scale factor for initial conditions
|
||||
|
||||
array::scaleAndCopyArray3d(
|
||||
tmp_field_p.get_array(), *s_hat_field->array, D_init);
|
||||
|
||||
{
|
||||
ConsoleContext<LOG_INFO> ctx("Forward-Gradient performance");
|
||||
timings::set_file_pattern("timing_forward_N_" + to_string(rankThreshold) + "_%d.txt");
|
||||
timings::reset();
|
||||
|
||||
start_context_forward = system_clock::now();
|
||||
//model->forwardModel(tmp_field_p.get_array(), delta_out, true);
|
||||
likelihood->logLikelihood(tmp_field_p.get_array());
|
||||
duration_forward = system_clock::now() - start_context_forward;
|
||||
timings::trigger_dump();
|
||||
}
|
||||
|
||||
if (RUN_RSD_TEST) {
|
||||
{
|
||||
ConsoleContext<LOG_INFO> ctx("Forward RSD performance");
|
||||
|
||||
double vobsext[3] = {100, 100, 100};
|
||||
start_context_adjoint = system_clock::now();
|
||||
model->forwardModelRsdField(delta_out, vobsext);
|
||||
duration_adjoint = system_clock::now() - start_context_adjoint;
|
||||
}
|
||||
|
||||
{
|
||||
double vobsext[3] = {0, 0, 0};
|
||||
model->forwardModelRsdField(delta_out, vobsext);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
ConsoleContext<LOG_INFO> ctx("Gradient performance");
|
||||
timings::set_file_pattern("timing_gradient_N_" + to_string(rankThreshold) + "_%d.txt");
|
||||
timings::reset();
|
||||
start_context_adjoint = system_clock::now();
|
||||
fwrap(gradient_field) = 1.0;
|
||||
likelihood->gradientLikelihood(
|
||||
tmp_field_p.get_array(), gradient_field, false, 1.0);
|
||||
duration_adjoint = system_clock::now() - start_context_adjoint;
|
||||
timings::trigger_dump();
|
||||
}
|
||||
}
|
||||
|
||||
if (comm_base->rank() == 0) {
|
||||
ofstream f_performance("bench_performance.txt", ios::app);
|
||||
f_performance << format("%s % 5d % 5d % 5d % .5lf % .5lf") % testName %
|
||||
BORG_RESOLUTION % rankThreshold %
|
||||
smp_get_max_threads() % duration_forward.count() %
|
||||
duration_adjoint.count()
|
||||
<< endl;
|
||||
}
|
||||
comm_base->barrier();
|
||||
rankThreshold = std::min(comm_base->size(), 2 * rankThreshold);
|
||||
k++;
|
||||
delete comm;
|
||||
}
|
||||
|
||||
StaticInit::finalize();
|
||||
doneMPI();
|
||||
|
||||
return 0;
|
||||
}
|
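The benchmark's outer loop splits the communicator by color, lets the ranks below rankThreshold run the timed forward and gradient passes while the remaining ranks wait at the barrier, and doubles the threshold until the whole job participates. A minimal plain-MPI sketch of that split-and-double pattern, without any of the libLSS setup:

#include <mpi.h>
#include <algorithm>
#include <cstdio>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  for (int threshold = 1;; threshold = std::min(size, 2 * threshold)) {
    int color = (rank < threshold) ? 0 : 1;
    MPI_Comm sub;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub);

    if (color == 0) {
      // ... timed work on `sub` would go here ...
      if (rank == 0)
        std::printf("ran with %d active rank(s)\n", threshold);
    }

    MPI_Comm_free(&sub);
    MPI_Barrier(MPI_COMM_WORLD);
    if (threshold == size)
      break;
  }

  MPI_Finalize();
  return 0;
}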
430
extra/hades/libLSS/tests/generic_gradient_test.cpp
Normal file
430
extra/hades/libLSS/tests/generic_gradient_test.cpp
Normal file
|
@ -0,0 +1,430 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/generic_gradient_test.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/samplers/core/powerspec_tools.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
|
||||
#include "libLSS/tools/array_tools.hpp"
|
||||
#include "libLSS/physics/cosmo.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include <CosmoTool/cosmopower.hpp>
|
||||
#include <CosmoTool/algo.hpp>
|
||||
#include <boost/format.hpp>
|
||||
#include "libLSS/physics/likelihoods/base.hpp"
|
||||
#include "libLSS/physics/hades_pt.hpp"
|
||||
#include "libLSS/physics/chain_forward_model.hpp"
|
||||
#include "libLSS/physics/hermitic.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::format;
|
||||
using CosmoTool::square;
|
||||
using std::string;
|
||||
|
||||
typedef boost::multi_array_types::extent_range range;
|
||||
|
||||
typedef RandomNumberMPI<GSL_RandomNumber> RGenType;
|
||||
|
||||
static const int STEP_GRADIENT = 1 * 1; //2*2;//8*8;
|
||||
static const bool TEST_BORG_REDSHIFT = false;
|
||||
|
||||
#ifndef BORG_SUPERSAMPLING
|
||||
# define BORG_SUPERSAMPLING 1
|
||||
#endif
|
||||
|
||||
#ifndef BORG_FORCESAMPLING
|
||||
# define BORG_FORCESAMPLING 1
|
||||
#endif
|
||||
|
||||
#ifndef MODEL_TO_TEST
|
||||
# define MODEL_TO_TEST(model, box) \
|
||||
auto model = new HadesLinear(comm, box, 0.001)
|
||||
#endif
|
||||
|
||||
namespace {
|
||||
#if defined(ARES_MPI_FFTW)
|
||||
RegisterStaticInit reg0(fftw_mpi_init, fftw_mpi_cleanup, 9, "MPI/FFTW");
|
||||
#endif
|
||||
RegisterStaticInit reg1(
|
||||
CosmoTool::init_fftw_wisdom, CosmoTool::save_fftw_wisdom, 10,
|
||||
"FFTW/WISDOM");
|
||||
}; // namespace
|
||||
|
||||
class DummyPowerSpectrum : public PowerSpectrumSampler_Base {
|
||||
public:
|
||||
DummyPowerSpectrum(MPI_Communication *comm)
|
||||
: PowerSpectrumSampler_Base(comm) {}
|
||||
|
||||
virtual void initialize(MarkovState &state) { initialize_base(state); }
|
||||
virtual void restore(MarkovState &state) { restore_base(state); }
|
||||
|
||||
virtual void sample(MarkovState &state) {}
|
||||
};
|
||||
|
||||
void createCosmologicalPowerSpectrum(
|
||||
MarkovState &state, CosmologicalParameters &cosmo_params,
|
||||
double adjust = 1) {
|
||||
double h;
|
||||
CosmoTool::CosmoPower cpower;
|
||||
|
||||
h = cpower.h = cosmo_params.h;
|
||||
cpower.OMEGA_B = cosmo_params.omega_b;
|
||||
cpower.OMEGA_C = cosmo_params.omega_m - cosmo_params.omega_b;
|
||||
cpower.SIGMA8 = cosmo_params.sigma8;
|
||||
cpower.setFunction(CosmoTool::CosmoPower::HU_WIGGLES);
|
||||
cpower.updateCosmology();
|
||||
cpower.normalize();
|
||||
|
||||
ArrayType1d::ArrayType &k = *state.get<ArrayType1d>("k_modes")->array;
|
||||
ArrayType1d::ArrayType &Pk = *state.get<ArrayType1d>("powerspectrum")->array;
|
||||
for (long i = 0; i < k.num_elements(); i++) {
|
||||
Pk[i] = cpower.power(k[i] * h) * h * h * h * adjust;
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
MPI_Communication *comm = setupMPI(argc, argv);
|
||||
StaticInit::execute();
|
||||
Console &cons = Console::instance();
|
||||
cons.setVerboseLevel<LOG_DEBUG>();
|
||||
|
||||
cons.outputToFile(
|
||||
boost::str(format("gradient_test_rank_%d.txt") % comm->rank()));
|
||||
{
|
||||
MarkovState state;
|
||||
SelArrayType *sel_data, *sel_data2;
|
||||
SLong *N0, *N1, *N2, *N2_HC, *Ncat, *fourierLocalSize, *NUM_MODES, *localN0,
|
||||
*startN0;
|
||||
SDouble *L0, *L1, *L2, *K_MIN, *K_MAX, *corner0, *corner1, *corner2,
|
||||
*borg_a_initial;
|
||||
ArrayType1d *bias0;
|
||||
ArrayType *data0, *growth;
|
||||
RGenType randgen(comm, -1);
|
||||
int M;
|
||||
BoxModel box, box2;
|
||||
|
||||
randgen.seed(2348098);
|
||||
|
||||
state.newElement(
|
||||
"random_generator", new RandomStateElement<RandomNumber>(&randgen));
|
||||
|
||||
state.newElement("N0", N0 = new SLong());
|
||||
state.newElement("N1", N1 = new SLong());
|
||||
state.newElement("N2", N2 = new SLong());
|
||||
state.newElement("N2_HC", N2_HC = new SLong());
|
||||
state.newElement("NUM_MODES", NUM_MODES = new SLong());
|
||||
state.newElement("K_MIN", K_MIN = new SDouble());
|
||||
state.newElement("K_MAX", K_MAX = new SDouble());
|
||||
|
||||
state.newElement("L0", L0 = new SDouble());
|
||||
state.newElement("L1", L1 = new SDouble());
|
||||
state.newElement("L2", L2 = new SDouble());
|
||||
|
||||
state.newElement("NCAT", Ncat = new SLong());
|
||||
state.newElement("startN0", startN0 = new SLong());
|
||||
state.newElement("localN0", localN0 = new SLong());
|
||||
state.newElement("fourierLocalSize", fourierLocalSize = new SLong());
|
||||
|
||||
state.newElement("corner0", corner0 = new SDouble());
|
||||
state.newElement("corner1", corner1 = new SDouble());
|
||||
state.newElement("corner2", corner2 = new SDouble());
|
||||
|
||||
state.newElement("borg_a_initial", borg_a_initial = new SDouble());
|
||||
|
||||
state.newScalar<double>("ares_heat", 1.0);
|
||||
state.newScalar<int>("borg_pm_nsteps", 1);
|
||||
state.newScalar<double>("borg_pm_start_z", 69.);
|
||||
state.newScalar<bool>("borg_do_rsd", TEST_BORG_REDSHIFT);
|
||||
state.newScalar<int>("borg_supersampling", BORG_SUPERSAMPLING);
|
||||
state.newScalar<int>("borg_forcesampling", BORG_FORCESAMPLING);
|
||||
|
||||
M = box.N0 = N0->value = 16;
|
||||
box.N1 = N1->value = M;
|
||||
box.N2 = N2->value = M;
|
||||
|
||||
state.newScalar<long>("Ndata0", M / DOWNGRADE_DATA);
|
||||
state.newScalar<long>("Ndata1", M / DOWNGRADE_DATA);
|
||||
state.newScalar<long>("Ndata2", M / DOWNGRADE_DATA);
|
||||
ptrdiff_t localn0, startn0, data_localn0, data_startn0;
|
||||
|
||||
#ifdef ARES_MPI_FFTW
|
||||
{
|
||||
|
||||
fourierLocalSize->value =
|
||||
MPI_FCalls::local_size_3d(M, M, M, comm->comm(), &localn0, &startn0);
|
||||
startN0->value = startn0;
|
||||
localN0->value = localn0;
|
||||
|
||||
MPI_FCalls::local_size_3d(
|
||||
M / DOWNGRADE_DATA, M / DOWNGRADE_DATA, M / DOWNGRADE_DATA,
|
||||
comm->comm(), &data_localn0, &data_startn0);
|
||||
}
|
||||
#else
|
||||
fourierLocalSize->value = M * M * (M / 2 + 1);
|
||||
startn0 = startN0->value = 0;
|
||||
localn0 = localN0->value = M;
|
||||
data_localn0 = M / DOWNGRADE_DATA;
|
||||
data_startn0 = 0;
|
||||
#endif
|
||||
|
||||
state.newScalar<long>("localNdata0", data_startn0);
|
||||
state.newScalar<long>("localNdata1", data_startn0 + data_localn0);
|
||||
state.newScalar<long>("localNdata2", 0);
|
||||
state.newScalar<long>("localNdata3", (M / DOWNGRADE_DATA));
|
||||
state.newScalar<long>("localNdata4", 0);
|
||||
state.newScalar<long>("localNdata5", (M / DOWNGRADE_DATA));
|
||||
|
||||
cons.print<LOG_INFO>(
|
||||
format("startN0 = %d, localN0 = %d") % startN0->value % localN0->value);
|
||||
|
||||
Ncat->value = 1;
|
||||
|
||||
box.xmin0 = corner0->value = -1500;
|
||||
box.xmin1 = corner1->value = -1500;
|
||||
box.xmin2 = corner2->value = -1500;
|
||||
N2_HC->value = M / 2 + 1;
|
||||
NUM_MODES->value = 300;
|
||||
box.L0 = L0->value = 3000.0;
|
||||
box.L1 = L1->value = 3000.0;
|
||||
box.L2 = L2->value = 3000.0;
|
||||
K_MIN->value = 0.;
|
||||
K_MAX->value =
|
||||
M_PI *
|
||||
sqrt(
|
||||
square(N0->value / L0->value) + square(N1->value / L1->value) +
|
||||
square(N2->value / L2->value)) *
|
||||
1.1;
|
||||
|
||||
box2 = box;
|
||||
box2.N0 *= 2;
|
||||
box2.N1 *= 2;
|
||||
box2.N2 *= 2;
|
||||
|
||||
borg_a_initial->value = 0.001;
|
||||
|
||||
state.newElement(
|
||||
"growth_factor",
|
||||
growth = new ArrayType(
|
||||
boost::extents[range(startn0, startn0 + localn0)][M][M]));
|
||||
growth->eigen().fill(1);
|
||||
growth->setRealDims(ArrayDimension(M, M, M));
|
||||
if (DOWNGRADE_DATA == 1) {
|
||||
auto ext = boost::extents[range(startn0, startn0 + localn0)][M][M];
|
||||
auto adim = ArrayDimension(M, M, M);
|
||||
state.newElement("galaxy_data_0", data0 = new ArrayType(ext));
|
||||
data0->setRealDims(adim);
|
||||
state.newElement("galaxy_sel_window_0", sel_data = new SelArrayType(ext));
|
||||
sel_data->setRealDims(adim);
|
||||
state.newElement(
|
||||
"galaxy_synthetic_sel_window_0", sel_data2 = new SelArrayType(ext));
|
||||
sel_data2->setRealDims(adim);
|
||||
} else {
|
||||
size_t startdown = data_startn0;
|
||||
size_t enddown = data_startn0 + data_localn0;
|
||||
auto ext = boost::extents[range(startdown, enddown)][M / DOWNGRADE_DATA]
|
||||
[M / DOWNGRADE_DATA];
|
||||
auto adim = ArrayDimension(
|
||||
M / DOWNGRADE_DATA, M / DOWNGRADE_DATA, M / DOWNGRADE_DATA);
|
||||
|
||||
state.newElement("galaxy_data_0", data0 = new ArrayType(ext));
|
||||
data0->setRealDims(adim);
|
||||
state.newElement("galaxy_sel_window_0", sel_data = new SelArrayType(ext));
|
||||
sel_data->setRealDims(adim);
|
||||
state.newElement(
|
||||
"galaxy_synthetic_sel_window_0", sel_data2 = new SelArrayType(ext));
|
||||
sel_data2->setRealDims(adim);
|
||||
}
|
||||
state.newElement(
|
||||
"galaxy_bias_0", bias0 = new ArrayType1d(boost::extents[0]));
|
||||
state.newScalar("galaxy_nmean_0", 20.0);
|
||||
state.newScalar("galaxy_bias_ref_0", false);
|
||||
state.newScalar("ares_heat", 1.0);
|
||||
|
||||
ScalarStateElement<CosmologicalParameters> *s_cosmo =
|
||||
new ScalarStateElement<CosmologicalParameters>();
|
||||
state.newElement("cosmology", s_cosmo);
|
||||
|
||||
CosmologicalParameters &cparams = s_cosmo->value;
|
||||
cparams.omega_r = 0.; /* negligible radiation density */
|
||||
cparams.omega_k = 0.; /* curvature - flat prior for everything! */
|
||||
cparams.omega_m = 0.3175;
|
||||
cparams.omega_b = 0.049;
|
||||
cparams.omega_q = 0.6825;
|
||||
cparams.w = -1.;
|
||||
cparams.n_s = 0.9624;
|
||||
cparams.wprime = 0.;
|
||||
cparams.sigma8 = 0.8344;
|
||||
cparams.h = 0.6711;
|
||||
cparams.beta = 1.5;
|
||||
cparams.z0 = 0.;
|
||||
cparams.a0 = 1.; /* scale factor at epoch of observation usually 1*/
|
||||
// Initialize (data,s)->t sampler
|
||||
|
||||
LikelihoodInfo info;
|
||||
|
||||
{
|
||||
namespace L = LibLSS::Likelihood;
|
||||
|
||||
info[L::MPI] = MPI_Communication::instance();
|
||||
info["ManyPower_prior_width"] = 3.5;
|
||||
|
||||
L::GridSize gs(boost::extents[3]), mpi_gs(boost::extents[6]),
|
||||
gsd(boost::extents[3]);
|
||||
L::GridLengths gl(boost::extents[6]);
|
||||
|
||||
state.getScalarArray<long, 3>("Ndata", gsd);
|
||||
|
||||
gs[0] = N0->value;
|
||||
gs[1] = N1->value;
|
||||
gs[2] = N2->value;
|
||||
mpi_gs[0] = startn0;
|
||||
mpi_gs[1] = startn0 + localn0;
|
||||
mpi_gs[2] = 0;
|
||||
mpi_gs[3] = N1->value;
|
||||
mpi_gs[4] = 0;
|
||||
mpi_gs[5] = N2->value;
|
||||
gl[0] = corner0->value;
|
||||
gl[1] = corner0->value + L0->value;
|
||||
gl[2] = corner1->value;
|
||||
gl[3] = corner1->value + L1->value;
|
||||
gl[4] = corner2->value;
|
||||
gl[5] = corner2->value + L2->value;
|
||||
|
||||
info[L::GRID] = gs;
|
||||
info[L::GRID_LENGTH] = gl;
|
||||
info[L::MPI_GRID] = mpi_gs;
|
||||
info[L::DATA_GRID] = gsd;
|
||||
info["EFT_Lambda"] = 0.07;
|
||||
|
||||
std::shared_ptr<boost::multi_array_ref<long, 3>> cmap =
|
||||
std::make_shared<boost::multi_array<long, 3>>(boost::extents[range(
|
||||
startn0, startn0 + localn0)][N1->value][N2->value]);
|
||||
|
||||
array::fill(*cmap, 0);
|
||||
|
||||
for (int i = startn0; i < startn0 + localn0; i++) {
|
||||
for (int j = 0; j < N1->value; j++) {
|
||||
for (int k = 0; k < N2->value; k++) {
|
||||
long idx = (i + j * N0->value + k * N0->value * N1->value) % 8;
|
||||
|
||||
(*cmap)[i][j][k] = idx;
|
||||
}
|
||||
}
|
||||
}
|
||||
auto p_cmap = make_promise_pointer(cmap);
|
||||
info[L::COLOR_MAP] = p_cmap;
|
||||
|
||||
p_cmap.defer.submit_ready();
|
||||
}
|
||||
|
||||
#ifdef DATA_SETUP
|
||||
DATA_SETUP(state);
|
||||
#endif
|
||||
|
||||
DummyPowerSpectrum dummy_p(comm);
|
||||
HMCDensitySampler::Likelihood_t likelihood = makeLikelihood(info);
|
||||
HMCDensitySampler hmc(comm, likelihood);
|
||||
|
||||
dummy_p.init_markov(state);
|
||||
|
||||
auto model = makeModel(comm, state, box, box2);
|
||||
auto chain = std::make_shared<ChainForwardModel>(comm, box);
|
||||
auto fixer = std::make_shared<ForwardHermiticOperation>(comm, box);
|
||||
|
||||
BorgModelElement *model_element = new BorgModelElement();
|
||||
|
||||
chain->addModel(fixer);
|
||||
chain->addModel(model);
|
||||
|
||||
{
|
||||
ArrayType1d::ArrayType vobs(boost::extents[3]);
|
||||
vobs[0] = 1000.;
|
||||
vobs[1] = -300;
|
||||
vobs[2] = 200.;
|
||||
model->setObserver(vobs);
|
||||
}
|
||||
model_element->obj = std::shared_ptr<BORGForwardModel>(chain);
|
||||
state.newElement("BORG_model", model_element);
|
||||
|
||||
createCosmologicalPowerSpectrum(state, cparams);
|
||||
|
||||
hmc.init_markov(state);
|
||||
|
||||
likelihood->setupDefaultParameters(state, 0);
|
||||
|
||||
// Build some mock field
|
||||
|
||||
sel_data->eigen().fill(1);
|
||||
sel_data2->eigen().fill(1);
|
||||
|
||||
#if 0
|
||||
//Build spherical mask
|
||||
for (int n0 = 0; n0 < M; n0++) {
|
||||
for (int n1 = 0; n1 < M; n1++) {
|
||||
for (int n2 = 0; n2 < M; n2++) {
|
||||
|
||||
double r= sqrt((n0-M/2)*(n0-M/2) + (n1-M/2)*(n1-M/2) +(n2-M/2)*(n2-M/2));
|
||||
|
||||
if(r>M/4) (*sel_data->array)[n0][n1][n2] = 0.;
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#if 0
|
||||
for (int n0 = startn0; n0 < startn0 + localn0; n0++) {
|
||||
double in0 = (n0 - M / 2) * 1.0 / M;
|
||||
for (int n1 = 0; n1 < M; n1++) {
|
||||
double in1 = (n1 - M / 2) * 1.0 / M;
|
||||
for (int n2 = 0; n2 < M; n2++) {
|
||||
double in2 = (n2 - M / 2) * 1.0 / M;
|
||||
double S =
|
||||
exp(-0.5 * (in0 * in0 + in1 * in1 + in2 * in2) / (0.2 * 0.2));
|
||||
double r = sqrt(
|
||||
(n0 - M / 2) * (n0 - M / 2) + (n1 - M / 2) * (n1 - M / 2) +
|
||||
(n2 - M / 2) * (n2 - M / 2));
|
||||
|
||||
(*sel_data->array)[n0][n1][n2] = S;
|
||||
if (r > M / 4)
|
||||
(*sel_data->array)[n0][n1][n2] = 0.;
|
||||
(*sel_data2->array)[n0][n1][n2] = (*sel_data->array)[n0][n1][n2];
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
// Build some s field
|
||||
|
||||
long N2real;
|
||||
|
||||
#ifdef ARES_MPI_FFTW
|
||||
N2real = 2 * N2_HC->value;
|
||||
#else
|
||||
N2real = N2->value;
|
||||
#endif
|
||||
|
||||
hmc.generateMockData(state);
|
||||
cons.setVerboseLevel<LOG_INFO>();
|
||||
hmc.checkGradient(state, STEP_GRADIENT);
|
||||
//hmc.checkGradientReal(state, STEP_GRADIENT);
|
||||
|
||||
{
|
||||
std::shared_ptr<H5::H5File> f;
|
||||
|
||||
if (comm->rank() == 0)
|
||||
f = std::make_shared<H5::H5File>("dump.h5", H5F_ACC_TRUNC);
|
||||
|
||||
state.mpiSaveState(f, comm, false);
|
||||
}
|
||||
}
|
||||
StaticInit::finalize();
|
||||
doneMPI();
|
||||
|
||||
return 0;
|
||||
}
|
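hmc.checkGradient(state, STEP_GRADIENT) compares the adjoint-computed gradient of the log-likelihood with finite differences over a subset of modes. A standalone toy version of that kind of check (the small linear model and chi-square functional are invented for illustration):

#include <array>
#include <cstdio>

static constexpr int N = 3;
static const double A[N][N] = {{2, 0, 1}, {0, 1, 0}, {1, 0, 3}};
static const double d[N] = {1.0, -0.5, 2.0};

// -2 ln L = sum_i (A s - d)_i^2
static double chi2(std::array<double, N> const &s) {
  double E = 0;
  for (int i = 0; i < N; i++) {
    double r = -d[i];
    for (int j = 0; j < N; j++)
      r += A[i][j] * s[j];
    E += r * r;
  }
  return E;
}

int main() {
  std::array<double, N> s = {0.3, -0.2, 0.7};
  const double eps = 1e-5;

  for (int j = 0; j < N; j++) {
    // Analytic adjoint gradient: 2 * sum_i A_ij (A s - d)_i
    double analytic = 0;
    for (int i = 0; i < N; i++) {
      double r = -d[i];
      for (int k = 0; k < N; k++)
        r += A[i][k] * s[k];
      analytic += 2.0 * A[i][j] * r;
    }
    // Central finite difference in component j
    auto sp = s, sm = s;
    sp[j] += eps;
    sm[j] -= eps;
    double numerical = (chi2(sp) - chi2(sm)) / (2 * eps);
    std::printf("d chi2/ds[%d]: analytic=%g numerical=%g\n", j, analytic, numerical);
  }
  return 0;
}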
97
extra/hades/libLSS/tests/generic_mock.hpp
Normal file
97
extra/hades/libLSS/tests/generic_mock.hpp
Normal file
|
@ -0,0 +1,97 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/generic_mock.hpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#ifndef __LIBLSS_TEST_GENERIC_MOCK_HPP
|
||||
#define __LIBLSS_TEST_GENERIC_MOCK_HPP
|
||||
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/physics/likelihoods/voxel_poisson.hpp"
|
||||
#include "libLSS/tools/fusewrapper.hpp"
|
||||
#include "libLSS/samplers/core/random_number.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/samplers/rgen/hmc/hmc_density_sampler.hpp"
|
||||
#include <CosmoTool/hdf5_array.hpp>
|
||||
|
||||
template <typename Likelihood>
|
||||
void generate_mock_data(
|
||||
LibLSS::MPI_Communication *comm, LibLSS::MarkovState &state, size_t const N,
|
||||
double const L) {
|
||||
using namespace LibLSS;
|
||||
typedef typename Likelihood::bias_t bias_t;
|
||||
double const nmean = state.getScalar<double>("galaxy_nmean_0");
|
||||
auto const &bias_params = *state.get<ArrayType1d>("galaxy_bias_0")->array;
|
||||
auto model = state.get<BorgModelElement>("BORG_model")->obj;
|
||||
|
||||
// Make three sinus
|
||||
double const epsilon = 0.1;
|
||||
|
||||
typedef FFTW_Manager_3d<double> FFT_Manager;
|
||||
FFT_Manager mgr(N, N, N, comm);
|
||||
FFT_Manager::ArrayReal ic_field(
|
||||
mgr.extents_real(), boost::c_storage_order(), mgr.allocator_real);
|
||||
FFT_Manager::ArrayReal final_density(
|
||||
model->out_mgr->extents_real(), boost::c_storage_order(), model->out_mgr->allocator_real);
|
||||
FFT_Manager::ArrayFourier ic_field_hat(
|
||||
mgr.extents_complex(), boost::c_storage_order(), mgr.allocator_complex);
|
||||
|
||||
auto rhom = fwrap(ic_field);
|
||||
|
||||
rhom = LibLSS::b_fused_idx<double, 3>(
|
||||
[N, epsilon](size_t const i, size_t const j, size_t const k) -> double {
|
||||
double x = double(i) / N;
|
||||
double z = double(j) / N;
|
||||
double y = double(k) / N;
|
||||
|
||||
return epsilon * sin(M_PI * x) * sin(M_PI * y) * sin(M_PI * z);
|
||||
});
|
||||
|
||||
// Compute the Fourier transform, as that is the required input for the forward
|
||||
// model.
|
||||
auto w_ic_hat = fwrap(ic_field_hat);
|
||||
FFT_Manager::plan_type aplan =
|
||||
mgr.create_r2c_plan(ic_field.data(), ic_field_hat.data());
|
||||
mgr.execute_r2c(aplan, ic_field.data(), ic_field_hat.data());
|
||||
w_ic_hat = w_ic_hat * double((L * L * L) / (N * N * N));
|
||||
fwrap(*state.get<CArrayType>("s_hat_field")->array) = ic_field_hat;
|
||||
|
||||
auto vobs = state.get<ArrayType1d>("BORG_vobs")->array;
|
||||
|
||||
typedef ScalarStateElement<CosmologicalParameters> CosmoElement;
|
||||
GenericDetails::compute_forward(
|
||||
model->lo_mgr, model, state.get<CosmoElement>("cosmology")->value,
|
||||
state.getScalar<double>("borg_a_initial"), *vobs, ModelInput<3>(model->lo_mgr, model->get_box_model(), ic_field_hat),
|
||||
ModelOutput<3>(model->out_mgr, model->get_box_model_output(), final_density), false);
|
||||
|
||||
auto data = fwrap(*state.get<ArrayType>("galaxy_data_0")->array);
|
||||
|
||||
RandomNumber &rgen = state.get<RandomGen>("random_generator")->get();
|
||||
|
||||
// Make nmean=3, data
|
||||
bias_t bias_func;
|
||||
bias_func.prepare(*model, final_density, nmean, bias_params, true);
|
||||
Console::instance().print<LOG_VERBOSE>(
|
||||
"Density field ready. Now do joint selection + sample of mock data");
|
||||
data = Likelihood::likelihood_t::sample(
|
||||
rgen, bias_func.selection_adaptor.apply(
|
||||
b_fused_idx<double, 3>(
|
||||
FuseWrapper_detail::constantFunctor<double>(1),
|
||||
boost::extents[N][N][N]),
|
||||
bias_func.compute_density(final_density)));
|
||||
Console::instance().print<LOG_VERBOSE>("Done with mock. Save now.");
|
||||
bias_func.cleanup();
|
||||
|
||||
H5::H5File f("mock.h5", H5F_ACC_TRUNC);
|
||||
CosmoTool::hdf5_write_array(f, "data", *data);
|
||||
CosmoTool::hdf5_write_array(f, "ic_field", ic_field);
|
||||
CosmoTool::hdf5_write_array(f, "ic_hat_field", ic_field_hat);
|
||||
CosmoTool::hdf5_write_array(f, "final_density", final_density);
|
||||
}
|
||||
|
||||
#endif
|
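generate_mock_data above builds a smooth sine-product field in real space and multiplies FFTW's unnormalized r2c output by L^3/N^3 so the Fourier amplitudes follow the box-volume convention expected by the forward model. A 1-D sketch of that normalization (assumes FFTW3 is available; the box size is an example value):

#include <fftw3.h>
#include <vector>
#include <complex>
#include <cmath>
#include <cstdio>

int main() {
  const int N = 64;
  const double L = 100.0; // box size
  std::vector<double> field(N);
  std::vector<std::complex<double>> field_hat(N / 2 + 1);

  for (int i = 0; i < N; i++)
    field[i] = std::sin(2 * M_PI * i / N);

  fftw_plan plan = fftw_plan_dft_r2c_1d(
      N, field.data(), reinterpret_cast<fftw_complex *>(field_hat.data()),
      FFTW_ESTIMATE);
  fftw_execute(plan);
  fftw_destroy_plan(plan);

  for (auto &v : field_hat)
    v *= L / N; // volume normalization (L^3 / N^3 for the 3-D cube)

  // The k = 2*pi/L mode should carry essentially all the power.
  std::printf("|field_hat[1]| = %g (expect ~%g)\n", std::abs(field_hat[1]), L / 2);
  return 0;
}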
74
extra/hades/libLSS/tests/hades_gradients.py_config
Normal file
74
extra/hades/libLSS/tests/hades_gradients.py_config
Normal file
|
@ -0,0 +1,74 @@
|
|||
tests = {
|
||||
'tests': {
|
||||
'linear': {
|
||||
'includes': [
|
||||
"libLSS/samplers/hades/hades_linear_likelihood.hpp",
|
||||
"libLSS/physics/hades_pt.hpp"
|
||||
],
|
||||
'likelihood':
|
||||
"LibLSS::HadesLinearDensityLikelihood",
|
||||
'model_args':
|
||||
'comm, box, box, 0.001, 1',
|
||||
'model':
|
||||
'LibLSS::HadesLinear'
|
||||
},
|
||||
'log_linear': {
|
||||
'includes': [
|
||||
"libLSS/samplers/hades/hades_linear_likelihood.hpp",
|
||||
"libLSS/physics/hades_log.hpp"
|
||||
],
|
||||
'likelihood':
|
||||
"LibLSS::HadesLinearDensityLikelihood",
|
||||
'model':
|
||||
'LibLSS::HadesLog',
|
||||
'model_args':
|
||||
'comm, box, 0.001'
|
||||
},
|
||||
'primordial_linear': {
|
||||
'includes': [
|
||||
"libLSS/samplers/hades/hades_linear_likelihood.hpp",
|
||||
"libLSS/physics/forwards/primordial.hpp"
|
||||
],
|
||||
'likelihood':
|
||||
"LibLSS::HadesLinearDensityLikelihood",
|
||||
'model':
|
||||
'LibLSS::ForwardPrimordial',
|
||||
'model_args':
|
||||
'comm, box, 0.001'
|
||||
},
|
||||
'chain_primordial_log_linear': {
|
||||
'includes': [
|
||||
"libLSS/samplers/hades/hades_linear_likelihood.hpp",
|
||||
"libLSS/physics/chain_forward_model.hpp",
|
||||
"libLSS/physics/forwards/primordial.hpp",
|
||||
"libLSS/physics/forwards/transfer_ehu.hpp",
|
||||
"libLSS/physics/hades_log.hpp"
|
||||
],
|
||||
'likelihood':
|
||||
"LibLSS::HadesLinearDensityLikelihood",
|
||||
'model': [
|
||||
'LibLSS::ForwardPrimordial', 'LibLSS::ForwardEisensteinHu',
|
||||
'LibLSS::HadesLog'
|
||||
],
|
||||
'model_args':
|
||||
['comm, box, 0.001', 'comm, box', 'comm, box, .001']
|
||||
},
|
||||
'chain_primordial_pt_linear': {
|
||||
'includes': [
|
||||
"libLSS/samplers/hades/hades_linear_likelihood.hpp",
|
||||
"libLSS/physics/chain_forward_model.hpp",
|
||||
"libLSS/physics/forwards/primordial.hpp",
|
||||
"libLSS/physics/forwards/transfer_ehu.hpp",
|
||||
"libLSS/physics/hades_pt.hpp"
|
||||
],
|
||||
'likelihood':
|
||||
"LibLSS::HadesLinearDensityLikelihood",
|
||||
'model': [
|
||||
'LibLSS::ForwardPrimordial', 'LibLSS::ForwardEisensteinHu',
|
||||
'LibLSS::HadesLinear'
|
||||
],
|
||||
'model_args':
|
||||
['comm, box, .001', 'comm, box', 'comm, box, box, .001, 1']
|
||||
}
|
||||
}
|
||||
}
|
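Each entry of this dictionary is presumably expanded by the test generator into a gradient test that includes the listed headers and builds the listed model(s) with the given constructor arguments, chaining multi-element entries in order. A hedged sketch of what the 'chain_primordial_log_linear' case is expected to construct (the exact generated code is not part of this import; the builder function name is hypothetical):

#include "libLSS/physics/chain_forward_model.hpp"
#include "libLSS/physics/forwards/primordial.hpp"
#include "libLSS/physics/forwards/transfer_ehu.hpp"
#include "libLSS/physics/hades_log.hpp"
#include <memory>

namespace {
  // Hypothetical helper mirroring the config entry: primordial -> E&Hu
  // transfer -> log-transformed linear evolution, chained in order.
  std::shared_ptr<LibLSS::BORGForwardModel>
  buildChain(LibLSS::MPI_Communication *comm, LibLSS::BoxModel const &box) {
    auto chain = std::make_shared<LibLSS::ChainForwardModel>(comm, box);
    chain->addModel(std::make_shared<LibLSS::ForwardPrimordial>(comm, box, 0.001));
    chain->addModel(std::make_shared<LibLSS::ForwardEisensteinHu>(comm, box));
    chain->addModel(std::make_shared<LibLSS::HadesLog>(comm, box, 0.001));
    return chain;
  }
} // namespace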
250
extra/hades/libLSS/tests/setup_hades_test_run.cpp
Normal file
250
extra/hades/libLSS/tests/setup_hades_test_run.cpp
Normal file
|
@ -0,0 +1,250 @@
|
|||
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/setup_hades_test_run.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/samplers/core/powerspec_tools.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/samplers/rgen/gsl_random_number.hpp"
|
||||
#include "libLSS/tools/array_tools.hpp"
|
||||
#include "libLSS/physics/cosmo.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include <CosmoTool/cosmopower.hpp>
|
||||
#include <CosmoTool/algo.hpp>
|
||||
#include <boost/format.hpp>
|
||||
#include "libLSS/tools/fusewrapper.hpp"
|
||||
#include "setup_hades_test_run.hpp"
|
||||
#include "libLSS/physics/likelihoods/base.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::format;
|
||||
using CosmoTool::square;
|
||||
using std::string;
|
||||
|
||||
typedef boost::multi_array_types::extent_range range;
|
||||
|
||||
typedef RandomNumberThreaded<GSL_RandomNumber> RGenType;
|
||||
|
||||
namespace {
|
||||
#if defined(ARES_MPI_FFTW)
|
||||
RegisterStaticInit reg0(fftw_mpi_init, fftw_mpi_cleanup, 9, "MPI/FFTW");
|
||||
#endif
|
||||
RegisterStaticInit reg1(
|
||||
CosmoTool::init_fftw_wisdom, CosmoTool::save_fftw_wisdom, 10,
|
||||
"FFTW/WISDOM");
|
||||
|
||||
class DummyPowerSpectrum : public PowerSpectrumSampler_Base {
|
||||
public:
|
||||
DummyPowerSpectrum(MPI_Communication *comm)
|
||||
: PowerSpectrumSampler_Base(comm) {}
|
||||
|
||||
virtual void initialize(MarkovState &state) { initialize_base(state); }
|
||||
virtual void restore(MarkovState &state) { restore_base(state); }
|
||||
|
||||
virtual void sample(MarkovState &state) {}
|
||||
};
|
||||
|
||||
constexpr size_t TEST_NUM_MODES = 200;
|
||||
|
||||
}; // namespace
|
||||
|
||||
static void createCosmologicalPowerSpectrum(
|
||||
MarkovState &state, CosmologicalParameters &cosmo_params,
|
||||
double adjust = 1) {
|
||||
double h;
|
||||
CosmoTool::CosmoPower cpower;
|
||||
|
||||
h = cpower.h = cosmo_params.h;
|
||||
cpower.OMEGA_B = cosmo_params.omega_b;
|
||||
cpower.OMEGA_C = cosmo_params.omega_m - cosmo_params.omega_b;
|
||||
cpower.SIGMA8 = cosmo_params.sigma8;
|
||||
cpower.setFunction(CosmoTool::CosmoPower::HU_WIGGLES);
|
||||
cpower.updateCosmology();
|
||||
cpower.normalize();
|
||||
|
||||
ArrayType1d::ArrayType &k = *state.get<ArrayType1d>("k_modes")->array;
|
||||
ArrayType1d::ArrayType &Pk = *state.get<ArrayType1d>("powerspectrum")->array;
|
||||
for (size_t i = 0; i < k.num_elements(); i++) {
|
||||
Pk[i] = cpower.power(k[i] * h) * h * h * h * adjust;
|
||||
}
|
||||
}
|
||||
|
||||
void LibLSS_test::setup_box(MarkovState &state, BoxModel &box) {
|
||||
box.xmin0 = state.getScalar<double>("corner0");
|
||||
box.xmin1 = state.getScalar<double>("corner1");
|
||||
box.xmin2 = state.getScalar<double>("corner2");
|
||||
box.L0 = state.getScalar<double>("L0");
|
||||
box.L1 = state.getScalar<double>("L1");
|
||||
box.L2 = state.getScalar<double>("L2");
|
||||
box.N0 = state.getScalar<long>("N0");
|
||||
box.N1 = state.getScalar<long>("N1");
|
||||
box.N2 = state.getScalar<long>("N2");
|
||||
}
|
||||
|
||||
void LibLSS_test::setup_likelihood_info(
|
||||
MarkovState &state, LikelihoodInfo &info, MPI_Communication *comm) {
|
||||
namespace L = LibLSS::Likelihood;
|
||||
|
||||
info[L::MPI] = comm;
|
||||
info["ManyPower_prior_width"] = 3.5;
|
||||
|
||||
L::GridSize gs(boost::extents[3]), gsd(boost::extents[3]),
|
||||
mpi_gs(boost::extents[6]);
|
||||
L::GridLengths gl(boost::extents[6]);
|
||||
|
||||
state.getScalarArray<long, 3>("N", gs);
|
||||
mpi_gs[0] = state.getScalar<long>("startN0");
|
||||
mpi_gs[1] = mpi_gs[0] + state.getScalar<long>("localN0");
|
||||
mpi_gs[2] = 0;
|
||||
mpi_gs[3] = gs[1];
|
||||
mpi_gs[4] = 0;
|
||||
mpi_gs[5] = gs[2];
|
||||
gl[0] = state.getScalar<double>("corner0");
|
||||
gl[2] = state.getScalar<double>("corner1");
|
||||
gl[4] = state.getScalar<double>("corner2");
|
||||
gl[1] = gl[0] + state.getScalar<double>("L0");
|
||||
gl[3] = gl[2] + state.getScalar<double>("L1");
|
||||
gl[5] = gl[4] + state.getScalar<double>("L2");
|
||||
|
||||
info[L::GRID] = gs;
|
||||
info[L::GRID_LENGTH] = gl;
|
||||
info[L::MPI_GRID] = mpi_gs;
|
||||
info["EFT_Lambda"] = 0.15; // Some default, 0.15 h/Mpc
|
||||
if (state.exists("Ndata0")) {
|
||||
state.getScalarArray<long, 3>("Ndata", gsd);
|
||||
info[L::DATA_GRID] = gsd;
|
||||
}
|
||||
|
||||
std::shared_ptr<boost::multi_array_ref<long, 3>> cmap =
|
||||
std::make_shared<boost::multi_array<long, 3>>(
|
||||
boost::extents[range(mpi_gs[0], mpi_gs[1])][mpi_gs[3]][mpi_gs[5]]);
|
||||
array::fill(*cmap, 0);
|
||||
|
||||
for (int i = mpi_gs[0]; i < mpi_gs[1]; i++) {
|
||||
for (int j = 0; j < mpi_gs[3]; j++) {
|
||||
for (int k = 0; k < mpi_gs[5]; k++) {
|
||||
long idx = (i + j * gs[0] + k * gs[0] * gs[1]) % 8;
|
||||
|
||||
(*cmap)[i][j][k] = idx;
|
||||
}
|
||||
}
|
||||
}
|
||||
auto promise_cmap = make_promise_pointer(cmap);
|
||||
info[L::COLOR_MAP] = promise_cmap;
|
||||
|
||||
promise_cmap.defer.submit_ready();
|
||||
}
|
||||
|
||||
void LibLSS_test::setup_hades_test_run(
|
||||
MPI_Communication *comm, size_t Nbase, double L, MarkovState &state,
|
||||
boost::multi_array_ref<double, 1> *bias_params) {
|
||||
Console &cons = Console::instance();
|
||||
|
||||
SelArrayType *sel_data, *s_sel_data;
|
||||
ArrayType1d *bias0;
|
||||
ArrayType *data0, *growth;
|
||||
RGenType *randgen = new RGenType(-1);
|
||||
|
||||
randgen->seed(23482098);
|
||||
|
||||
cons.print<LOG_INFO>("Setting up a mock run configuration");
|
||||
|
||||
state.newElement(
|
||||
"random_generator", new RandomStateElement<RandomNumber>(randgen, true));
|
||||
|
||||
state.newScalar<long>("N0", Nbase);
|
||||
state.newScalar<long>("N1", Nbase);
|
||||
state.newScalar<long>("N2", Nbase);
|
||||
state.newScalar<long>("N2_HC", Nbase / 2 + 1);
|
||||
state.newScalar<long>("NUM_MODES", TEST_NUM_MODES);
|
||||
state.newScalar<double>("K_MIN", 0);
|
||||
state.newScalar<double>("K_MAX", 2 * M_PI / L * Nbase * 1.1 * std::sqrt(3.0));
|
||||
|
||||
state.newScalar<double>("L0", L);
|
||||
state.newScalar<double>("L1", L);
|
||||
state.newScalar<double>("L2", L);
|
||||
|
||||
state.newScalar<long>("NCAT", 1);
|
||||
|
||||
state.newScalar<double>("ares_heat", 1.0);
|
||||
|
||||
state.newScalar<double>("corner0", -L / 2);
|
||||
state.newScalar<double>("corner1", -L / 2);
|
||||
state.newScalar<double>("corner2", -L / 2);
|
||||
|
||||
state.newScalar<double>("borg_a_initial", 0.001);
|
||||
|
||||
state.newScalar<int>("borg_pm_nsteps", 30);
|
||||
state.newScalar<double>("borg_pm_start_z", 69.);
|
||||
|
||||
FFTW_Manager_3d<double> mgr(Nbase, Nbase, Nbase, comm);
|
||||
|
||||
state.newScalar<long>("startN0", mgr.startN0);
|
||||
state.newScalar<long>("localN0", mgr.localN0);
|
||||
state.newScalar<long>("fourierLocalSize", mgr.allocator_real.minAllocSize);
|
||||
|
||||
auto local_extent =
|
||||
boost::extents[range(mgr.startN0, mgr.startN0 + mgr.localN0)][Nbase]
|
||||
[Nbase];
|
||||
auto full_extent = ArrayDimension(Nbase, Nbase, Nbase);
|
||||
|
||||
state.newElement("growth_factor", growth = new ArrayType(local_extent));
|
||||
growth->eigen().fill(1);
|
||||
growth->setRealDims(full_extent);
|
||||
state.newElement("galaxy_data_0", data0 = new ArrayType(local_extent));
|
||||
data0->setRealDims(full_extent);
|
||||
state.newElement(
|
||||
"galaxy_sel_window_0", sel_data = new SelArrayType(local_extent));
|
||||
state.newElement(
|
||||
"galaxy_synthetic_sel_window_0",
|
||||
s_sel_data = new SelArrayType(local_extent));
|
||||
sel_data->setRealDims(full_extent);
|
||||
s_sel_data->setRealDims(full_extent);
|
||||
|
||||
size_t Nb = (bias_params == 0) ? 1 : bias_params->shape()[0];
|
||||
state.newElement(
|
||||
"galaxy_bias_0", bias0 = new ArrayType1d(boost::extents[Nb]));
|
||||
|
||||
if (bias_params == 0)
|
||||
(*bias0->array)[0] = 2;
|
||||
else
|
||||
fwrap(*bias0->array) = *bias_params;
|
||||
state.newScalar<double>("galaxy_nmean_0", 20);
|
||||
state.newScalar<bool>("galaxy_bias_ref_0", true);
|
||||
|
||||
DummyPowerSpectrum dummy_p(comm);
|
||||
|
||||
dummy_p.init_markov(state);
|
||||
|
||||
ScalarStateElement<CosmologicalParameters> *s_cosmo =
|
||||
new ScalarStateElement<CosmologicalParameters>();
|
||||
state.newElement("cosmology", s_cosmo);
|
||||
|
||||
CosmologicalParameters &cparams = s_cosmo->value;
|
||||
cparams.omega_r = 0.; /* negligible radiation density */
|
||||
cparams.omega_k = 0.; /* curvature - flat prior for everything! */
|
||||
cparams.omega_m = 0.3175;
|
||||
cparams.omega_b = 0.049;
|
||||
cparams.omega_q = 0.6825;
|
||||
cparams.w = -1.;
|
||||
cparams.n_s = 0.9624;
|
||||
cparams.wprime = 0.;
|
||||
cparams.sigma8 = 0.8344;
|
||||
cparams.h = 0.6711;
|
||||
cparams.beta = 1.5;
|
||||
cparams.z0 = 0.;
|
||||
cparams.a0 = 1.; /* scale factor at epoch of observation, usually 1 */
|
||||
|
||||
createCosmologicalPowerSpectrum(state, cparams);
|
||||
|
||||
// Build some mock field
|
||||
|
||||
sel_data->eigen().fill(1);
|
||||
s_sel_data->eigen().fill(1);
|
||||
}
|
29
extra/hades/libLSS/tests/setup_hades_test_run.hpp
Normal file
@@ -0,0 +1,29 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/setup_hades_test_run.hpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#ifndef __LIBLSS_SETUP_HADES_TEST_HPP
|
||||
#define __LIBLSS_SETUP_HADES_TEST_HPP
|
||||
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include "libLSS/mcmc/global_state.hpp"
|
||||
#include "libLSS/physics/likelihoods/base.hpp"
|
||||
|
||||
namespace LibLSS_test {
|
||||
void setup_box(LibLSS::MarkovState &state, LibLSS::BoxModel &box);
|
||||
void setup_hades_test_run(
|
||||
LibLSS::MPI_Communication *comm, size_t Nbase, double L,
|
||||
LibLSS::MarkovState &state,
|
||||
boost::multi_array_ref<double, 1> *bias_params = 0);
|
||||
|
||||
void setup_likelihood_info(
|
||||
LibLSS::MarkovState &state, LibLSS::LikelihoodInfo &info, LibLSS::MPI_Communication *comm = LibLSS::MPI_Communication::instance());
|
||||
} // namespace LibLSS_test
|
||||
|
||||
#endif
|
59
extra/hades/libLSS/tests/test_ghost_array.cpp
Normal file
@@ -0,0 +1,59 @@
#include <boost/format.hpp>
|
||||
#include "libLSS/mpi/generic_mpi.hpp"
|
||||
#include "libLSS/tools/mpi/ghost_array.hpp"
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/tools/console.hpp"
|
||||
#include "libLSS/tools/string_tools.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
MPI_Communication *comm = setupMPI(argc, argv);
|
||||
|
||||
StaticInit::execute();
|
||||
|
||||
Console::instance().outputToFile(
|
||||
boost::str(boost::format("ghost_test.txt_%d") % comm->rank()));
|
||||
|
||||
|
||||
GhostArray<int> ghost;
|
||||
|
||||
int Ncomm = comm->size();
|
||||
|
||||
boost::multi_array<double, 1> test_array(boost::extents[2]);
|
||||
boost::multi_array<int, 1> test_idx(boost::extents[2]);
|
||||
|
||||
if (comm->size() > 1) {
|
||||
test_idx[0] = comm->rank();
|
||||
test_idx[1] = (comm->rank() + 1) % comm->size();
|
||||
} else {
|
||||
test_idx[0] = 0;
|
||||
test_idx[1] = 1;
|
||||
}
|
||||
|
||||
test_array[0] = 0.5;
|
||||
test_array[1] = 0.5;
|
||||
|
||||
ghost.setup(comm, test_idx);
|
||||
|
||||
int rank = comm->rank();
|
||||
int rank_next = (comm->rank() + 1) % comm->size();
|
||||
ghost.synchronize(test_array, [rank, rank_next](size_t i) {
|
||||
if (i == rank)
|
||||
return 0;
|
||||
else if (i == rank_next)
|
||||
return 1;
|
||||
else {
|
||||
Console::instance().print<LOG_ERROR>("Invalid index");
|
||||
MPI_Communication::instance()->abort();
|
||||
return 0;
|
||||
}
|
||||
});
|
||||
|
||||
Console::instance().print<LOG_VERBOSE>(
|
||||
"Result: " + LibLSS::to_string(test_array));
|
||||
|
||||
doneMPI();
|
||||
|
||||
return 0;
|
||||
}
|
99
extra/hades/libLSS/tests/test_hermiticity.cpp
Normal file
@@ -0,0 +1,99 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/test_hermiticity.cpp
|
||||
Copyright (C) 2019-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#define BOOST_TEST_MODULE modelio
|
||||
#define BOOST_TEST_NO_MAIN
|
||||
#define BOOST_TEST_ALTERNATIVE_INIT_API
|
||||
#include <boost/test/included/unit_test.hpp>
|
||||
#include <boost/test/data/test_case.hpp>
|
||||
#include <H5Cpp.h>
|
||||
#include <boost/multi_array.hpp>
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/tests/testFramework.hpp"
|
||||
#include "libLSS/tools/hdf5_error.hpp"
|
||||
#include "libLSS/tools/hermiticity_fixup.hpp"
|
||||
#include "libLSS/tools/fusewrapper.hpp"
|
||||
#include <memory>
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::extents;
|
||||
using namespace CosmoTool;
|
||||
|
||||
namespace utf = boost::unit_test;
|
||||
|
||||
BOOST_AUTO_TEST_CASE(hermitic_forward) {
|
||||
int const N = 32;
|
||||
auto comm = MPI_Communication::instance();
|
||||
auto mgr = std::make_shared<FFTW_Manager<double, 3>>(N, N, N, comm);
|
||||
typedef boost::multi_array<std::complex<double>, 3> CArray;
|
||||
CArray input(mgr->extents_complex());
|
||||
CArray input_ref(mgr->extents_complex());
|
||||
CArray rebuilt(boost::extents[N][N][N / 2 + 1]);
|
||||
|
||||
LibLSS_tests::loadReferenceInput(N, rebuilt);
|
||||
fwrap(input) = 0;
|
||||
fwrap(input[mgr->complex_range()]) = fwrap(rebuilt[mgr->complex_range()]);
|
||||
fwrap(rebuilt) = 0;
|
||||
|
||||
Hermiticity_fixer<double, 3> fixer(mgr);
|
||||
size_t numLines = mgr->localN0;
|
||||
|
||||
fixer.forward(input);
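// After the fix, rank 0 gathers the local slabs of `input` from every task and
// reassembles the full half-complex grid in `rebuilt` for comparison with the
// reference field.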
|
||||
|
||||
|
||||
if (comm->rank() == 0) {
|
||||
long numPlanes, q = 0;
|
||||
|
||||
fwrap(rebuilt[mgr->complex_range()]) = fwrap(input[mgr->complex_range()]);
|
||||
q += mgr->localN0;
|
||||
for (int r = 1; r < comm->size(); r++) {
|
||||
comm->recv(&numPlanes, 1, translateMPIType<long>(), r, r);
|
||||
comm->recv(
|
||||
&rebuilt[q][0][0], numPlanes * mgr->N1 * mgr->N2_HC,
|
||||
translateMPIType<std::complex<double>>(), r, r);
|
||||
q += numPlanes;
|
||||
}
|
||||
} else {
|
||||
long numPlanes = mgr->localN0;
|
||||
comm->send(&numPlanes, 1, translateMPIType<long>(), 0, comm->rank());
|
||||
comm->send(
|
||||
&input[mgr->startN0][0][0], numPlanes * mgr->N1 * mgr->N2_HC,
|
||||
translateMPIType<std::complex<double>>(), 0, comm->rank());
|
||||
H5::H5File f("dump_rank.h5_"+to_string(comm->rank()), H5F_ACC_TRUNC);
|
||||
CosmoTool::hdf5_write_array(f, "input", input);
|
||||
}
|
||||
if (comm->rank() == 0) {
|
||||
CArray input_ref_full(boost::extents[N][N][N / 2 + 1]);
|
||||
LibLSS_tests::loadReferenceInput(N, input_ref_full);
|
||||
double norm = std::abs(fwrap(input_ref_full)).sum();
|
||||
double rel_difference =
|
||||
(std::abs(fwrap(rebuilt) - fwrap(input_ref_full)).sum()) / norm;
|
||||
BOOST_CHECK_LT(rel_difference, 1e-6);
|
||||
|
||||
H5::H5File f("dump.h5", H5F_ACC_TRUNC);
|
||||
CosmoTool::hdf5_write_array(f, "rebuilt", rebuilt);
|
||||
CosmoTool::hdf5_write_array(f, "input_ref", input_ref_full);
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
setupMPI(argc, argv);
|
||||
LibLSS::QUIET_CONSOLE_START = true;
|
||||
StaticInit::execute();
|
||||
LibLSS::Console::instance().setVerboseLevel<LOG_DEBUG>();
|
||||
|
||||
int ret = utf::unit_test_main(&init_unit_test, argc, argv);
|
||||
|
||||
StaticInit::finalize();
|
||||
doneMPI();
|
||||
return 0;
|
||||
}
|
||||
// ARES TAG: authors_num = 1
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: year(0) = 2019-2020
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
348
extra/hades/libLSS/tests/test_modelio.cpp
Normal file
@@ -0,0 +1,348 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/test_modelio.cpp
|
||||
Copyright (C) 2019-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#define BOOST_TEST_MODULE modelio
|
||||
#define BOOST_TEST_NO_MAIN
|
||||
#define BOOST_TEST_ALTERNATIVE_INIT_API
|
||||
#include <boost/test/included/unit_test.hpp>
|
||||
#include <boost/test/data/test_case.hpp>
|
||||
#include <H5Cpp.h>
|
||||
#include <boost/multi_array.hpp>
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/physics/forward_model.hpp"
|
||||
#include "libLSS/physics/model_io.hpp"
|
||||
#include "libLSS/physics/hades_pt.hpp"
|
||||
#include "libLSS/physics/hades_log.hpp"
|
||||
#include "libLSS/tests/testFramework.hpp"
|
||||
#include "libLSS/physics/hades_log.hpp"
|
||||
#include "libLSS/tools/hdf5_error.hpp"
|
||||
#include "libLSS/physics/chain_forward_model.hpp"
|
||||
#include "libLSS/physics/forwards/primordial.hpp"
|
||||
#include "libLSS/samplers/core/powerspec_tools.hpp"
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::extents;
|
||||
using namespace CosmoTool;
|
||||
|
||||
namespace utf = boost::unit_test;
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_modelio_input) {
|
||||
BoxModel box{0, 0, 0, 100, 100, 100, 32, 32, 32};
|
||||
|
||||
BOOST_CHECK_EQUAL(box.N0, 32);
|
||||
BOOST_CHECK_EQUAL(box.N1, 32);
|
||||
BOOST_CHECK_EQUAL(box.N2, 32);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("initialize manager");
|
||||
auto mgr = std::make_shared<FFTW_Manager<double, 3>>(
|
||||
box.N0, box.N1, box.N2, MPI_Communication::instance());
|
||||
|
||||
BOOST_TEST_CHECKPOINT("allocate arrays");
|
||||
auto input_array_p = mgr->allocate_array();
|
||||
auto &input_array = input_array_p.get_array();
|
||||
|
||||
BOOST_TEST_CHECKPOINT("fill up value");
|
||||
fwrap(input_array) = 1;
|
||||
|
||||
{
|
||||
BOOST_TEST_CHECKPOINT("init modelio");
|
||||
ModelInput<3> io1(mgr, box, input_array);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("request output format");
|
||||
io1.setRequestedIO(PREFERRED_REAL);
|
||||
|
||||
auto const &d = io1.getReal();
|
||||
BOOST_CHECK_CLOSE(d[0][0][0], 1, 0.1);
|
||||
}
|
||||
|
||||
{
|
||||
BOOST_TEST_CHECKPOINT("init modelio");
|
||||
ModelInput<3> io1(mgr, box, input_array);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("request output format");
|
||||
io1.setRequestedIO(PREFERRED_FOURIER);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("obtain output");
|
||||
auto const &dhat = io1.getFourierConst();
|
||||
double dVol = std::pow(box.L0, 3);
|
||||
BOOST_CHECK_CLOSE(dhat[0][0][0].real(), dVol, 0.1);
|
||||
}
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_modelio_output) {
|
||||
BoxModel box{0, 0, 0, 100, 100, 100, 32, 32, 32};
|
||||
|
||||
BOOST_CHECK_EQUAL(box.N0, 32);
|
||||
BOOST_CHECK_EQUAL(box.N1, 32);
|
||||
BOOST_CHECK_EQUAL(box.N2, 32);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("initialize manager");
|
||||
auto mgr = std::make_shared<FFTW_Manager<double, 3>>(
|
||||
box.N0, box.N1, box.N2, MPI_Communication::instance());
|
||||
|
||||
BOOST_TEST_CHECKPOINT("allocate arrays");
|
||||
auto output_array_p = mgr->allocate_array();
|
||||
auto &output_array = output_array_p.get_array();
|
||||
|
||||
{
|
||||
BOOST_TEST_CHECKPOINT("init modelio");
|
||||
ModelOutput<3> io1(mgr, box, output_array);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("request output format");
|
||||
io1.setRequestedIO(PREFERRED_REAL);
|
||||
|
||||
auto &d = io1.getRealOutput();
|
||||
fwrap(d) = 1;
|
||||
}
|
||||
BOOST_CHECK_CLOSE(output_array[0][0][0], 1, 0.1);
|
||||
|
||||
{
|
||||
BOOST_TEST_CHECKPOINT("init modelio");
|
||||
ModelOutput<3> io1(mgr, box, output_array);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("request output format");
|
||||
io1.setRequestedIO(PREFERRED_FOURIER);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("obtain fourier output");
|
||||
auto &dhat = io1.getFourierOutput();
|
||||
fwrap(dhat) = 0;
|
||||
dhat[0][0][0] = std::complex<double>(1.0, 0.0);
|
||||
}
|
||||
|
||||
BOOST_CHECK_CLOSE(output_array[0][0][0], 1.0 / (100. * 100. * 100.), 0.1);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_move) {
|
||||
BoxModel box{0, 0, 0, 100, 100, 100, 32, 32, 32};
|
||||
auto mgr = std::make_shared<FFTW_Manager<double, 3>>(
|
||||
box.N0, box.N1, box.N2, MPI_Communication::instance());
|
||||
|
||||
BOOST_TEST_CHECKPOINT("allocate arrays");
|
||||
auto input_array_p = mgr->allocate_array();
|
||||
auto &input_array = input_array_p.get_array();
|
||||
|
||||
BOOST_TEST_CHECKPOINT("fill up value");
|
||||
fwrap(input_array) = 1;
|
||||
|
||||
{
|
||||
BOOST_TEST_CHECKPOINT("init modelio");
|
||||
ModelInput<3> io1(mgr, box, input_array);
|
||||
ModelInput<3> io2;
|
||||
|
||||
io2 = std::move(io1);
|
||||
|
||||
BOOST_TEST_CHECKPOINT("request output format");
|
||||
io2.setRequestedIO(PREFERRED_REAL);
|
||||
|
||||
auto const &d = io2.getReal();
|
||||
BOOST_CHECK_CLOSE(d[0][0][0], 1, 0.1);
|
||||
}
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_modelio_pt) {
|
||||
BoxModel box{0, 0, 0, 100, 100, 100, 32, 32, 32};
|
||||
CosmologicalParameters cparams;
|
||||
|
||||
cparams.a0 = 1.0;
|
||||
cparams.fnl = 0;
|
||||
cparams.h = 0.67;
|
||||
cparams.omega_b = 0.05;
|
||||
cparams.omega_k = 0.0;
|
||||
cparams.omega_m = 0.3;
|
||||
cparams.omega_q = 0.7;
|
||||
cparams.omega_r = 0.0;
|
||||
cparams.w = -1;
|
||||
cparams.wprime = 0;
|
||||
|
||||
Cosmology cosmo(cparams);
|
||||
|
||||
auto mgr = std::make_shared<FFTW_Manager<double, 3>>(
|
||||
box.N0, box.N1, box.N2, MPI_Communication::instance());
|
||||
|
||||
HadesLinear fwd(MPI_Communication::instance(), box, box, 0.1, 1.0);
|
||||
auto input_delta_p = mgr->allocate_array();
|
||||
auto input_delta_c_p = mgr->allocate_complex_array();
|
||||
auto output_delta_p = mgr->allocate_array();
|
||||
auto ag_input_p = mgr->allocate_array();
|
||||
auto ag_output_c_p = mgr->allocate_complex_array();
|
||||
auto ag_output_p = mgr->allocate_array();
|
||||
auto output_delta_c_p = mgr->allocate_complex_array();
|
||||
|
||||
auto &input_delta = input_delta_p.get_array();
|
||||
auto &input_delta_c = input_delta_c_p.get_array();
|
||||
auto &output_delta = output_delta_p.get_array();
|
||||
auto &output_delta_c = output_delta_c_p.get_array();
|
||||
auto &ag_input = ag_input_p.get_array();
|
||||
auto &ag_output_c = ag_output_c_p.get_array();
|
||||
auto &ag_output = ag_output_p.get_array();
|
||||
auto &ag_input_c = ag_output_c_p.get_array();
|
||||
|
||||
array::fill(ag_input, 1.0);
|
||||
|
||||
fwd.setCosmoParams(cparams);
|
||||
|
||||
{
|
||||
LibLSS_tests::loadReferenceInput(box.N0, input_delta);
|
||||
double ref = input_delta[0][0][0];
|
||||
|
||||
fwrap(input_delta) =
|
||||
fwrap(input_delta) * cosmo.d_plus(0.1) / cosmo.d_plus(1.0);
|
||||
|
||||
fwd.forwardModel_v2(ModelInput<3>(mgr, box, input_delta));
|
||||
fwd.getDensityFinal(ModelOutput<3>(mgr, box, output_delta));
|
||||
|
||||
BOOST_CHECK_CLOSE(output_delta[0][0][0], ref, 0.1);
|
||||
|
||||
fwd.adjointModel_v2(ModelInputAdjoint<3>(mgr, box, ag_input));
|
||||
fwd.getAdjointModelOutput(ModelOutputAdjoint<3>(mgr, box, ag_input));
|
||||
|
||||
double scale = cosmo.d_plus(0.1) / cosmo.d_plus(1.0);
|
||||
BOOST_CHECK_CLOSE(ag_input[0][0][0], 1.0 / scale, 0.1);
|
||||
}
|
||||
|
||||
{
|
||||
LibLSS_tests::loadReferenceInput(box.N0, input_delta);
|
||||
LibLSS_tests::loadReferenceInput(box.N0, input_delta_c);
|
||||
|
||||
fwrap(input_delta_c) = fwrap(input_delta_c) * ((box.L0 * box.L1 * box.L2) /
|
||||
(box.N0 * box.N1 * box.N2));
|
||||
|
||||
fwrap(input_delta) =
|
||||
fwrap(input_delta) * cosmo.d_plus(0.1) / cosmo.d_plus(1.0);
|
||||
fwd.forwardModel_v2(ModelInput<3>(mgr, box, input_delta));
|
||||
fwd.getDensityFinal(ModelOutput<3>(mgr, box, output_delta_c));
|
||||
|
||||
BOOST_CHECK_CLOSE(
|
||||
output_delta_c[0][0][0].real(), input_delta_c[0][0][0].real(), 0.1);
|
||||
BOOST_CHECK_CLOSE(
|
||||
output_delta_c[0][0][1].real(), input_delta_c[0][0][1].real(), 0.1);
|
||||
|
||||
array::fill(ag_input_c, 1.0);
|
||||
/* fwd.adjointModel_v2(ModelInputAdjoint<3>(mgr, box, ag_input_c));
|
||||
fwd.getAdjointModelOutput(ModelOutputAdjoint<3>(mgr, box, ag_output));
|
||||
auto scale = std::real(fwrap(ag_input_c)).sum() * 1.0 /
|
||||
(box.L0 * box.L1 * box.L2) * cosmo.d_plus(1.0) /
|
||||
cosmo.d_plus(0.1);
|
||||
BOOST_CHECK_CLOSE(ag_output[0][0][0], scale, 0.1);*/
|
||||
}
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_chain_model) {
|
||||
BoxModel box{0, 0, 0, 100, 100, 100, 32, 32, 32};
|
||||
CosmologicalParameters cparams;
|
||||
|
||||
cparams.a0 = 1.0;
|
||||
cparams.fnl = 0;
|
||||
cparams.h = 0.67;
|
||||
cparams.omega_b = 0.05;
|
||||
cparams.omega_k = 0.0;
|
||||
cparams.omega_m = 0.3;
|
||||
cparams.omega_q = 0.7;
|
||||
cparams.omega_r = 0.0;
|
||||
cparams.w = -1;
|
||||
cparams.wprime = 0;
|
||||
|
||||
Cosmology cosmo(cparams);
|
||||
|
||||
auto mgr = std::make_shared<FFTW_Manager<double, 3>>(
|
||||
box.N0, box.N1, box.N2, MPI_Communication::instance());
|
||||
auto comm = MPI_Communication::instance();
|
||||
|
||||
auto fwd = std::make_shared<HadesLinear>(comm, box, box, 0.1, 1.0);
|
||||
auto fwd_log = std::make_shared<HadesLog>(comm, box, 1.0);
|
||||
|
||||
{
|
||||
ChainForwardModel chain(comm, box);
|
||||
|
||||
chain.addModel(fwd);
|
||||
chain.addModel(fwd_log);
|
||||
|
||||
auto input_delta_p = mgr->allocate_array();
|
||||
auto output_delta_p = mgr->allocate_array();
|
||||
auto output_delta_c_p = mgr->allocate_complex_array();
|
||||
|
||||
auto &input_delta = input_delta_p.get_array();
|
||||
auto &output_delta = output_delta_p.get_array();
|
||||
auto &output_delta_c = output_delta_c_p.get_array();
|
||||
|
||||
{
|
||||
LibLSS_tests::loadReferenceInput(box.N0, input_delta);
|
||||
double ref = fwrap(input_delta).sum() * (box.L0 * box.L1 * box.L2) /
|
||||
(box.N0 * box.N1 * box.N2);
|
||||
|
||||
fwrap(input_delta) =
|
||||
fwrap(input_delta) * cosmo.d_plus(0.1) / cosmo.d_plus(1.0);
|
||||
chain.forwardModel_v2(ModelInput<3>(mgr, box, input_delta));
|
||||
chain.getDensityFinal(ModelOutput<3>(mgr, box, output_delta_c));
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
ChainForwardModel chain(comm, box);
|
||||
size_t const Nmodes = 1000;
|
||||
auto primordial = std::make_shared<ForwardPrimordial>(comm, box, 1.0);
|
||||
|
||||
CosmologicalParameters cosmo_pars;
|
||||
|
||||
cosmo_pars.omega_b = 0.049;
|
||||
cosmo_pars.omega_k = 0;
|
||||
cosmo_pars.omega_m = 0.315;
|
||||
cosmo_pars.omega_q = 0.785;
|
||||
cosmo_pars.omega_r = 0;
|
||||
cosmo_pars.w = -1;
|
||||
cosmo_pars.wprime = 0;
|
||||
cosmo_pars.z0 = 0;
|
||||
cosmo_pars.sigma8 = 0.8;
|
||||
cosmo_pars.h = 0.68;
|
||||
|
||||
primordial->setCosmoParams(cosmo_pars);
|
||||
|
||||
chain.addModel(primordial);
|
||||
chain.addModel(fwd_log);
|
||||
|
||||
auto input_delta_p = mgr->allocate_array();
|
||||
auto output_delta_p = mgr->allocate_array();
|
||||
auto output_delta_c_p = mgr->allocate_complex_array();
|
||||
|
||||
auto &input_delta = input_delta_p.get_array();
|
||||
auto &output_delta = output_delta_p.get_array();
|
||||
auto &output_delta_c = output_delta_c_p.get_array();
|
||||
|
||||
{
|
||||
LibLSS_tests::loadReferenceInput(box.N0, input_delta);
|
||||
double ref = fwrap(input_delta).sum() * (box.L0 * box.L1 * box.L2) /
|
||||
(box.N0 * box.N1 * box.N2);
|
||||
|
||||
fwrap(input_delta) =
|
||||
fwrap(input_delta) * (1.0 / std::sqrt(box.N0 * box.N1 * box.N2));
|
||||
chain.forwardModel_v2(ModelInput<3>(mgr, box, input_delta));
|
||||
chain.getDensityFinal(ModelOutput<3>(mgr, box, output_delta));
|
||||
|
||||
BOOST_CHECK_CLOSE(output_delta[0][0][0], -1, 0.1);
|
||||
BOOST_CHECK_CLOSE(output_delta[0][4][0], -1, 0.1);
|
||||
BOOST_CHECK_CLOSE(output_delta[15][5][22], -0.99879686176352611, 0.1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
setupMPI(argc, argv);
|
||||
LibLSS::QUIET_CONSOLE_START = true;
|
||||
StaticInit::execute();
|
||||
LibLSS::Console::instance().setVerboseLevel<LOG_STD>();
|
||||
|
||||
int ret = utf::unit_test_main(&init_unit_test, argc, argv);
|
||||
|
||||
StaticInit::finalize();
|
||||
doneMPI();
|
||||
return 0;
|
||||
}
|
||||
// ARES TAG: authors_num = 1
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: year(0) = 2019-2020
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
78
extra/hades/libLSS/tests/test_symplectic.cpp
Normal file
@@ -0,0 +1,78 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tests/test_symplectic.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include <H5Cpp.h>
|
||||
#include <boost/multi_array.hpp>
|
||||
#include "libLSS/tools/static_init.hpp"
|
||||
#include "libLSS/tools/symplectic_integrator.hpp"
|
||||
#include <CosmoTool/algo.hpp>
|
||||
#include <CosmoTool/hdf5_array.hpp>
|
||||
#include <boost/preprocessor/cat.hpp>
|
||||
#include <boost/preprocessor/stringize.hpp>
|
||||
#include <boost/preprocessor/seq/for_each.hpp>
|
||||
#include <algorithm>
|
||||
|
||||
using namespace LibLSS;
|
||||
using boost::extents;
|
||||
using namespace CosmoTool;
|
||||
|
||||
void force_func(
|
||||
boost::multi_array<double, 1> &x, boost::multi_array<double, 1> &grad) {
|
||||
grad[0] = x[0];
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
using namespace SymplecticOption;
|
||||
setupMPI(argc, argv);
|
||||
|
||||
LibLSS::Console &console = LibLSS::Console::instance();
|
||||
LibLSS::StaticInit::execute();
|
||||
SymplecticIntegrators F;
|
||||
boost::multi_array<double, 1> mass(extents[1]), position(extents[1]),
|
||||
momentum(extents[1]), gradient(extents[1]);
|
||||
double epsilon = 0.1;
|
||||
double Einit;
|
||||
int Ntime = 2 * M_PI / epsilon;
|
||||
boost::multi_array<double, 1> E(boost::extents[Ntime]),
|
||||
p(boost::extents[Ntime]), m(boost::extents[Ntime]);
|
||||
|
||||
#define BUILD_SCHEME(r, data, elem) \
|
||||
std::make_pair(elem, BOOST_PP_STRINGIZE(elem)),
|
||||
|
||||
std::pair<IntegratorScheme, std::string> schemes[] = {BOOST_PP_SEQ_FOR_EACH(
|
||||
BUILD_SCHEME, _,
|
||||
(SI_2A)(SI_2B)(SI_2C)(SI_3A)(SI_4B)(SI_4C)(SI_4D)(SI_6A))};
|
||||
int numSchemes = sizeof(schemes) / sizeof(schemes[0]);
|
||||
H5::H5File f("symplectic.h5", H5F_ACC_TRUNC);
|
||||
|
||||
for (int s = 0; s < numSchemes; s++) {
|
||||
|
||||
F.setIntegratorScheme(schemes[s].first);
|
||||
mass[0] = 1;
|
||||
position[0] = 0;
|
||||
momentum[0] = 1;
|
||||
|
||||
Einit = 0.5 * square(position[0]) + 0.5 * square(momentum[0]) / mass[0];
|
||||
for (int i = 0; i < Ntime; i++) {
|
||||
F.integrate(force_func, mass, epsilon, 1, position, momentum, gradient);
|
||||
p[i] = position[0];
|
||||
m[i] = momentum[0] / mass[0];
|
||||
E[i] = 0.5 * square(position[0]) + 0.5 * square(momentum[0]) / mass[0] -
|
||||
Einit;
|
||||
}
|
||||
|
||||
H5::Group g = f.createGroup(schemes[s].second);
|
||||
hdf5_write_array(g, "energy", E);
|
||||
hdf5_write_array(g, "position", p);
|
||||
hdf5_write_array(g, "velocity", m);
|
||||
}
|
||||
LibLSS::StaticInit::finalize();
|
||||
|
||||
return 0;
|
||||
}
|
13
extra/hades/libLSS/tests/tests.cmake
Normal file
@@ -0,0 +1,13 @@
SET(EXTRA_HADES ${CMAKE_SOURCE_DIR}/extra/hades/libLSS/tests)
|
||||
|
||||
set(TEST_LIBRARY_SOURCES ${EXTRA_HADES}/setup_hades_test_run.cpp)
|
||||
|
||||
include_directories(${EXTRA_HADES})
|
||||
|
||||
SET(TEST_hades_LIST symplectic modelio hermiticity ghost_array )
|
||||
|
||||
include(${CMAKE_SOURCE_DIR}/extra/hades/scripts/gradient_tests.cmake)
|
||||
|
||||
hades_add_gradient_test(hades_gradients ${EXTRA_HADES}/hades_gradients.py_config)
|
||||
|
||||
add_test(NAME modelio COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_modelio)
|
333
extra/hades/libLSS/tools/hermiticity_fixup.cpp
Normal file
@@ -0,0 +1,333 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tools/hermiticity_fixup.cpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2019 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#include <set>
|
||||
#include <array>
|
||||
#include <algorithm>
|
||||
#include "libLSS/tools/console.hpp"
|
||||
#include "libLSS/tools/errors.hpp"
|
||||
#include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
#include "libLSS/samplers/core/types_samplers.hpp"
|
||||
#include "libLSS/tools/mpi/ghost_planes.hpp"
|
||||
#include "libLSS/tools/hermiticity_fixup.hpp"
|
||||
|
||||
static constexpr bool ULTRA_VERBOSE = true;
|
||||
|
||||
using namespace LibLSS;
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
Hermiticity_fixer<T, Nd>::Hermiticity_fixer(Mgr_p mgr_)
|
||||
: comm(mgr_->getComm()), mgr(mgr_) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
std::set<ssize_t> wanted_planes, owned_planes;
|
||||
|
||||
std::array<ssize_t, Nd - 1> dims;
|
||||
std::copy(mgr->N.begin() + 1, mgr->N.end(), dims.begin());
|
||||
dims[Nd - 2] = dims[Nd - 2] / 2 + 1;
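// Each task owns the slabs [startN0, startN0 + localN0). For owned slabs in
// the upper half (i0 > N0/2), the Hermitian-conjugate slab N0 - i0 may live on
// another task; the second block below requests those slabs as ghost planes.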
|
||||
|
||||
{
|
||||
size_t i_min = mgr->startN0;
|
||||
size_t i_max = mgr->startN0 + mgr->localN0;
|
||||
ctx.format("own: i_min=%d, i_max=%d", i_min, i_max);
|
||||
for (size_t i = i_min; i < i_max; i++) {
|
||||
owned_planes.insert(i);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
size_t i_min = std::max(size_t(mgr->startN0), mgr->N[0] / 2 + 1);
|
||||
size_t i_max = mgr->startN0 + mgr->localN0;
|
||||
ctx.format("want: i_min=%d, i_max=%d", i_min, i_max);
|
||||
for (size_t i = i_min; i < i_max; i++) {
|
||||
size_t conj_plane = mgr->N[0] - i;
|
||||
if (!mgr->on_core(conj_plane)) {
|
||||
wanted_planes.insert(conj_plane);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ghosts.setup(comm, wanted_planes, owned_planes, dims, mgr->N[0]);
|
||||
}
|
||||
|
||||
template <size_t Nd>
|
||||
static ssize_t encode_index(
|
||||
std::array<ssize_t, Nd> const &index, std::array<size_t, Nd> const &N) {
|
||||
ssize_t ret = 0;
|
||||
|
||||
for (size_t i = 0; i < Nd; i++)
|
||||
ret = ret * N[i] + index[i];
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <size_t Nd>
|
||||
static void decode_index(
|
||||
ssize_t coded_index, std::array<ssize_t, Nd> &decoded,
|
||||
std::array<size_t, Nd> const &N) {
|
||||
for (size_t i = Nd; i > 0; i--) {
|
||||
size_t j = i - 1;
|
||||
ssize_t tmp = coded_index / N[j];
|
||||
ssize_t tmp2 = coded_index - tmp * N[j];
|
||||
decoded[j] = tmp2;
|
||||
coded_index = tmp;
|
||||
}
|
||||
}
|
||||
|
||||
template <size_t Nd>
|
||||
static void find_conjugate(
|
||||
std::array<ssize_t, Nd> &reversed_index,
|
||||
std::array<ssize_t, Nd> const &index, std::array<size_t, Nd> const &N) {
|
||||
for (size_t i = 0; i < Nd; i++) {
|
||||
if (index[i] == 0)
|
||||
reversed_index[i] = 0;
|
||||
else
|
||||
reversed_index[i] = N[i] - index[i];
|
||||
}
|
||||
}
|
||||
|
||||
template <size_t Nd>
|
||||
static bool
|
||||
has_nyquist(std::array<ssize_t, Nd> &index, std::array<size_t, Nd> const &N) {
|
||||
for (size_t i = 0; i < Nd; i++) {
|
||||
if (index[i] == N[i] / 2 || index[i] == 0)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Forward hermiticity fixer
|
||||
|
||||
template <
|
||||
size_t rank, typename Mgr, typename Ghosts, typename CArray,
|
||||
size_t Dim = CArray::dimensionality>
|
||||
static typename std::enable_if<Dim == 1, void>::type
|
||||
fix_plane(Mgr &mgr, Ghosts &&ghosts, CArray &&a, size_t *N) {
|
||||
std::array<size_t, 1> current_N = {N[0]};
|
||||
size_t Ntot = N[0];
|
||||
size_t N0_HC = N[0] / 2;
|
||||
|
||||
#pragma omp parallel for
|
||||
for (size_t i = 1; i < N0_HC; i++) {
|
||||
size_t current, conj_current;
|
||||
current = i;
|
||||
conj_current = current_N[0] - i;
|
||||
a[conj_current] = std::conj(a[current]);
|
||||
}
|
||||
|
||||
for (size_t i : {size_t(0), N0_HC}) {
|
||||
a[i].imag(0);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool full, size_t Nd, typename AccessDirect, typename AccessConj>
|
||||
static void direct_fix(
|
||||
std::array<size_t, Nd> const ¤t_N, AccessDirect &&direct_access,
|
||||
AccessConj &&conj_access) {
|
||||
size_t Ntot =
|
||||
full ? array::product(current_N) / 2 : array::product(current_N);
|
||||
#pragma omp parallel for
|
||||
for (size_t i = 0; i < Ntot; i++) {
|
||||
std::array<ssize_t, Nd> current, conj_current;
|
||||
decode_index(i, current, current_N);
|
||||
//if (!has_nyquist(current, current_N))
|
||||
{
|
||||
find_conjugate(conj_current, current, current_N);
|
||||
direct_access(current) = std::conj(conj_access(conj_current));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <
|
||||
size_t rank, typename Mgr, typename Ghosts, typename CArray,
|
||||
size_t Dim = CArray::dimensionality>
|
||||
static typename std::enable_if<Dim != 1, void>::type
|
||||
fix_plane(Mgr &mgr, Ghosts &&ghosts, CArray &&a, size_t *N) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
std::array<size_t, Dim> current_N;
|
||||
size_t Ntot = array::product(current_N);
|
||||
size_t N0_HC = N[0] / 2;
|
||||
|
||||
std::copy(N, N + Dim, current_N.begin());
|
||||
|
||||
if (rank != 0) {
|
||||
auto accessor = [&a](auto &&x) -> auto & { return a(x); };
|
||||
direct_fix<true>(current_N, accessor, accessor);
|
||||
} else if (mgr.startN0 + mgr.localN0 > N0_HC) {
|
||||
size_t i_min = std::max(N0_HC, size_t(mgr.startN0));
|
||||
size_t i_max = mgr.startN0 + mgr.localN0;
|
||||
std::array<size_t, Dim - 1> sub_N;
|
||||
std::copy(current_N.begin() + 1, current_N.end(), sub_N.begin());
|
||||
|
||||
ctx.format("i_min = %d, i_max = %d", i_min, i_max);
|
||||
|
||||
for (size_t i0 = i_min; i0 < i_max; i0++) {
|
||||
size_t i0_conj = N[0] - i0;
|
||||
auto this_plane = a[i0];
|
||||
auto direct_access = [&this_plane](auto &&x) -> auto & {
|
||||
return this_plane(x);
|
||||
};
|
||||
|
||||
if (mgr.on_core(i0_conj)) {
|
||||
auto conj_plane = a[i0_conj];
|
||||
auto conj_direct_access = [&conj_plane](auto &&x) -> auto & {
|
||||
return conj_plane(x);
|
||||
};
|
||||
direct_fix<false>(sub_N, direct_access, conj_direct_access);
|
||||
} else {
|
||||
ctx.format(" Fix plane %d using i0_conj=%d from remote", i0, i0_conj);
|
||||
auto conj_plane = ghosts(i0_conj);
|
||||
direct_fix<false>(
|
||||
sub_N, direct_access, [&conj_plane](auto &&x) -> auto & {
|
||||
return conj_plane(x);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (rank != 0 || mgr.on_core(0))
|
||||
fix_plane<rank + 1>(mgr, ghosts, a[0], N + 1);
|
||||
if (rank != 0 || mgr.on_core(N0_HC))
|
||||
fix_plane<rank + 1>(mgr, ghosts, a[N0_HC], N + 1);
|
||||
}
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
void Hermiticity_fixer<T, Nd>::forward(CArrayRef &a) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
// Grab the planes that are required to build the Nyquist correction
|
||||
ghosts.synchronize(a);
|
||||
|
||||
std::array<size_t, Nd> N = mgr->N;
|
||||
size_t N_HC = mgr->N_HC;
|
||||
|
||||
auto idx = array::make_star_indices<Nd - 1>(boost::indices);
|
||||
auto idx_g = array::make_star_indices<Nd - 2>(boost::indices);
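// Hermitian symmetry only maps the two boundary slabs of the half-complex
// last axis (index 0 and N_HC - 1) onto themselves, so only those two slabs
// need fixing; conjugate planes are fetched from the ghost cache whenever the
// counterpart slab lives on another task.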
|
||||
fix_plane<0>(
|
||||
*mgr,
|
||||
[this, idx_g, N_HC](ssize_t plane) {
|
||||
return array::slice_array(ghosts.getPlane(plane), idx_g[0]);
|
||||
},
|
||||
array::slice_array(a, idx[0]), N.data());
|
||||
fix_plane<0>(
|
||||
*mgr,
|
||||
[this, idx_g, N_HC](ssize_t plane) {
|
||||
return array::slice_array(ghosts.getPlane(plane), idx_g[N_HC - 1]);
|
||||
},
|
||||
array::slice_array(a, idx[N_HC - 1]), N.data());
|
||||
|
||||
ghosts.release();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Adjoint gradient of hermiticity fixer
|
||||
|
||||
template <
|
||||
size_t rank, typename Mgr, typename CArray,
|
||||
size_t Dim = CArray::dimensionality>
|
||||
static typename std::enable_if<Dim == 1, void>::type
|
||||
adjoint_fix_plane(Mgr &mgr, CArray &&a, size_t *N) {
|
||||
std::array<size_t, 1> current_N = {N[0]};
|
||||
size_t Ntot = N[0];
|
||||
size_t N0_HC = N[0] / 2;
|
||||
|
||||
#pragma omp parallel for
|
||||
for (size_t i = 1; i < N0_HC; i++) {
|
||||
size_t current, conj_current;
|
||||
current = i;
|
||||
conj_current = current_N[0] - i;
|
||||
a[conj_current] = 0;
|
||||
}
|
||||
|
||||
for (size_t i : {size_t(0), N0_HC}) {
|
||||
auto &a0 = a[i];
|
||||
a0.real(a0.real() * 0.5);
|
||||
a0.imag(0);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool full, size_t Nd, typename AccessDirect>
|
||||
static void adjoint_direct_fix(
|
||||
std::array<size_t, Nd> const ¤t_N, AccessDirect &&direct_access) {
|
||||
size_t const Ntot =
|
||||
full ? array::product(current_N) / 2 : array::product(current_N);
|
||||
#pragma omp parallel for
|
||||
for (size_t i = 0; i < Ntot; i++) {
|
||||
std::array<ssize_t, Nd> current, conj_current;
|
||||
decode_index(i, current, current_N);
|
||||
//if (!has_nyquist(current, current_N))
|
||||
{
|
||||
find_conjugate(conj_current, current, current_N);
|
||||
direct_access(conj_current) = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <
|
||||
size_t rank, typename Mgr, typename CArray,
|
||||
size_t Dim = CArray::dimensionality>
|
||||
static typename std::enable_if<Dim != 1, void>::type
|
||||
adjoint_fix_plane(Mgr &mgr, CArray &&a, size_t *N) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
std::array<size_t, Dim> current_N;
|
||||
size_t Ntot = array::product(current_N);
|
||||
size_t N0_HC = N[0] / 2;
|
||||
|
||||
std::copy(N, N + Dim, current_N.begin());
|
||||
|
||||
if (rank != 0) {
|
||||
auto accessor = [&a](auto &&x) -> auto & { return a(x); };
|
||||
adjoint_direct_fix<true>(current_N, accessor);
|
||||
} else if (mgr.startN0 + mgr.localN0 > N0_HC) {
|
||||
size_t i_min = std::max(N0_HC, size_t(mgr.startN0));
|
||||
size_t i_max = mgr.startN0 + mgr.localN0;
|
||||
std::array<size_t, Dim - 1> sub_N;
|
||||
std::copy(current_N.begin() + 1, current_N.end(), sub_N.begin());
|
||||
|
||||
for (size_t i0 = i_min; i0 < i_max; i0++) {
|
||||
auto this_plane = a[i0];
|
||||
auto direct_access = [&this_plane](auto &&x) -> auto & {
|
||||
return this_plane(x);
|
||||
};
|
||||
|
||||
adjoint_direct_fix<false>(sub_N, direct_access);
|
||||
}
|
||||
}
|
||||
|
||||
if (rank != 0 || mgr.on_core(0))
|
||||
adjoint_fix_plane<rank + 1>(mgr, a[0], N + 1);
|
||||
if (rank != 0 || mgr.on_core(N0_HC))
|
||||
adjoint_fix_plane<rank + 1>(mgr, a[N0_HC], N + 1);
|
||||
}
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
void Hermiticity_fixer<T, Nd>::adjoint(CArrayRef &a) {
|
||||
// Grab the planes that are required to build the Nyquist correction
|
||||
|
||||
std::array<size_t, Nd> N = mgr->N;
|
||||
size_t N_HC = mgr->N_HC;
|
||||
|
||||
fwrap(a) = fwrap(a) * 2.0;
|
||||
// if (mgr->on_core(0))
|
||||
// a[0][0][0] *= 0.5;
|
||||
|
||||
auto idx = array::make_star_indices<Nd - 1>(boost::indices);
|
||||
adjoint_fix_plane<0>(*mgr, array::slice_array(a, idx[0]), N.data());
|
||||
adjoint_fix_plane<0>(*mgr, array::slice_array(a, idx[N_HC - 1]), N.data());
|
||||
}
|
||||
|
||||
//template struct LibLSS::Hermiticity_fixer<double, 1>;
|
||||
//template struct LibLSS::Hermiticity_fixer<double, 2>;
|
||||
template struct LibLSS::Hermiticity_fixer<double, 3>;
|
||||
|
||||
// ARES TAG: authors_num = 2
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
||||
// ARES TAG: year(0) = 2014-2020
|
||||
// ARES TAG: name(1) = Jens Jasche
|
||||
// ARES TAG: email(1) = jens.jasche@fysik.su.se
|
||||
// ARES TAG: year(1) = 2009-2019
|
47
extra/hades/libLSS/tools/hermiticity_fixup.hpp
Normal file
@@ -0,0 +1,47 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tools/hermiticity_fixup.hpp
|
||||
Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2019 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#ifndef __LIBLSS_TOOLS_HERMITICITY_FIXUP_HPP
|
||||
# define __LIBLSS_TOOLS_HERMITICITY_FIXUP_HPP
|
||||
|
||||
# include <complex>
|
||||
# include <boost/format.hpp>
|
||||
# include "libLSS/tools/mpi_fftw_helper.hpp"
|
||||
# include "libLSS/tools/mpi/ghost_planes.hpp"
|
||||
|
||||
namespace LibLSS {
|
||||
|
||||
template <typename T, size_t Nd>
|
||||
struct Hermiticity_fixer {
|
||||
typedef FFTW_Manager<T, Nd> Mgr;
|
||||
typedef std::shared_ptr<Mgr> Mgr_p;
|
||||
typedef typename Mgr::U_ArrayFourier::array_type CArrayRef;
|
||||
|
||||
MPI_Communication *comm;
|
||||
Mgr_p mgr;
|
||||
|
||||
GhostPlanes<std::complex<T>, Nd - 1> ghosts;
|
||||
|
||||
Hermiticity_fixer(Mgr_p mgr);
|
||||
|
||||
void forward(CArrayRef &a);
|
||||
void adjoint(CArrayRef &a);
|
||||
};
|
||||
|
||||
} // namespace LibLSS
|
||||
|
||||
#endif
|
||||
|
||||
// ARES TAG: authors_num = 2
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
||||
// ARES TAG: year(0) = 2014-2020
|
||||
// ARES TAG: name(1) = Jens Jasche
|
||||
// ARES TAG: email(1) = jens.jasche@fysik.su.se
|
||||
// ARES TAG: year(1) = 2009-2019
|
231
extra/hades/libLSS/tools/mpi/ghost_array.hpp
Normal file
@@ -0,0 +1,231 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tools/mpi/ghost_array.hpp
|
||||
Copyright (C) 2018-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#pragma once
|
||||
#ifndef __LIBLSS_TOOLS_MPI_GHOST_ARRAY_HPP
|
||||
# define __LIBLSS_TOOLS_MPI_GHOST_ARRAY_HPP
|
||||
|
||||
# include <set>
|
||||
# include <map>
|
||||
# include <memory>
|
||||
# include "libLSS/tools/string_tools.hpp"
|
||||
# include "libLSS/tools/uninitialized_type.hpp"
|
||||
# include "libLSS/mpi/generic_mpi.hpp"
|
||||
# include "libLSS/tools/array_tools.hpp"
|
||||
# include "libLSS/tools/string_tools.hpp"
|
||||
# include "libLSS/samplers/core/types_samplers.hpp"
|
||||
|
||||
namespace LibLSS {
|
||||
|
||||
template <typename T>
|
||||
struct GhostArrayTypes {
|
||||
typedef LibLSS::U_Array<T, 1> U_ArrayType;
|
||||
typedef typename U_ArrayType::array_type ArrayType;
|
||||
|
||||
typedef std::map<size_t, std::shared_ptr<U_ArrayType>> MapGhosts;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class GhostArray : public GhostArrayTypes<T> {
|
||||
protected:
|
||||
static constexpr bool SUPER_VERBOSE = false;
|
||||
typedef GhostArrayTypes<T> super;
|
||||
typedef typename super::ArrayType ArrayType;
|
||||
typedef typename super::U_ArrayType U_ArrayType;
|
||||
typedef typename super::MapGhosts MapGhosts;
|
||||
|
||||
MPI_Communication *comm;
|
||||
|
||||
std::vector<boost::multi_array<T, 1>> exchangeIndexes;
|
||||
|
||||
public:
|
||||
GhostArray() {}
|
||||
|
||||
/**
|
||||
* @brief Set up the exchange pattern for a distributed set of indexes.
*
* Every task declares the indexes it holds locally; the pairwise
* intersections with the other tasks determine which entries have to be
* exchanged during synchronize(). We assume that localIndexes are unique.
*
* @param comm_ MPI communicator shared by all participating tasks
* @param localIndexes container of the indexes held by this task
|
||||
*/
|
||||
template <typename IndexSet>
|
||||
void setup(MPI_Communication *comm_, IndexSet &&localIndexes) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
typedef typename std::remove_reference_t<IndexSet>::value_type indexType;
|
||||
static_assert(
|
||||
std::is_same<indexType, T>::value, "Index list must be of type T");
|
||||
int localKeys = localIndexes.size();
|
||||
|
||||
comm = comm_;
|
||||
|
||||
// Serialize and send to peers
|
||||
auto commSize = boost::extents[comm->size()];
|
||||
boost::multi_array<indexType, 1> linearIndexes(boost::extents[localKeys]);
|
||||
boost::multi_array<int, 1> allIndexCounts(commSize);
|
||||
boost::multi_array<int, 1> displIndexes(commSize);
|
||||
|
||||
ctx.print("Transfer indexes to linear array");
|
||||
std::copy(
|
||||
localIndexes.begin(), localIndexes.end(), linearIndexes.begin());
|
||||
|
||||
ctx.print("Sort");
|
||||
std::sort(linearIndexes.begin(), linearIndexes.end());
|
||||
|
||||
comm->all_gather_t(&localKeys, 1, allIndexCounts.data(), 1);
|
||||
|
||||
ctx.print("Compute global displacements");
|
||||
int totalIndexes = 0, previousDispl = 0;
|
||||
for (int i = 0; i < comm->size(); i++) {
|
||||
totalIndexes += allIndexCounts[i];
|
||||
displIndexes[i] = previousDispl;
|
||||
previousDispl += allIndexCounts[i];
|
||||
}
|
||||
|
||||
boost::multi_array<indexType, 1> allIndexes(boost::extents[totalIndexes]);
|
||||
// FIXME: Try to reduce memory/bandwidth consumption with better distributed algorithm
|
||||
ctx.print("Gather all relevant indexes");
|
||||
comm->all_gatherv_t(
|
||||
linearIndexes.data(), localKeys, allIndexes.data(),
|
||||
allIndexCounts.data(), displIndexes.data());
|
||||
|
||||
std::set<indexType> localSet;
|
||||
|
||||
if (SUPER_VERBOSE)
|
||||
ctx.format("Local indexes: %s", LibLSS::to_string(localIndexes));
|
||||
|
||||
ctx.print("Transfer local indexes to set for better intersection");
|
||||
std::copy(
|
||||
localIndexes.begin(), localIndexes.end(),
|
||||
std::inserter(localSet, localSet.begin()));
|
||||
|
||||
exchangeIndexes.resize(comm->size());
|
||||
for (int i = 0; i < comm->size(); i++) {
|
||||
// Compute intersections with remote nodes
|
||||
std::set<indexType> otherIndexes, interIndexes;
|
||||
|
||||
if (i == comm->rank())
|
||||
continue;
|
||||
|
||||
for (int j = 0; j < allIndexCounts[i]; j++) {
|
||||
otherIndexes.insert(allIndexes[j + displIndexes[i]]);
|
||||
}
|
||||
if (SUPER_VERBOSE)
|
||||
ctx.format(
|
||||
"Other indexes (count=%d): %s", allIndexCounts[i],
|
||||
LibLSS::to_string(otherIndexes));
|
||||
|
||||
ctx.format("Intersect with rank=%d", i);
|
||||
std::set_intersection(
|
||||
localSet.begin(), localSet.end(), otherIndexes.begin(),
|
||||
otherIndexes.end(),
|
||||
std::inserter(interIndexes, interIndexes.begin()));
|
||||
|
||||
ctx.format("%d indexes in common", interIndexes.size());
|
||||
exchangeIndexes[i].resize(boost::extents[interIndexes.size()]);
|
||||
std::copy(
|
||||
interIndexes.begin(), interIndexes.end(),
|
||||
exchangeIndexes[i].begin());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Synchronize the shared entries of a distributed array.
*
* For every peer that shares indexes with this task (as computed in setup()),
* the corresponding entries of `data` are exchanged and merged locally with
* the reduction operation `op`; the two-argument overload defaults to a sum.
*
* @tparam U element type of the synchronized array
* @param data array holding one entry per locally known index
* @param mapper maps an index (as given to setup) to a position in `data`
* @param op binary reduction, invoked as op(local_entry, remote_entry)
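*
* A minimal usage sketch (the communicator, value array and index-to-position
* mapper are assumed to come from the surrounding code; the max-reduction is
* purely illustrative):
* @code
* GhostArray<int> ghost;
* ghost.setup(comm, my_indexes);   // my_indexes: indexes held by this task
* // default reduction: shared entries are summed across tasks
* ghost.synchronize(values, mapper);
* // custom reduction: keep the maximum of the shared entries instead
* ghost.synchronize(values, mapper,
*     [](double &x, double const &y) { x = std::max(x, y); });
* @endcode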
|
||||
*/
|
||||
template <typename U, typename ReductionOperation, typename IndexMapper>
|
||||
void synchronize(
|
||||
boost::multi_array_ref<U, 1> &data, IndexMapper &&mapper,
|
||||
ReductionOperation op) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
typedef LibLSS::U_Array<U, 1> TmpU;
|
||||
|
||||
std::vector<std::shared_ptr<TmpU>> allTmpSend;
|
||||
std::vector<std::shared_ptr<TmpU>> allTmpRecv;
|
||||
std::vector<MPICC_Request> allReq;
|
||||
|
||||
allTmpRecv.resize(comm->size());
|
||||
|
||||
for (int i = 0; i < comm->size(); i++) {
|
||||
int numExchange = exchangeIndexes[i].size();
|
||||
if (numExchange == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
ctx.format("Send %d data -> %d", numExchange, i);
|
||||
{
|
||||
auto thisTmp = std::make_shared<TmpU>(boost::extents[numExchange]);
|
||||
auto &tmpData = thisTmp->get_array();
|
||||
|
||||
allTmpSend.push_back(thisTmp);
|
||||
|
||||
# pragma omp parallel for
|
||||
for (int j = 0; j < numExchange; j++) {
|
||||
tmpData[j] = data[mapper(exchangeIndexes[i][j])];
|
||||
}
|
||||
|
||||
allReq.push_back(comm->IsendT(tmpData.data(), tmpData.size(), i, i));
|
||||
}
|
||||
ctx.format("Recv %d data <- %d", numExchange, i);
|
||||
{
|
||||
auto thisTmp = std::make_shared<TmpU>(boost::extents[numExchange]);
|
||||
auto &tmpData = thisTmp->get_array();
|
||||
|
||||
allTmpRecv[i] = thisTmp;
|
||||
|
||||
allReq.push_back(
|
||||
comm->IrecvT(tmpData.data(), tmpData.size(), i, comm->rank()));
|
||||
}
|
||||
}
|
||||
ctx.print("Wait IO completion");
|
||||
comm->WaitAll(allReq);
|
||||
|
||||
allTmpSend.clear();
|
||||
|
||||
{
|
||||
ConsoleContext<LOG_DEBUG> ctx("GhostArray local reduction");
|
||||
|
||||
// Now all data are in place, we must do partial reductions
|
||||
for (int i = 0; i < comm->size(); i++) {
|
||||
if (i == comm->rank())
|
||||
continue;
|
||||
int numExchange = exchangeIndexes[i].size();
|
||||
if (numExchange == 0)
|
||||
continue;
|
||||
|
||||
auto &inData = allTmpRecv[i]->get_array();
|
||||
|
||||
# pragma omp parallel for
|
||||
for (int j = 0; j < numExchange; j++) {
|
||||
op(data[mapper(exchangeIndexes[i][j])], inData[j]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <typename U, typename IndexMapper>
|
||||
void synchronize(
|
||||
boost::multi_array_ref<U, 1> &data, IndexMapper &&indexToIndex) {
|
||||
synchronize<U>(
|
||||
data, indexToIndex, [](auto &x, auto const &y) { x += y; });
|
||||
}
|
||||
};
|
||||
} // namespace LibLSS
|
||||
|
||||
#endif
|
||||
// ARES TAG: num_authors = 1
|
||||
// ARES TAG: name(0) = Guilhem Lavaux
|
||||
// ARES TAG: year(0) = 2018-2020
|
||||
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
606
extra/hades/libLSS/tools/mpi/ghost_planes.hpp
Normal file
@@ -0,0 +1,606 @@
/*+
|
||||
ARES/HADES/BORG Package -- ./extra/hades/libLSS/tools/mpi/ghost_planes.hpp
|
||||
Copyright (C) 2018-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||
Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
|
||||
|
||||
Additional contributions from:
|
||||
Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||
|
||||
+*/
|
||||
#pragma once
|
||||
#ifndef __LIBLSS_TOOLS_MPI_GHOST_PLANES_HPP
|
||||
# define __LIBLSS_TOOLS_MPI_GHOST_PLANES_HPP
|
||||
|
||||
# include <map>
|
||||
# include <memory>
|
||||
# include "libLSS/tools/uninitialized_type.hpp"
|
||||
# include "libLSS/mpi/generic_mpi.hpp"
|
||||
# include "libLSS/tools/array_tools.hpp"
|
||||
# include "libLSS/tools/string_tools.hpp"
|
||||
# include "libLSS/samplers/core/types_samplers.hpp"
|
||||
|
||||
namespace LibLSS {
|
||||
|
||||
/**
|
||||
* This class provides some types to abbreviate the long array specification
|
||||
* for ghost planes.
|
||||
*/
|
||||
template <typename T, size_t Nd>
|
||||
struct GhostPlaneTypes {
|
||||
typedef boost::multi_array_ref<T, Nd> ArrayType;
|
||||
typedef UninitializedArray<ArrayType> U_ArrayType;
|
||||
|
||||
typedef std::map<size_t, std::shared_ptr<U_ArrayType>> MapGhosts;
|
||||
};
|
||||
|
||||
/**
|
||||
* @file
|
||||
* This enumeration allows choosing between different kinds of "ghosts".
|
||||
*/
|
||||
enum GhostMethod {
|
||||
GHOST_COPY, ///< in synchronize mode, the plane is copied. In AG mode, it is accumulated.
|
||||
GHOST_ACCUMULATE ///< in synchronize mode, the plane is accumulated. In AG mode, it is copied.
|
||||
};
|
||||
|
||||
/**
|
||||
* This class handles the generic problem of ghost-plane management with MPI.
* The concept of ghost planes (as for ghost particles in another module) comes
* from the distinction between the MPI task that owns a plane and the tasks
* that need it for further computation. A ghost plane is not designed to be an
* "active" plane on the node that needs it, though a slight variant allows
* such use at the cost of a final synchronization.
|
||||
*
|
||||
* The workflow for using ghost planes is the following:
*   - create the GhostPlanes object;
*   - call setup() to declare which planes are owned and which are required;
*   - do the computation;
*   - call synchronize() before the ghost planes are needed;
*   - access the ghost planes with getPlane();
*   - repeat synchronize() whenever the owned planes change.
|
||||
*
|
||||
* There is an adjoint gradient variant of the synchronization step which
|
||||
* does sum reduction of the adjoint gradient arrays corresponding to the
|
||||
* ghost planes.
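*
* A minimal sketch of that workflow (the plane sets, plane dimensions and the
* local Fourier slab are assumed to come from the caller, as in
* Hermiticity_fixer):
* @code
* GhostPlanes<std::complex<double>, 2> ghosts;
* // wanted/owned are std::set<ssize_t>, dims is the 2d shape of one plane
* ghosts.setup(comm, wanted, owned, dims, N0);
* ghosts.synchronize(local_fourier_slab);        // fetch the required remote planes
* auto const &plane = ghosts.getPlane(i0_conj);  // i0_conj must be in `wanted`
* ghosts.release();                              // drop the ghost buffers
* @endcode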
|
||||
*
|
||||
*/
|
||||
template <typename T, size_t Nd>
|
||||
struct GhostPlanes : GhostPlaneTypes<T, Nd> {
|
||||
typedef GhostPlaneTypes<T, Nd> super;
|
||||
typedef typename super::ArrayType ArrayType;
|
||||
typedef typename super::U_ArrayType U_ArrayType;
|
||||
typedef typename super::MapGhosts MapGhosts;
|
||||
|
||||
private:
|
||||
static constexpr bool CHECK_DIMENSIONS = true;
|
||||
static constexpr bool ULTRA_VERBOSE = false;
|
||||
MPI_Communication *comm;
|
||||
MapGhosts ghosts, ag_ghosts;
|
||||
size_t maxPlaneId;
|
||||
std::map<size_t, size_t> plane_peer;
|
||||
std::array<size_t, Nd> setupDims;
|
||||
|
||||
typedef LibLSS::multi_array<int, 1> int_array;
|
||||
typedef LibLSS::multi_array<int, 1> size_array;
|
||||
typedef std::set<int> size_set;
|
||||
|
||||
LibLSS::multi_array<int, 1> other_requested_planes, other_requested_count,
|
||||
other_requested_displ;
|
||||
|
||||
std::map<size_t, std::shared_ptr<MPI_Communication>> owned_plane_dispatch;
|
||||
size_set req_plane_set;
|
||||
|
||||
template <typename PlaneSet>
|
||||
inline size_array fill_with_planes(PlaneSet &&owned_planes) {
|
||||
size_array plane_set(boost::extents[owned_planes.size()]);
|
||||
size_t i = 0;
|
||||
for (auto plane : owned_planes) {
|
||||
plane_set[i] = plane;
|
||||
i++;
|
||||
}
|
||||
return plane_set;
|
||||
}
|
||||
|
||||
template <typename Array>
|
||||
inline std::string array_to_str(Array const &s, char const *sep) {
|
||||
std::ostringstream oss;
|
||||
auto iter = s.begin();
|
||||
|
||||
if (iter == s.end())
|
||||
return "";
|
||||
|
||||
oss << *iter;
|
||||
++iter;
|
||||
|
||||
while (iter != s.end()) {
|
||||
oss << sep << *iter;
|
||||
++iter;
|
||||
}
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
template <typename PlaneSet>
|
||||
inline void dispatch_plane_map(
|
||||
PlaneSet &&owned_planes, int_array &other_planes,
|
||||
int_array &other_planes_count, int_array &other_planes_displ) {
|
||||
size_t cSize = comm->size();
|
||||
auto e_cSize = boost::extents[cSize];
|
||||
ConsoleContext<LOG_DEBUG> ctx("dispatch_plane_map");
|
||||
int_array tmp_data(e_cSize), send_displ(e_cSize), send_count(e_cSize);
|
||||
|
||||
// Now find out which rank has the planes.
|
||||
// Everybody send their planeset for that.
|
||||
auto plane_set = fill_with_planes(owned_planes);
|
||||
size_t Nplanes = plane_set.size();
|
||||
array::fill(tmp_data, Nplanes);
|
||||
array::fill(send_count, 1);
|
||||
|
||||
// Costly but we hopefully do it only once in a while.
|
||||
// Get all the plane number count from everybody.
|
||||
ctx.print("Dispatch our planeset, number is " + to_string(tmp_data));
|
||||
comm->all2allT(tmp_data.data(), 1, other_planes_count.data(), 1);
|
||||
|
||||
for (size_t i = 1; i < comm->size(); i++) {
|
||||
other_planes_displ[i] =
|
||||
other_planes_displ[i - 1] + other_planes_count[i - 1];
|
||||
}
|
||||
|
||||
size_t total_planes =
|
||||
other_planes_displ[cSize - 1] + other_planes_count[cSize - 1];
|
||||
|
||||
ctx.print(boost::format("Total planes = %d") % total_planes);
|
||||
other_planes.resize(boost::extents[total_planes]);
|
||||
|
||||
ctx.print(
|
||||
boost::format("Now gather plane ids send_count=%s; send_displ=%s; "
|
||||
"other_planes_count=%s; other_planes_displ=%s") %
|
||||
array_to_str(tmp_data, ",") % array_to_str(send_displ, ",") %
|
||||
array_to_str(other_planes_count, ",") %
|
||||
array_to_str(other_planes_displ, ","));
|
||||
// Get plane id from everybody
|
||||
comm->all2allv_t(
|
||||
plane_set.data(), tmp_data.data(), send_displ.data(),
|
||||
other_planes.data(), other_planes_count.data(),
|
||||
other_planes_displ.data());
|
||||
ctx.print(
|
||||
boost::format("Got other task planeset: %s") %
|
||||
array_to_str(other_planes, ","));
|
||||
}
|
||||
|
||||
typedef std::map<size_t, std::list<size_t>> MapPlaneToPeer;
|
||||
|
||||
inline MapPlaneToPeer gather_peer_by_plane(
|
||||
int_array const &required_planes,
|
||||
int_array const &required_planes_count,
|
||||
int_array const &required_planes_displ) {
|
||||
MapPlaneToPeer plane_to_peer;
|
||||
int peer = 0;
|
||||
size_t cSize = comm->size();
|
||||
size_t cRank = comm->rank();
|
||||
ConsoleContext<LOG_DEBUG> ctx("gather_peer_by_plane");
|
||||
|
||||
for (size_t i = 0; i < required_planes.num_elements(); i++) {
|
||||
if (peer + 1 < cSize && i >= required_planes_displ[peer + 1]) {
|
||||
peer++;
|
||||
}
|
||||
ctx.print(
|
||||
boost::format("Peer %d provides %d") % peer % required_planes[i]);
|
||||
if (peer != cRank) {
|
||||
plane_to_peer[required_planes[i]].push_back(peer);
|
||||
}
|
||||
}
|
||||
return plane_to_peer;
|
||||
}
|
||||
|
||||
static inline void null_destroy(void *) {}
|
||||
|
||||
std::map<
|
||||
GhostMethod,
|
||||
std::function<MPICC_Request(MPI_Communication *, T const *, int)>>
|
||||
ghost_methods;
|
||||
std::map<
|
||||
GhostMethod,
|
||||
std::function<MPICC_Request(MPI_Communication *, T *, T *, int)>>
|
||||
ghost_methods_ag;
|
||||
|
||||
static MPICC_Request
|
||||
ghost_copy_method(MPI_Communication *c, T const *data, int num) {
|
||||
return c->IbroadcastT((T *)data, num, 0);
|
||||
}
|
||||
|
||||
static MPICC_Request
|
||||
ghost_accumulate_method(MPI_Communication *c, T const *data, int num) {
|
||||
return c->IallReduceT((T *)MPI_IN_PLACE, (T *)data, num, MPI_SUM);
|
||||
}
|
||||
|
||||
static MPICC_Request ghost_accumulate_method_ag(
|
||||
MPI_Communication *c, T *indata, T const *data, int num) {
|
||||
return c->IgatherT((T *)indata, num, (T *)data, num, 0);
|
||||
}
|
||||
|
||||
static MPICC_Request
|
||||
ghost_copy_method_ag(MPI_Communication *c, T *indata, T *data, int num) {
|
||||
return c->IreduceT(indata, data, num, MPI_SUM, 0);
|
||||
}
|
||||
|
||||
public:
|
||||
/**
|
||||
* Constructor.
|
||||
*/
|
||||
GhostPlanes() {
|
||||
ghost_methods[GHOST_COPY] = &ghost_copy_method;
|
||||
ghost_methods[GHOST_ACCUMULATE] = &ghost_accumulate_method;
|
||||
ghost_methods_ag[GHOST_COPY] = &ghost_copy_method_ag;
|
||||
ghost_methods_ag[GHOST_ACCUMULATE] = &ghost_accumulate_method_ag;
|
||||
std::fill(setupDims.begin(), setupDims.end(), 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the current dimensions of the planes.
|
||||
*
|
||||
* @return A container with the dimensions.
|
||||
*/
|
||||
auto const &dims() const { return setupDims; }
|
||||
|
||||
/**
|
||||
* This function allows the user to change the dimensions of the planes.
|
||||
*
|
||||
* @param dims (N-1)-d dimension of each plane.
|
||||
*/
|
||||
template <typename DimList>
|
||||
void updatePlaneDims(DimList &&dims) {
|
||||
LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
|
||||
auto i1 = dims.begin();
|
||||
auto i2 = setupDims.begin();
|
||||
int d = 0;
|
||||
for (d = 0; d < Nd; d++) {
|
||||
if (*i1 != *i2)
|
||||
break;
|
||||
++i1;
|
||||
++i2;
|
||||
}
|
||||
// Everything already correct. Exit now.
|
||||
if (d == Nd) {
|
||||
ctx.print("No change needed.");
|
||||
return;
|
||||
}
|
||||
|
||||
ctx.format("New shape is %dx%d", dims[0], dims[1]);
|
||||
|
||||
for (auto &g : ghosts) {
|
||||
if (g.second)
|
||||
g.second.reset(); //reshape(dims);
|
||||
}
|
||||
for (auto &g : ag_ghosts) {
|
||||
if (g.second)
|
||||
g.second.reset(); //reshape(dims);
|
||||
}
|
||||
std::copy(dims.begin(), dims.end(), setupDims.begin());
|
||||
}
|
||||
|
||||
/**
|
||||
* This function sets up the ghost plane object for usage. It can be called
* several times; in that case the previous setup is forgotten and a new one
* is initiated.
*
* @param comm_ MPI communicator with the same topology as the planes
* @param planes list of the planes that are required from other nodes; any
* container type is accepted
* @param owned_planes list of the planes that are owned by the current
* node
* @param dims dimensions of the planes (without the first axis, i.e. 2D if
* the entire set is 3D)
* @param maxPlaneId_ convenience argument to avoid a global communication
* just to figure out the maximum id of the considered
* planes
|
||||
*/
  template <typename PlaneList, typename PlaneSet, typename DimList>
  void setup(
      MPI_Communication *comm_, PlaneList &&planes, PlaneSet &&owned_planes,
      DimList &&dims, size_t maxPlaneId_) {
    LIBLSS_AUTO_DEBUG_CONTEXT(ctx);
    size_t cSize = comm_->size();
    auto e_cSize = boost::extents[cSize];
    int_array other_planes, other_planes_count(e_cSize),
        other_planes_displ(e_cSize);
    size_set owned_plane_set;
    // required_planes, required_planes_count,
    // required_planes_displ;

    maxPlaneId = maxPlaneId_;
    req_plane_set = size_set(planes.begin(), planes.end());
    owned_plane_set = size_set(owned_planes.begin(), owned_planes.end());

    ghosts.clear();
    comm = comm_;
    std::copy(dims.begin(), dims.end(), setupDims.begin());

    // Create a map between requested planes and peers.
    dispatch_plane_map(
        owned_planes, other_planes, other_planes_count, other_planes_displ);

    // Now we know about the requirements of the other peers for our own set
    // of planes.
    auto plane_to_peer = gather_peer_by_plane(
        // required_planes, required_planes_count, required_planes_displ
        other_planes, other_planes_count, other_planes_displ);

    ctx.print("Required planes: " + to_string(req_plane_set));
    ctx.print("Owned planes: " + to_string(owned_plane_set));

    for (size_t plane = 0; plane < maxPlaneId; plane++) {
      std::shared_ptr<MPI_Communication> cptr;
      auto peer = plane_to_peer.find(plane);

      if (owned_plane_set.count(plane) > 0) {
        // Mark this task as root (key==0)
        cptr = std::shared_ptr<MPI_Communication>(comm->split(plane, 0));
        if (ULTRA_VERBOSE)
          ctx.format("Data for plane %d is present here.", plane);
      } else if (req_plane_set.find(plane) != req_plane_set.end()) {
        // Mark this task as non root (key!=0)
        cptr = std::shared_ptr<MPI_Communication>(comm->split(plane, 1));
        if (ULTRA_VERBOSE)
          ctx.format("Data for plane %d is NEEDED here.", plane);
      } else {
        // Ignore this one, but we have to run it nonetheless as split is a collective operation.
        comm->split();
        if (ULTRA_VERBOSE)
          ctx.format("Ignore this process for plane %d.", plane);
      }
      if (cptr &&
          cptr->size() <=
              1) { // Should arguably be 2, but then we would have a rank problem later.
        // We do not need a new communicator for that in the end.
        // This resets cptr and frees the communicator we have just
        // created.
        cptr.reset();
        if (ULTRA_VERBOSE)
          ctx.format(
              "Communicator has only one process for plane %d, reset.",
              plane);
      }
      owned_plane_dispatch[plane] = cptr;
    }
  }
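
  /*
   * A minimal usage sketch (illustrative only, not part of this interface):
   * it assumes GhostPlanes is instantiated with an element type and a plane
   * rank consistent with T and Nd used above, that `comm` is the
   * MPI_Communication object used elsewhere, and that the plane lists are
   * plain containers of plane indices:
   *
   *   GhostPlanes<double, 2> ghosts;               // hypothetical instantiation
   *   std::vector<size_t> required = {0, 5, 9};    // planes needed from peers
   *   std::vector<size_t> owned = {2, 3, 4};       // planes stored locally
   *   std::array<size_t, 2> plane_dims = {64, 64}; // (N-1)-d shape of a plane
   *   ghosts.setup(comm, required, owned, plane_dims, 16); // 16: maxPlaneId
   *
   * After setup(), synchronize() performs the actual exchange (it calls
   * allocate() itself to reserve the receive buffers).
   */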

  /**
   * @brief Pre-allocate memory for synchronization.
   *
   * Warning! Previously allocated memory is freed.
   *
   */
  void allocate() {
    // Allocate memory for the ghost planes
    for (auto plane : req_plane_set) {
      if (!ghosts[plane])
        ghosts[plane] = std::make_shared<U_ArrayType>(setupDims);
      if (!ag_ghosts[plane])
        ag_ghosts[plane] = std::make_shared<U_ArrayType>(setupDims);
    }
  }

  /**
   * @brief Release the memory used for synchronization.
   *
   */
  void release() {
    for (auto plane : req_plane_set) {
      ghosts[plane].reset();
      ag_ghosts[plane].reset();
    }
  }

  /**
   * Clear the internal ghost cache used for the computation
   * of the adjoint gradient.
   */
  void clear_ghosts() {
    for (auto &ag : ag_ghosts) {
      array::fill(ag.second->get_array(), 0);
    }
  }

  /**
   * This creates a virtual contiguous array of all the planes that are
   * requested and owned by the current task. There is a bit of overhead for
   * each plane lookup, so use it wisely by caching plane accesses.
   *
   * @param planes contiguous multi_array of planes to be synchronized. The
   *               multi_array is assumed to range from min_local_plane
   *               to max_local_plane (according to the list given in
   *               setup).
   * @param method a method to compute the synchronization
   */
  void synchronize(
      boost::multi_array_ref<T, (Nd + 1)> const &planes,
      GhostMethod method = GHOST_COPY) {
    // Synchronize operations with other members of comm
    ConsoleContext<LOG_DEBUG> ctx("ghost synchronize");
    RequestArray requests(boost::extents[maxPlaneId]);
    StatusArray statuses(boost::extents[maxPlaneId]);

    allocate();

    // Check that the planes do have the correct shape
    if (CHECK_DIMENSIONS) {
      auto shape_in = planes.shape();
      auto iter = ghosts.begin();

      if (iter != ghosts.end()) {
        auto shape_out = iter->second->get_array().shape();

        for (size_t i = 1; i < Nd; i++) {
          if (shape_in[i] != shape_out[i - 1]) {
            error_helper<ErrorBadState>(
                "Invalid dimensions of the array to synchronize (" +
                to_string(shape_in[i]) +
                " != " + to_string(shape_out[i - 1]) + ")");
          }
        }
      }
    }

    for (size_t plane = 0; plane < maxPlaneId; plane++) {
      auto iter = owned_plane_dispatch.find(plane);
      if (iter != owned_plane_dispatch.end()) {
        int num;

        if (!iter->second) {
          if (ULTRA_VERBOSE)
            ctx.print("Empty communicator. Skip.");
          continue;
        }

        if (req_plane_set.count(plane) == 0) {
          //Console::instance().c_assert(plane >= idMin && plane < idMax, "Missing plane id for broadcasting");
          auto one_plane = planes[plane];
          T const *data =
              one_plane
                  .origin(); // This assumes that index_bases is zero for dims > 1
          num = one_plane.num_elements();
          ctx.format("Send our plane %d (num=%d)", plane, num);
          requests[plane] =
              ghost_methods[method](iter->second.get(), data, num);
        } else {
          auto &one_plane = (ghosts[plane]->get_array());
          auto data = one_plane.data();
          num = one_plane.num_elements();
          ctx.format(
              "Receive some plane %d (num=%d), ptr=%p", plane, num,
              (void *)data);
          requests[plane] =
              ghost_methods[method](iter->second.get(), data, num);
        }
      }
      // If we do not have anything to exchange just skip the communication.
    }

    if (ULTRA_VERBOSE)
      ctx.print("Wait for completion");
    MPI_Communication::WaitAll(requests, statuses);
  }
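
  /*
   * Illustrative sketch (assumed variable names, not from this file): with a
   * local slab `density` of shape [local_planes][N1][N2] and a GhostPlanes
   * object configured through setup(), a typical exchange is:
   *
   *   ghosts.synchronize(density);             // send owned planes, receive ghosts
   *   auto const &plane5 = ghosts.getPlane(5); // cached access to a received plane
   *
   * getPlane() only returns planes that were listed as required in setup();
   * owned planes are read directly from the local slab.
   */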

  /**
   * This function allows one to compute an "adjoint gradient" of the ghost
   * plane algorithm.
   *
   * @param ag_planes similar to synchronize, except that ag_planes is
   *                  modified through communication with sibling nodes.
   * @param method a method to compute the synchronization
   * @see GhostMethod
   */
  void synchronize_ag(
      boost::multi_array_ref<T, (Nd + 1)> &ag_planes,
      GhostMethod method = GHOST_COPY) {
    // Synchronize operations with other members of comm
    ConsoleContext<LOG_DEBUG> ctx(
        "ghost synchronize_ag, maxPlaneId=" + to_string(maxPlaneId));
    RequestArray requests(boost::extents[maxPlaneId]);
    StatusArray statuses(boost::extents[maxPlaneId]);
    std::vector<std::unique_ptr<T[]>> all_tmps;

    for (size_t plane = 0; plane < maxPlaneId; plane++) {
      auto iter = owned_plane_dispatch.find(plane);
      if (iter != owned_plane_dispatch.end()) {
        int num;

        if (!iter->second) {
          if (ULTRA_VERBOSE)
            ctx.print("Empty communicator. Skip.");
          continue;
        }

        if (req_plane_set.count(plane) == 0) {
          //Console::instance().c_assert(plane >= idMin && plane < idMax, "Missing plane id for broadcasting");
          auto one_plane = ag_planes[plane];
          T *tmp_buf;
          T *data =
              one_plane
                  .origin(); // WARNING: This assumes that index_bases is zero for dims > 1
          num = one_plane.num_elements();
          ctx.format("Receive and reduce our plane %d (num=%d)", plane, num);
          Console::instance().c_assert(
              iter->second->rank() == 0,
              "For reception, local rank has to be zero.");
          all_tmps.push_back(std::unique_ptr<T[]>(tmp_buf = new T[num]));
          LibLSS::copy_array_rv(
              boost::multi_array_ref<T, 2>(
                  tmp_buf,
                  boost::extents[one_plane.shape()[0]][one_plane.shape()[1]]),
              one_plane);
          requests[plane] = ghost_methods_ag[method](
              iter->second.get(), tmp_buf, data, num);
        } else {
          auto &one_plane = (ag_ghosts[plane]->get_array());
          auto data = one_plane.data();
          T *tmp_buf;

          num = one_plane.num_elements();
          ctx.format(
              "Send and reduce some plane %d (num=%d), ptr=%p", plane, num,
              (void *)data);
          Console::instance().c_assert(
              iter->second->rank() != 0,
              "For sending, local rank must not be zero.");
          all_tmps.push_back(std::unique_ptr<T[]>(tmp_buf = new T[num]));
          LibLSS::copy_array_rv(
              boost::multi_array_ref<T, 2>(
                  tmp_buf,
                  boost::extents[one_plane.shape()[0]][one_plane.shape()[1]]),
              one_plane);
          requests[plane] = ghost_methods_ag[method](
              iter->second.get(), tmp_buf, data, num);
        }
      }
      // If we do not have anything to exchange just skip the communication.
    }

    MPI_Communication::WaitAll(requests, statuses);
  }
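
  /*
   * Illustrative adjoint-gradient workflow (assumed variable names): the
   * gradient contributions to non-owned planes are accumulated in the ag
   * ghost buffers, then reduced back onto the owning tasks:
   *
   *   ghosts.clear_ghosts();                      // zero the ag buffers
   *   ghosts.ag_getPlane(5)[j][k] += some_value;  // accumulate on a remote plane
   *   ghosts.synchronize_ag(ag_density);          // reduce contributions onto owners
   *
   * Here ag_density is the local slab of adjoint-gradient planes, with the
   * same layout as the array passed to synchronize().
   */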

  /**
   * Return the adjoint gradient plane indicated by the parameter i.
   * @param i plane of interest.
   */
  ArrayType &ag_getPlane(size_t i) {
    auto iter = ag_ghosts.find(i);
    Console::instance().c_assert(
        iter != ag_ghosts.end(), "Invalid ag ghost plane access");
    return iter->second->get_array();
  }

  /**
   * Return the ghost plane indicated by the parameter i.
   * @param i plane of interest.
   */
  ArrayType &getPlane(size_t i) {
    auto iter = ghosts.find(i);
    if (iter == ghosts.end()) {
      Console::instance().print<LOG_ERROR>(
          boost::format("no such ghost plane %d") % i);
      error_helper<ErrorBadState>("Invalid ghost plane access");
    }
    return iter->second->get_array();
  }

  /**
   * Return the ghost plane indicated by the parameter i.
   * @param i plane of interest.
   */
  ArrayType const &getPlane(size_t i) const {
    auto iter = ghosts.find(i);
    if (iter == ghosts.end()) {
      Console::instance().print<LOG_ERROR>(
          boost::format("no such ghost plane %d") % i);
      error_helper<ErrorBadState>("Invalid ghost plane access");
    }
    return iter->second->get_array();
  }
};

} // namespace LibLSS

#endif
// ARES TAG: num_authors = 1
// ARES TAG: name(0) = Guilhem Lavaux
// ARES TAG: year(0) = 2018-2020
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
224
extra/hades/libLSS/tools/symplectic_integrator.hpp
Normal file
224
extra/hades/libLSS/tools/symplectic_integrator.hpp
Normal file
@ -0,0 +1,224 @@
/*+
    ARES/HADES/BORG Package -- ./extra/hades/libLSS/tools/symplectic_integrator.hpp
    Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
    Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>

    Additional contributions from:
       Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)

+*/
#ifndef __LIBLSS_SYMPLECTIC_INTEGRATOR_HPP
#define __LIBLSS_SYMPLECTIC_INTEGRATOR_HPP

#include <boost/multi_array.hpp>
#include "libLSS/tools/console.hpp"
#include "libLSS/tools/array_tools.hpp"
#include "libLSS/tools/fused_array.hpp"
#include "libLSS/tools/fusewrapper.hpp"

namespace LibLSS {

  namespace SymplecticOption {
    enum IntegratorScheme {
      SI_2A,
      SI_2B,
      SI_2C,
      SI_3A,
      SI_4B,
      SI_4C,
      SI_4D,
      SI_6A,
      CG_89
    };

    typedef boost::multi_array<double, 2> IntegratorCoefficients;

    template <int N>
    static inline void
    pushScheme(double coefs[2][N], IntegratorCoefficients &I_coefs) {
      int Ncoefs = N;

      I_coefs.resize(boost::extents[2][Ncoefs]);
      for (int i = 0; i < Ncoefs; i++) {
        I_coefs[0][i] = coefs[0][i];
        I_coefs[1][i] = coefs[1][i];
      }
    }
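
    /*
     * Layout note (a sketch, inferred from how I_coefs is consumed in
     * integrate_dense below): row 0 holds the position ("drift") coefficients
     * a_n and row 1 the momentum ("kick") coefficients b_n, both in units of
     * the time step. For SI_2A, {{0.5, 0.5}, {0.0, 1.0}} therefore expands to
     * the familiar drift-kick-drift leapfrog step:
     *
     *   x += 0.5 * eps * (mass term applied to p)   (stage 0: a_0 = 0.5, b_0 = 0)
     *   p -= eps * grad U(x)                        (stage 1: b_1 = 1)
     *   x += 0.5 * eps * (mass term applied to p)   (stage 1: a_1 = 0.5)
     */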

  }; // namespace SymplecticOption

  struct SymplecticIntegrators {
    typedef SymplecticOption::IntegratorCoefficients IntegratorCoefficients;
    typedef SymplecticOption::IntegratorScheme IntegratorScheme;
    IntegratorCoefficients I_coefs;

    SymplecticIntegrators() { setIntegratorScheme(SymplecticOption::SI_2A); }

    void setIntegratorScheme(IntegratorScheme scheme) {
      using namespace SymplecticOption;

      switch (scheme) {
      case SI_2A: {
        //si2a : standard leapfrog
        double coefs[2][2] = {{0.5, 0.5}, {0.0, 1.0}};
        pushScheme<2>(coefs, I_coefs);
        break;
      }
      case SI_2B: {
        //si2b : pseudo leapfrog
        double coefs[2][2] = {{1.0, 0.0}, {0.5, 0.5}};
        pushScheme<2>(coefs, I_coefs);
        break;
      }
      case SI_2C: {
        //si2c : optimal 2-stage
        double coefs[2][2] = {{1.0 / sqrt(2.), 1.0 - 1.0 / sqrt(2.0)},
                              {1.0 - 1.0 / sqrt(2.0), 1.0 / sqrt(2.0)}};
        pushScheme<2>(coefs, I_coefs);
        break;
      }
      case SI_3A: {
        //si3a : Ruth's third order method
        double coefs[2][3] = {{2.0 / 3.0, -2.0 / 3.0, 1.0},
                              {7.0 / 24.0, 0.75, -1.0 / 24.0}};
        pushScheme<3>(coefs, I_coefs);
        break;
      }
      case SI_4B: {
        //si4b : Calvo and Sanz-Serna's fourth order method
        double coeffs[2][4] = {{0.515352837431122936, -0.085782019412973646,
                                0.441583023616466524, 0.128846158365384185},
                               {0.134496199277431089, -0.224819803079420806,
                                0.756320000515668291, 0.334003603286321425}};
        pushScheme<4>(coeffs, I_coefs);
        break;
      }
      case SI_4C: {
        //si4c : McLachlan and Atela's optimal third order method
        double coeffs[2][5] = {{0.205177661542290, 0.403021281604210,
                                -0.12092087633891, 0.512721933192410, 0.0},
                               {0.061758858135626, 0.33897802655364,
                                0.61479130717558, -0.14054801465937,
                                0.12501982279453}};

        pushScheme<5>(coeffs, I_coefs);
        break;
      }
      case SI_4D: {
        //si4d : fourth order method based on the 2^(1/3) triple-jump composition
        double caux = pow(2., 1. / 3.);
        double coeffs[2][4] = {
            {0.5 / (2. - caux), 0.5 * (1.0 - caux) / (2. - caux),
             0.5 * (1.0 - caux) / (2. - caux), 0.5 / (2. - caux)},
            {0.0, 1.0 / (2. - caux), -caux / (2. - caux), 1.0 / (2. - caux)}};

        pushScheme<4>(coeffs, I_coefs);
        break;
      }
      case SI_6A: {
        //si6a : Yoshida's sixth-order method
        double caux = pow(2., 1. / 3.);
        double coeffs[2][8] = {
            {0.78451361047756, 0.23557321335936, -1.1776799841789,
             1.3151863206839, 0., 0., 0., 0.},
            {0.39225680523878, 0.51004341191846, -0.47105338540976,
             0.068753168252520, 0., 0., 0., 0.}};
        coeffs[0][4] = coeffs[0][2];
        coeffs[0][5] = coeffs[0][1];
        coeffs[0][6] = coeffs[0][0];

        coeffs[1][4] = coeffs[1][3];
        coeffs[1][5] = coeffs[1][2];
        coeffs[1][6] = coeffs[1][1];
        coeffs[1][7] = coeffs[1][0];

        pushScheme<8>(coeffs, I_coefs);
        break;
      }
      case CG_89: {
        constexpr int const i = 4;
        constexpr double const n = 2.;
        double s = std::pow(2 * i, 1 / (n + 1.));
        double coeffs[2][4 * i + 2];
        for (int j = 0; j < i; j++) {
          coeffs[0][2 * j] = 0.5;
          coeffs[0][2 * j + 1] = 0.5;
          coeffs[1][2 * j] = 0.;
          coeffs[1][2 * j + 1] = 1.;
        }
        coeffs[0][2 * i] = -0.5 * s;
        coeffs[0][2 * i + 1] = -0.5 * s;
        coeffs[1][2 * i] = 0;
        coeffs[1][2 * i + 1] = -s;
        int const base = 2 * i + 2;
        for (int j = 0; j < i; j++) {
          coeffs[0][base + 2 * j] = 0.5;
          coeffs[0][base + 2 * j + 1] = 0.5;
          coeffs[1][base + 2 * j] = 0.;
          coeffs[1][base + 2 * j + 1] = 1.;
        }
        pushScheme<4 * i + 2>(coeffs, I_coefs);
        break;
      }
      default:
        error_helper<ErrorBadState>("Unknown integration scheme");
        break;
      }
    }

    template <
        typename MassMatrix, typename GradientVector, typename MomentumVector,
        typename PositionVector, typename GradientFunction>
    void integrate_dense(
        const GradientFunction &gradient, MassMatrix &&masses, double epsilon,
        int Ntime, PositionVector &position, MomentumVector &momentum,
        GradientVector &tmp_gradient) {

      Console &cons = Console::instance();
      Progress<LOG_INFO_SINGLE> &progress =
          cons.start_progress<LOG_INFO_SINGLE>(
              "doing Symplectic integration", Ntime, 10);

      int Ncoefs = I_coefs.shape()[1];
      for (int i_Time = 0; i_Time < Ntime; i_Time++) {

        /// The scheme depends on the chosen integrator order.
        for (int n = 0; n < Ncoefs; n++) {
          double an = I_coefs[0][n] * epsilon;
          double bn = I_coefs[1][n] * epsilon;

          if (bn != 0) {
            gradient(position, tmp_gradient);
            // This is the momentum (kick) update.
            fwrap(momentum) = fwrap(momentum) - fwrap(tmp_gradient) * bn;
          }
          // This is the position (drift) update.
          fwrap(position) = fwrap(position) + masses(momentum, tmp_gradient) * an;
        }
        progress.update(i_Time);
      }

      progress.destroy();
    }
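
    /*
     * In equations (a sketch of what the loop above implements): writing
     * a_n = I_coefs[0][n] and b_n = I_coefs[1][n], one time step of size
     * epsilon applies, for each stage n,
     *
     *   p <- p - epsilon * b_n * grad U(x)   (skipped when b_n = 0)
     *   x <- x + epsilon * a_n * masses(p)
     *
     * where masses(p) stands for the user-supplied mass operator applied to
     * the momentum (a plain element-wise multiplication in integrate() below).
     */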

    template <
        typename MassMatrix, typename GradientVector, typename MomentumVector,
        typename PositionVector, typename GradientFunction>
    void integrate(
        const GradientFunction &gradient, MassMatrix &&masses, double epsilon,
        int Ntime, PositionVector &position, MomentumVector &momentum,
        GradientVector &tmp_gradient) {
      auto mass_op = [&masses](MomentumVector const &m, auto &) {
        return fwrap(m) * fwrap(masses);
      };
      integrate_dense(
          gradient, mass_op, epsilon, Ntime, position, momentum, tmp_gradient);
    }
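
    /*
     * A minimal usage sketch (assumed array types and gradient callback, not
     * part of this header): with fwrap-compatible state arrays, an HMC-style
     * trajectory could be driven as:
     *
     *   SymplecticIntegrators F;
     *   F.setIntegratorScheme(SymplecticOption::SI_2A);
     *   auto gradient = [&](auto const &x, auto &g) { compute_grad_U(x, g); };
     *   F.integrate(gradient, inverse_masses, 0.01, 50, position, momentum, tmp);
     *
     * compute_grad_U and inverse_masses are placeholders for the caller's
     * gradient of the potential and its (inverse) mass weights.
     */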
  };

}; // namespace LibLSS

#endif