Initial import
commit 56a50eead3

0  .aquila-modules  Normal file

31  .atom-build.yml  Normal file
@@ -0,0 +1,31 @@
cmd: "make"
args:
  - "-C"
  - "{PROJECT_PATH}/build"
sh: false
name: "Build All"
errorMatch:
  - "(?<file>([A-Za-z]:[\\/])?[^:\\n]+):(?<line>\\d+):(?<col>\\d+):\\s*(fatal error|error):\\s*(?<message>.+)"
  # - (?<file>[^:\\n]+):(?<line>\\d+):(?<col>\\d+):[\\s\\S]+?Error: (?<message>.+)
warningMatch:
  - "(?<file>([A-Za-z]:[\\/])?[^:\\n]+):(?<line>\\d+):(?<col>\\d+):\\s*(warning):\\s*(?<message>.+)"

targets:
  generate_build:
    cmd: "bash build.sh"
    args:
      - --purge
      - --c_compiler=/usr/bin/gcc
      - --cxx_compiler=/usr/bin/g++
    name: "Setup ARES build"

  generate_build_mpi:
    cmd: "bash build.sh"
    name: "Setup ARES build with MPI"
    args:
      - --purge
      - --with-mpi
      - --c_compiler
      - /usr/bin/gcc
      - --cxx_compiler
      - /usr/bin/g++
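
The errorMatch and warningMatch entries above are JavaScript-flavoured regular expressions with named capture groups, which atom-build uses to turn compiler output into clickable file:line:col locations. As a sanity check, here is a minimal Python sketch of the same pattern; Python spells named groups (?P<name>...) instead of (?<name>...), and the sample error line is made up for illustration:

import re

# Python translation of the errorMatch pattern above.
error_re = re.compile(
    r"(?P<file>([A-Za-z]:[\\/])?[^:\n]+):(?P<line>\d+):(?P<col>\d+):"
    r"\s*(fatal error|error):\s*(?P<message>.+)"
)

sample = "src/main.cpp:42:13: error: expected ';' after expression"
m = error_re.search(sample)
if m:
    # -> src/main.cpp 42 13 expected ';' after expression
    print(m.group("file"), m.group("line"), m.group("col"), m.group("message"))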

20  .clang-format  Normal file
@@ -0,0 +1,20 @@
BasedOnStyle: llvm
IndentWidth: 2
AlignAfterOpenBracket: AlwaysBreak
AlignEscapedNewlines: Right
Standard: Cpp11
SortIncludes: false
PointerAlignment: Right
MaxEmptyLinesToKeep: 1
ReflowComments: false
IndentPPDirectives: AfterHash
CompactNamespaces: false
IndentCaseLabels: false
AlwaysBreakTemplateDeclarations: true
NamespaceIndentation: All


BraceWrapping:
  AfterControlStatement: false
  AfterFunction: false
  AfterNamespace: false

1  .gitattributes  vendored  Normal file
@@ -0,0 +1 @@
* text=auto eol=lf

58  .gitignore  vendored  Normal file
@@ -0,0 +1,58 @@
# Ignore all
*

# Ignore .gitignore and .gitmodules
.gitignore
.gitmodules

!Jenkinsfile

# Unignore all with extensions
!*.*

# Unignore Dockerfile
!Dockerfile

# Unignore all dirs
!*/

### Above combination will ignore all files without extension ###

# Ignore executable files
*.[oa]
*.out

# Ignore Python bytecode file
*.pyc

# Ignore all files in these directories
.texpadtmp/
downloads/
build/

# Ignore swap files
*~
.DS_Store


extra/*/
!extra/demo/

examples/**
!examples/*.ini
!examples/2MPP.txt
!examples/completeness_*.fits
!examples/one.fits

docs/tex/
docs/sphinx/_build/
docs/sphinx/_static/
build*/
docs/doxyoutput/
docs/_build/
docs/api/
docs/_static/doxy_html/
docs/source/_generate/
!docs/source/user/building/

!build_tools/
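
The ignore-everything-then-unignore pattern above (`*`, then `!*.*`, `!*/`, and a few named exceptions) is what makes extensionless build artifacts disappear while keeping sources and directories visible. A hedged sketch of how one could probe the rules with git check-ignore; the paths are made up for illustration:

import subprocess

# Ask git which of these hypothetical paths the rules above ignore.
paths = ["notes", "src/solver.cpp", "a.out", "build/cache.txt", "Dockerfile"]
result = subprocess.run(
    ["git", "check-ignore", "--verbose", *paths],
    capture_output=True, text=True
)
# git check-ignore prints one line per *ignored* path with the matching rule:
# the extensionless "notes" matches the catch-all "*" rule and "a.out"
# matches "*.out", while "src/solver.cpp" is rescued by "!*.*" and
# "Dockerfile" by "!Dockerfile", so neither produces output.
print(result.stdout)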

4  .gitmodules  vendored  Normal file
@@ -0,0 +1,4 @@
[submodule "external/cosmotool"]
	path = external/cosmotool
	url = https://bitbucket.org/glavaux/cosmotool/
	ignore = dirty

20  .readthedocs.yml  Normal file
@@ -0,0 +1,20 @@

# Required
version: 2

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/source/conf.py

# Build documentation with MkDocs
#mkdocs:
#  configuration: mkdocs.yml

# Optionally build your docs in additional formats such as PDF and ePub
formats: all

# Optionally set the version of Python and requirements required to build your docs
python:
  version: 3.7
  install:
    - requirements: docs/requirements.txt

27  .travis.yml  Normal file
@@ -0,0 +1,27 @@
git:
  depth: 3
  submodules: true

language: cpp


install:
  - DEPS_DIR="${TRAVIS_BUILD_DIR}/deps"
  - mkdir ${DEPS_DIR} && cd ${DEPS_DIR}
  - travis_retry wget --no-check-certificate https://github.com/Kitware/CMake/releases/download/v3.17.1/cmake-3.17.1-Linux-x86_64.tar.gz
  - echo "" > cmake_md5.txt
  - md5sum -c cmake_md5.txt
  - tar -xvf cmake-3.17.1-Linux-x86_64.tar.gz > /dev/null
  - mv cmake-3.17.1-Linux-x86_64 cmake-install
  - PATH=${DEPS_DIR}/cmake-install/bin:${PATH}


before_script:
  - sh get-aquila-modules.sh --clone
  - sh get-aquila-modules.sh --branch-set
  - sh build.sh --download-deps
  - sh build.sh --use-predownload --python

script:
  - cd build
  - make
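
One caveat in the install section above: an empty string is written to cmake_md5.txt, so the `md5sum -c` step has no real digest to verify; it reads like a placeholder for the actual checksum. For reference, a small Python sketch of what the verification amounts to once a digest is filled in; EXPECTED_MD5 below is a made-up value, not the real CMake 3.17.1 checksum:

import hashlib

EXPECTED_MD5 = "0123456789abcdef0123456789abcdef"  # hypothetical placeholder

def md5_of(path):
    # Stream the file in 1 MiB chunks to avoid loading it all in memory.
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    ok = md5_of("cmake-3.17.1-Linux-x86_64.tar.gz") == EXPECTED_MD5
    print("checksum ok" if ok else "checksum MISMATCH")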

23  .vscode/c_cpp_properties.json  vendored  Normal file
@@ -0,0 +1,23 @@
{
  "configurations": [
    {
      "name": "Linux",
      "defines": [
        "${default}"
      ],
      "compilerPath": "/usr/bin/gcc",
      "includePath": [
        "${workspaceFolder}",
        "${workspaceFolder}/extra/**",
        "${env:ares_build}/",
        "${env:ares_build}/external_build/eigen-prefix/src/eigen",
        "${env:ares_build}/ext_install/include",
        "/usr/include/eigen3"
      ],
      "cStandard": "c11",
      "cppStandard": "c++14",
      "intelliSenseMode": "gcc-x64"
    }
  ],
  "version": 4
}

104  .vscode/settings.json  vendored  Normal file
@@ -0,0 +1,104 @@
{
  "editor.formatOnSave": true,
  "C_Cpp.default.compilerPath": "g++",
  "C_Cpp.default.includePath": [
    "${workspaceFolder}",
    "${workspaceFolder}/extra/**",
    "${env:ARES_BUILD}",
    "${env:ARES_BUILD}/external_build/eigen-prefix/src/eigen",
    "${env:ARES_BUILD}/ext_install/include"
  ],
  "cmake.configureOnOpen": false,
  "files.associations": {
    "cctype": "cpp",
    "clocale": "cpp",
    "cmath": "cpp",
    "cstdarg": "cpp",
    "cstddef": "cpp",
    "cstdio": "cpp",
    "cstdlib": "cpp",
    "cstring": "cpp",
    "ctime": "cpp",
    "cwchar": "cpp",
    "cwctype": "cpp",
    "*.ipp": "cpp",
    "array": "cpp",
    "atomic": "cpp",
    "strstream": "cpp",
    "bit": "cpp",
    "*.tcc": "cpp",
    "bitset": "cpp",
    "chrono": "cpp",
    "complex": "cpp",
    "condition_variable": "cpp",
    "cstdint": "cpp",
    "deque": "cpp",
    "forward_list": "cpp",
    "list": "cpp",
    "map": "cpp",
    "set": "cpp",
    "unordered_map": "cpp",
    "unordered_set": "cpp",
    "vector": "cpp",
    "exception": "cpp",
    "algorithm": "cpp",
    "functional": "cpp",
    "iterator": "cpp",
    "memory": "cpp",
    "memory_resource": "cpp",
    "numeric": "cpp",
    "optional": "cpp",
    "random": "cpp",
    "ratio": "cpp",
    "string": "cpp",
    "string_view": "cpp",
    "system_error": "cpp",
    "tuple": "cpp",
    "type_traits": "cpp",
    "utility": "cpp",
    "fstream": "cpp",
    "initializer_list": "cpp",
    "iomanip": "cpp",
    "iosfwd": "cpp",
    "iostream": "cpp",
    "istream": "cpp",
    "limits": "cpp",
    "mutex": "cpp",
    "new": "cpp",
    "ostream": "cpp",
    "sstream": "cpp",
    "stdexcept": "cpp",
    "streambuf": "cpp",
    "thread": "cpp",
    "cfenv": "cpp",
    "cinttypes": "cpp",
    "typeindex": "cpp",
    "typeinfo": "cpp",
    "valarray": "cpp",
    "variant": "cpp",
    "csetjmp": "cpp",
    "csignal": "cpp",
    "codecvt": "cpp",
    "regex": "cpp",
    "hash_map": "cpp",
    "rope": "cpp",
    "slist": "cpp",
    "future": "cpp",
    "shared_mutex": "cpp",
    "hash_set": "cpp",
    "scoped_allocator": "cpp",
    "any": "cpp",
    "compare": "cpp",
    "concepts": "cpp",
    "coroutine": "cpp",
    "source_location": "cpp",
    "ranges": "cpp",
    "span": "cpp",
    "stop_token": "cpp"
  },
  "python.formatting.provider": "yapf",
  "C_Cpp.dimInactiveRegions": false,
  "C_Cpp.errorSquiggles": "Enabled",
  "python.linting.enabled": false,
  "restructuredtext.confPath": ""
}

27  .vscode/tasks.json  vendored  Normal file
@@ -0,0 +1,27 @@
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "Build ARES",
      "type": "process",
      "command": "nice",
      "args": [
        "make",
        "all"
      ],
      "options": {
        "cwd": "${env:ARES_BUILD}"
      },
      "problemMatcher": {
        "base": "$gcc",
        "fileLocation": [
          "absolute"
        ]
      },
      "group": {
        "kind": "build",
        "isDefault": true
      }
    }
  ]
}

76  CHANGES.rst  Normal file
@@ -0,0 +1,76 @@
Release notes
=============

This file only lists the most important changes to each version. We try to follow semantic versioning:

- Major release means API incompatibilities
- Minor release means API compatibilities, but significant feature differences
- Bugfix release only fixes bugs

Release 2.1
-----------

- An option to control the verbosity in the log file has been added ("system/logfile_verbose_level", v2.1.3)

Forward related
^^^^^^^^^^^^^^^

- Add a way of transforming all bias models into a forward deterministic transition. This gives more flexibility, at the cost of losing performance/memory
  by doing more computations than required. For example, each subcatalog needs its own bias, which could trigger quite a lot of recomputation and/or caching.
- PMv2 optimization when sampling.
- Implement a simple (non-MPI) Haar transform.
- Add EnforceMass model element to artificially fix the mass conservation.
- Forward models may support a new behavior for adjointModel_v2: they can accumulate all adjoint vectors that are provided to them through
  adjointModel_v2. The new behavior must be requested by calling BORGForwardModel::accumulateAdjoint. In that case, the user is explicitly
  requested to clear the adjoint gradient when the computation is done, by calling BORGForwardModel::clearAdjointGradient.
  That behavior has been ported to pyborg. If the mode is not supported, an exception will be triggered. (A usage sketch follows this file.)
- Merged Altair code.
- Bind ClassCosmo to ARES. The Python binding is also active and vectorized for get_Tk.

Sampler related
^^^^^^^^^^^^^^^

- Add CG89 "higher order" symplectic integrator.

API related
^^^^^^^^^^^

- The ManyPower bias model now needs a likelihood info entry to set the width of the prior on parameters. The name is ManyPower_prior_width in [info].
- Code cleanup in the velocity field estimator. It also now supports Simplex-In-Cell (no adjoint gradient yet, and only non-MPI).
- Models accept a broader range of parameters using BORGForwardModel::setModelParams.

Python related
^^^^^^^^^^^^^^

- *NEW tool* hades_python, which supports a full deterministic transition written in python/tensorflow/jax. Data loading is still work in progress and
  may need hacking at the moment.
- The Python extension supports LikelihoodInfo and the bias as a forward model element.
- Add a 'setup.py' to support compiling the BORG python module directly with pip and packaging as a wheel file.
- Samplers fully supported from Python.

Build related
^^^^^^^^^^^^^

- build.sh only downloads a dependency if the file is not already there.
- Error reporting includes a full C++ stacktrace on supported platforms (cmake flag STACKTRACE_USE_BACKTRACE=ON; experimental at the moment,
  it can be turned off).
- Added git hooks to check basic text elements (like formatting) before running commits.
  The absence of clang-format may be overridden using ARES_CLANG_OVERRIDE=1.

Release 2.0alpha
----------------

- HMC now uses a prior that is purely Gaussian with unit variance (in Fourier space). The cosmology is completely moved into a BORGForwardModel.
- BORGForwardModel adds the v2 API to execute models: forwardModel_v2 and adjointModel_v2. This relies heavily on the mechanics of ModelIO.
- Deterministic models are now self-registering and the available lists can be dynamically queried.
- Add a hook to optionally dump extra bias fields.
- Add QLPT and QLPT_RSD forward models in extra/borg.
- Lots of documentation reorganization.
- Added the Lyman-alpha model in extra/borg.
- Merged the EFT likelihood effort in extra/borg.

Release 1.0
-----------

Initial release
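
As referenced in the Release 2.1 notes above, here is a self-contained mock of the accumulate-adjoint protocol described for adjointModel_v2. Only accumulateAdjoint and clearAdjointGradient are named in the changelog; the class, getAdjointModelOutput, and the accumulation rule are illustrative assumptions, not the real pyborg API:

import numpy as np

# Minimal mock of the accumulate-adjoint behavior; real signatures may differ.
class MockForwardModel:
    def __init__(self):
        self._accumulate = False
        self._gradient = None

    def accumulateAdjoint(self, enable):
        # Request the accumulation behavior (the real model raises if unsupported).
        self._accumulate = enable

    def adjointModel_v2(self, ag):
        # With accumulation on, every adjoint vector adds into the gradient.
        if self._gradient is None or not self._accumulate:
            self._gradient = ag.copy()
        else:
            self._gradient += ag

    def getAdjointModelOutput(self):
        return self._gradient  # assumed accessor name

    def clearAdjointGradient(self):
        # The changelog asks the caller to clear explicitly when done.
        self._gradient = None

model = MockForwardModel()
model.accumulateAdjoint(True)
for ag in (np.ones(4), 2 * np.ones(4)):   # e.g. one adjoint vector per subcatalog
    model.adjointModel_v2(ag)
print(model.getAdjointModelOutput())       # -> [3. 3. 3. 3.]
model.clearAdjointGradient()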

194  CMakeLists.txt  Normal file
@@ -0,0 +1,194 @@
cmake_minimum_required(VERSION 3.10)
cmake_policy(SET CMP0074 NEW)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_VISIBILITY_PRESET hidden)

list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules")

IF(DEFINED ARES_PREFIX_PATH)
  SET(ENV{CMAKE_PREFIX_PATH} "${ARES_PREFIX_PATH}")
  SET(CMAKE_PREFIX_PATH "${ARES_PREFIX_PATH}")
ENDIF()

SET(CMAKE_SKIP_BUILD_RPATH FALSE)
SET(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)

option(BUILD_JULIA "Activate the Julia support" OFF)

PROJECT(ARES CXX C)

set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS Debug Release RelWithDebInfo)

# Import required cmake modules
include(color_msg)
include(GetGitRevisionDescription)
include(ExternalProject)
include(CTest)
# Not used anymore
#include(GenOptMacro)
include(CheckCXXCompilerFlag)
include(CheckCCompilerFlag)
#include(FortranCInterface)
include(FindOpenMP)
include(FindPkgConfig)
include(clang-format)
include(FetchContent)


IF (CMAKE_C_COMPILER_ID MATCHES "AppleClang" OR CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
  IF (NOT ${CMAKE_C_COMPILER_ID} STREQUAL ${CMAKE_CXX_COMPILER_ID})
    message(WARNING "C and C++ compilers have different IDs: ${CMAKE_C_COMPILER_ID} != ${CMAKE_CXX_COMPILER_ID}")
  ENDIF()
  message(WARNING "AppleClang does not support OpenMP. Please use something else for more performance.")
  SET(DEFAULT_ENABLE_OPENMP OFF)
ELSE()
  SET(DEFAULT_ENABLE_OPENMP ON)
ENDIF()

# Options
OPTION(ENABLE_OPENMP "Activate OpenMP support" ${DEFAULT_ENABLE_OPENMP})
OPTION(DISABLE_DEBUG_OUTPUT "No debug output support (faster)" OFF)
OPTION(ENABLE_MPI "MPI support" OFF)
OPTION(CONTEXT_TIMER "Activate profiling of LibLSS contexts" OFF)
OPTION(USE_NATIVE_ARCH "Activate instruction set supported by the running system" OFF)
OPTION(ENABLE_FULL_WARNINGS "Ask the compiler to produce lots of warnings" OFF)
OPTION(BUILD_PYTHON_EXTENSION "Build the Python BORG extension" OFF)

IF(ENABLE_MPI)
  find_package(MPI)
  set(EXTRA_LIB ${MPI_C_LIBRARIES})
ELSE(ENABLE_MPI)
  SET(EXTRA_LIB)
  SET(MPI_C_INCLUDE_PATH)
ENDIF(ENABLE_MPI)

IF(USE_NATIVE_ARCH)
  CHECK_CXX_COMPILER_FLAG("-march=native" COMPILER_SUPPORTS_MARCH_NATIVE)
  if(COMPILER_SUPPORTS_MARCH_NATIVE)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
  endif()
endif()


include(${CMAKE_SOURCE_DIR}/external/external_build.cmake)

find_program(PYTHON_EXECUTABLE NAMES python3)
IF (NOT PYTHON_EXECUTABLE)
  cmessage(FATAL_ERROR "A Python3 interpreter is required to compile ARES")
ENDIF()

IF (BUILD_JULIA)
  find_package(Julia)
  if (NOT JULIA_EXECUTABLE)
    cmessage(CWARNING "Julia not found, will not be built")
    set(BUILD_JULIA OFF)
  endif()
ENDIF()

IF(BUILD_PYTHON_EXTENSION)
  FetchContent_MakeAvailable(pybind11)
ENDIF()

FetchContent_MakeAvailable(r3d)

# Retrieve current git revision
git_describe(GIT_VER)

IF (DEFINED CMAKE_C_COMPILER_VERSION)
  cmessage(STATUS "C compiler version: ${CMAKE_C_COMPILER_VERSION}")
  cmessage(STATUS "C++ compiler version: ${CMAKE_CXX_COMPILER_VERSION}")
  if (NOT ${CMAKE_C_COMPILER_VERSION} EQUAL ${CMAKE_CXX_COMPILER_VERSION})
    cmessage(FATAL_ERROR "C and C++ compiler versions are different. Please fix parameters.")
  ENDIF()
ELSE()
  cmessage(WARNING "Cannot check compiler versions. Proceed with caution.")
ENDIF()

SET(CMAKE_CXX_FLAGS_PROFILE "-O3 -pg" CACHE STRING "Flags to turn on profiling for C++ compiler")
SET(CMAKE_C_FLAGS_PROFILE "-O3 -pg" CACHE STRING "Flags to turn on profiling for C compiler")
SET(CMAKE_EXE_LINKER_FLAGS_PROFILE "-pg" CACHE STRING "Flags to turn on profiling in linker")
mark_as_advanced(CMAKE_CXX_FLAGS_PROFILE CMAKE_C_FLAGS_PROFILE CMAKE_EXE_LINKER_FLAGS_PROFILE)

find_library(ZLIB_LIBRARY z)
find_library(_pre_RT_LIBRARY rt)
if(_pre_RT_LIBRARY)
  SET(RT_LIBRARY ${_pre_RT_LIBRARY})
ENDIF()
find_library(DL_LIBRARY dl)


include_directories(
  ${Boost_INCLUDE_DIRS}
  ${GSL_INCLUDE}
  ${CMAKE_SOURCE_DIR}
  ${EIGEN_INCLUDE_DIRS}
  ${EXT_INSTALL}/include
  ${COSMOTOOL_INCLUDE}
  ${HDF5_INCLUDE_DIR}
  ${FFTW_INCLUDE_DIR}
  ${MPI_C_INCLUDE_PATH}
)

#
# OpenMP handling
#

IF(ENABLE_OPENMP)

  IF (NOT OPENMP_FOUND)
    MESSAGE(FATAL_ERROR "No known compiler option for enabling OpenMP")
  ENDIF(NOT OPENMP_FOUND)

  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
  SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_C_FLAGS}")

ENDIF(ENABLE_OPENMP)

IF (DISABLE_DEBUG_OUTPUT)
  add_definitions(-DLIBLSS_CONSOLE_NO_DEBUG_SUPPORT)
ENDIF (DISABLE_DEBUG_OUTPUT)

IF(ENABLE_MPI)
  add_definitions(-DARES_MPI_FFTW)
ENDIF (ENABLE_MPI)

IF (CONTEXT_TIMER)
  add_definitions(-DLIBLSS_TIMED_CONTEXT)
ENDIF (CONTEXT_TIMER)

add_definitions(-DBOOST_ENABLE_ASSERT_DEBUG_HANDLER)

SET(ARES_INCLUDE_PATH)


# Detect extra modules
#
#
file(GLOB ARES_MODULES LIST_DIRECTORIES true RELATIVE ${CMAKE_SOURCE_DIR}/extra ${CMAKE_SOURCE_DIR}/extra/*)

# Remove spurious contaminating OSX directories
list(REMOVE_ITEM ARES_MODULES .DS_Store)

message(STATUS "ARES modules found:")
foreach(module IN LISTS ARES_MODULES)
  if (EXISTS ${CMAKE_SOURCE_DIR}/extra/${module}/DO_NOT_BUILD)
    list(REMOVE_ITEM ARES_MODULES ${module})
    cmessage(CWARNING "  ${module} (do not build)")
  else()
    message(STATUS "  ${module}")
  endif()
endforeach()

add_subdirectory(libLSS)
add_subdirectory(src)

foreach(module IN LISTS ARES_MODULES)
  if (EXISTS ${CMAKE_SOURCE_DIR}/extra/${module}/${module}.cmake)
    include(${CMAKE_SOURCE_DIR}/extra/${module}/${module}.cmake)
  endif()
endforeach()

setup_formatter(ARES_MODULES)
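
For readers unfamiliar with CMake, here is a Python rendering of the module-detection loop at the end of the CMakeLists.txt above; the directory layout is assumed from the code itself, not verified against the repository:

from pathlib import Path

def detect_modules(source_dir):
    # Every entry under extra/ is a candidate module, .DS_Store noise is
    # dropped, and a DO_NOT_BUILD marker file excludes a module.
    extra = Path(source_dir) / "extra"
    modules = []
    print("ARES modules found:")
    for entry in sorted(extra.iterdir()):
        if entry.name == ".DS_Store":
            continue
        if (entry / "DO_NOT_BUILD").exists():
            print(f"  {entry.name} (do not build)")
        else:
            print(f"  {entry.name}")
            modules.append(entry.name)
    return modules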

164  Jenkinsfile  vendored  Normal file
@@ -0,0 +1,164 @@
pipeline {
    agent any

    options { buildDiscarder(logRotator(numToKeepStr: '5')) }

    environment {
        DOC_DEPLOYER = credentials('Doc deployment')
    }

    stages {
        stage('Preparation') {
            steps {
                script {
                    branchName = 'release/2.1'
                    cred = '0c503fb7-7bad-459f-81f1-71467b382d39'

                    env.PYTHON_VENV = """${sh(
                        returnStdout: true,
                        script: 'echo "${WORKSPACE}/${BUILD_TAG}"'
                    )}"""
                }
            }
        }
        stage('Source') { // for display purposes
            steps {
                git branch: branchName, credentialsId: cred, url: 'git@bitbucket.org:aquila-consortium/borg_public.git'

                sh 'python3 -m venv ${PYTHON_VENV}'
                sh 'ls && echo ${PYTHON_VENV} && ls ${PYTHON_VENV}'
                sh 'test -e ${PYTHON_VENV}'

                sh 'git submodule init'
                sh 'git submodule update --recursive'
            }
        }
        stage('Download deps') {
            steps {
                ansiColor('xterm') {
                    sh '''
                        bash build.sh --download-deps
                    '''
                }
            }
        }
        stage('Configure') {
            steps {
                ansiColor('xterm') {
                    sh '''
                        . ${WORKSPACE}/${BUILD_TAG}/bin/activate
                        CMAKE_PREFIX_PATH=${VIRTUAL_ENV}:/opt/boost
                        export CMAKE_PREFIX_PATH
                        bash build.sh --python --use-system-boost=/opt/boost --use-system-fftw --use-system-hdf5 --native --purge --extra-flags -DINSTALL_PYTHON_LOCAL=OFF
                    '''
                }
            }
        }
        stage('Build') {
            steps {
                ansiColor('xterm') {
                    dir('build') {
                        sh 'make -j6'
                    }
                }
            }
        }
        stage('Tests') {
            steps {
                dir('build') {
                    sh 'make test'
                }
            }
        }
        stage('Install') {
            steps {
                dir('build') {
                    sh 'make install'
                }
            }
        }
        stage("Doc") {
            steps {
                dir('docs') {
                    sh """
                        . ${WORKSPACE}/${BUILD_TAG}/bin/activate
                        pip3 install wheel
                        pip3 install -r requirements.txt
                        rm -fr source/_generate
                        rm -fr _build
                        make SHELL=/bin/bash html
                        tar -C _build/html -zcvf doc.tgz .
                        curl -v -F filename=doc -F file=@doc.tgz http://athos.iap.fr:9595/deploy-doc2/$DOC_DEPLOYER/v2.1-public
                    """
                }
            }
        }
    }
    post {
        failure {
            notifyBuild("FAIL")
        }
        success {
            notifyBuild("SUCCESS")
        }
        cleanup {

            /* clean up our workspace */
            //deleteDir()
            /* clean up tmp directory */

            //dir("${WORKSPACE}/${BUILD_TAG}") {
            //    deleteDir()
            //}
            dir("${workspace}@tmp") {
                deleteDir()
            }
            dir("extra/hades@tmp") {
                deleteDir()
            }
            dir("extra/borg@tmp") {
                deleteDir()
            }
            dir("extra/ares_fg@tmp") {
                deleteDir()
            }
            dir("extra/python@tmp") {
                deleteDir()
            }
        }
    }
}

def notifyBuild(String buildStatus = 'STARTED') {
    // build status of null means successful
    buildStatus = buildStatus ?: 'SUCCESS'

    // Default values
    def colorName = 'RED'
    def colorCode = '#FF0000'
    def subject = "${buildStatus}: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'"
    def summary = "${subject} (${env.BUILD_URL})"

    // Override default values based on build status
    if (buildStatus == 'STARTED') {
        colorName = 'YELLOW'
        colorCode = '#0000FF'
    } else if (buildStatus == 'SUCCESS') {
        colorName = 'GREEN'
        colorCode = '#00FF00'
    } else {
        colorName = 'RED'
        colorCode = '#FF0000'
    }

    def details = """<p>STARTED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]':</p>
        <p>Check console output at "<a href='${env.BUILD_URL}'>${env.JOB_NAME} [${env.BUILD_NUMBER}]</a>"</p>
        <p>Build status: <span style='color: ${colorCode};'>${buildStatus}</span></p>"""

    emailext (
        mimeType: 'text/html',
        subject: subject,
        body: details,
        recipientProviders: [developers(), requestor()]
    )
}

519  License_CeCILL_V2.1.txt  Normal file
@@ -0,0 +1,519 @@

CeCILL FREE SOFTWARE LICENSE AGREEMENT

Version 2.1 dated 2013-06-21


Notice

This Agreement is a Free Software license agreement that is the result
of discussions between its authors in order to ensure compliance with
the two main principles guiding its drafting:

  * firstly, compliance with the principles governing the distribution
    of Free Software: access to source code, broad rights granted to users,
  * secondly, the election of a governing law, French law, with which it
    is conformant, both as regards the law of torts and intellectual
    property law, and the protection that it offers to both authors and
    holders of the economic rights over software.

The authors of the CeCILL (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre])
license are:

Commissariat à l'énergie atomique et aux énergies alternatives - CEA, a
public scientific, technical and industrial research establishment,
having its principal place of business at 25 rue Leblanc, immeuble Le
Ponant D, 75015 Paris, France.

Centre National de la Recherche Scientifique - CNRS, a public scientific
and technological establishment, having its principal place of business
at 3 rue Michel-Ange, 75794 Paris cedex 16, France.

Institut National de Recherche en Informatique et en Automatique -
Inria, a public scientific and technological establishment, having its
principal place of business at Domaine de Voluceau, Rocquencourt, BP
105, 78153 Le Chesnay cedex, France.


Preamble

The purpose of this Free Software license agreement is to grant users
the right to modify and redistribute the software governed by this
license within the framework of an open source distribution model.

The exercising of this right is conditional upon certain obligations for
users so as to preserve this status for all subsequent redistributions.

In consideration of access to the source code and the rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors only have limited liability.

In this respect, the risks associated with loading, using, modifying
and/or developing or reproducing the software by the user are brought to
the user's attention, given its Free Software status, which may make it
complicated to use, with the result that its use is reserved for
developers and experienced professionals having in-depth computer
knowledge. Users are therefore encouraged to load and test the
suitability of the software as regards their requirements in conditions
enabling the security of their systems and/or data to be ensured and,
more generally, to use and operate it in the same conditions of
security. This Agreement may be freely reproduced and published,
provided it is not altered, and that no provisions are either added or
removed herefrom.

This Agreement may apply to any or all software for which the holder of
the economic rights decides to submit the use thereof to its provisions.

Frequently asked questions can be found on the official website of the
CeCILL licenses family (http://www.cecill.info/index.en.html) for any
necessary clarification.


Article 1 - DEFINITIONS

For the purpose of this Agreement, when the following expressions
commence with a capital letter, they shall have the following meaning:

Agreement: means this license agreement, and its possible subsequent
versions and annexes.

Software: means the software in its Object Code and/or Source Code form
and, where applicable, its documentation, "as is" when the Licensee
accepts the Agreement.

Initial Software: means the Software in its Source Code and possibly its
Object Code form and, where applicable, its documentation, "as is" when
it is first distributed under the terms and conditions of the Agreement.

Modified Software: means the Software modified by at least one
Contribution.

Source Code: means all the Software's instructions and program lines to
which access is required so as to modify the Software.

Object Code: means the binary files originating from the compilation of
the Source Code.

Holder: means the holder(s) of the economic rights over the Initial
Software.

Licensee: means the Software user(s) having accepted the Agreement.

Contributor: means a Licensee having made at least one Contribution.

Licensor: means the Holder, or any other individual or legal entity, who
distributes the Software under the Agreement.

Contribution: means any or all modifications, corrections, translations,
adaptations and/or new functions integrated into the Software by any or
all Contributors, as well as any or all Internal Modules.

Module: means a set of sources files including their documentation that
enables supplementary functions or services in addition to those offered
by the Software.

External Module: means any or all Modules, not derived from the
Software, so that this Module and the Software run in separate address
spaces, with one calling the other when they are run.

Internal Module: means any or all Module, connected to the Software so
that they both execute in the same address space.

GNU GPL: means the GNU General Public License version 2 or any
subsequent version, as published by the Free Software Foundation Inc.

GNU Affero GPL: means the GNU Affero General Public License version 3 or
any subsequent version, as published by the Free Software Foundation Inc.

EUPL: means the European Union Public License version 1.1 or any
subsequent version, as published by the European Commission.

Parties: mean both the Licensee and the Licensor.

These expressions may be used both in singular and plural form.


Article 2 - PURPOSE

The purpose of the Agreement is the grant by the Licensor to the
Licensee of a non-exclusive, transferable and worldwide license for the
Software as set forth in Article 5 <#scope> hereinafter for the whole
term of the protection granted by the rights over said Software.


Article 3 - ACCEPTANCE

3.1 The Licensee shall be deemed as having accepted the terms and
conditions of this Agreement upon the occurrence of the first of the
following events:

  * (i) loading the Software by any or all means, notably, by
    downloading from a remote server, or by loading from a physical medium;
  * (ii) the first time the Licensee exercises any of the rights granted
    hereunder.

3.2 One copy of the Agreement, containing a notice relating to the
characteristics of the Software, to the limited warranty, and to the
fact that its use is restricted to experienced users has been provided
to the Licensee prior to its acceptance as set forth in Article 3.1
<#accepting> hereinabove, and the Licensee hereby acknowledges that it
has read and understood it.


Article 4 - EFFECTIVE DATE AND TERM


4.1 EFFECTIVE DATE

The Agreement shall become effective on the date when it is accepted by
the Licensee as set forth in Article 3.1 <#accepting>.


4.2 TERM

The Agreement shall remain in force for the entire legal term of
protection of the economic rights over the Software.


Article 5 - SCOPE OF RIGHTS GRANTED

The Licensor hereby grants to the Licensee, who accepts, the following
rights over the Software for any or all use, and for the term of the
Agreement, on the basis of the terms and conditions set forth hereinafter.

Besides, if the Licensor owns or comes to own one or more patents
protecting all or part of the functions of the Software or of its
components, the Licensor undertakes not to enforce the rights granted by
these patents against successive Licensees using, exploiting or
modifying the Software. If these patents are transferred, the Licensor
undertakes to have the transferees subscribe to the obligations set
forth in this paragraph.


5.1 RIGHT OF USE

The Licensee is authorized to use the Software, without any limitation
as to its fields of application, with it being hereinafter specified
that this comprises:

 1. permanent or temporary reproduction of all or part of the Software
    by any or all means and in any or all form.

 2. loading, displaying, running, or storing the Software on any or all
    medium.

 3. entitlement to observe, study or test its operation so as to
    determine the ideas and principles behind any or all constituent
    elements of said Software. This shall apply when the Licensee
    carries out any or all loading, displaying, running, transmission or
    storage operation as regards the Software, that it is entitled to
    carry out hereunder.


5.2 ENTITLEMENT TO MAKE CONTRIBUTIONS

The right to make Contributions includes the right to translate, adapt,
arrange, or make any or all modifications to the Software, and the right
to reproduce the resulting software.

The Licensee is authorized to make any or all Contributions to the
Software provided that it includes an explicit notice that it is the
author of said Contribution and indicates the date of the creation thereof.


5.3 RIGHT OF DISTRIBUTION

In particular, the right of distribution includes the right to publish,
transmit and communicate the Software to the general public on any or
all medium, and by any or all means, and the right to market, either in
consideration of a fee, or free of charge, one or more copies of the
Software by any means.

The Licensee is further authorized to distribute copies of the modified
or unmodified Software to third parties according to the terms and
conditions set forth hereinafter.


5.3.1 DISTRIBUTION OF SOFTWARE WITHOUT MODIFICATION

The Licensee is authorized to distribute true copies of the Software in
Source Code or Object Code form, provided that said distribution
complies with all the provisions of the Agreement and is accompanied by:

 1. a copy of the Agreement,

 2. a notice relating to the limitation of both the Licensor's warranty
    and liability as set forth in Articles 8 and 9,

and that, in the event that only the Object Code of the Software is
redistributed, the Licensee allows effective access to the full Source
Code of the Software for a period of at least three years from the
distribution of the Software, it being understood that the additional
acquisition cost of the Source Code shall not exceed the cost of the
data transfer.


5.3.2 DISTRIBUTION OF MODIFIED SOFTWARE

When the Licensee makes a Contribution to the Software, the terms and
conditions for the distribution of the resulting Modified Software
become subject to all the provisions of this Agreement.

The Licensee is authorized to distribute the Modified Software, in
source code or object code form, provided that said distribution
complies with all the provisions of the Agreement and is accompanied by:

 1. a copy of the Agreement,

 2. a notice relating to the limitation of both the Licensor's warranty
    and liability as set forth in Articles 8 and 9,

and, in the event that only the object code of the Modified Software is
redistributed,

 3. a note stating the conditions of effective access to the full source
    code of the Modified Software for a period of at least three years
    from the distribution of the Modified Software, it being understood
    that the additional acquisition cost of the source code shall not
    exceed the cost of the data transfer.


5.3.3 DISTRIBUTION OF EXTERNAL MODULES

When the Licensee has developed an External Module, the terms and
conditions of this Agreement do not apply to said External Module, that
may be distributed under a separate license agreement.


5.3.4 COMPATIBILITY WITH OTHER LICENSES

The Licensee can include a code that is subject to the provisions of one
of the versions of the GNU GPL, GNU Affero GPL and/or EUPL in the
Modified or unmodified Software, and distribute that entire code under
the terms of the same version of the GNU GPL, GNU Affero GPL and/or EUPL.

The Licensee can include the Modified or unmodified Software in a code
that is subject to the provisions of one of the versions of the GNU GPL,
GNU Affero GPL and/or EUPL and distribute that entire code under the
terms of the same version of the GNU GPL, GNU Affero GPL and/or EUPL.


Article 6 - INTELLECTUAL PROPERTY


6.1 OVER THE INITIAL SOFTWARE

The Holder owns the economic rights over the Initial Software. Any or
all use of the Initial Software is subject to compliance with the terms
and conditions under which the Holder has elected to distribute its work
and no one shall be entitled to modify the terms and conditions for the
distribution of said Initial Software.

The Holder undertakes that the Initial Software will remain ruled at
least by this Agreement, for the duration set forth in Article 4.2 <#term>.


6.2 OVER THE CONTRIBUTIONS

The Licensee who develops a Contribution is the owner of the
intellectual property rights over this Contribution as defined by
applicable law.


6.3 OVER THE EXTERNAL MODULES

The Licensee who develops an External Module is the owner of the
intellectual property rights over this External Module as defined by
applicable law and is free to choose the type of agreement that shall
govern its distribution.


6.4 JOINT PROVISIONS

The Licensee expressly undertakes:

 1. not to remove, or modify, in any manner, the intellectual property
    notices attached to the Software;

 2. to reproduce said notices, in an identical manner, in the copies of
    the Software modified or not.

The Licensee undertakes not to directly or indirectly infringe the
intellectual property rights on the Software of the Holder and/or
Contributors, and to take, where applicable, vis-à-vis its staff, any
and all measures required to ensure respect of said intellectual
property rights of the Holder and/or Contributors.


Article 7 - RELATED SERVICES

7.1 Under no circumstances shall the Agreement oblige the Licensor to
provide technical assistance or maintenance services for the Software.

However, the Licensor is entitled to offer this type of services. The
terms and conditions of such technical assistance, and/or such
maintenance, shall be set forth in a separate instrument. Only the
Licensor offering said maintenance and/or technical assistance services
shall incur liability therefor.

7.2 Similarly, any Licensor is entitled to offer to its licensees, under
its sole responsibility, a warranty, that shall only be binding upon
itself, for the redistribution of the Software and/or the Modified
Software, under terms and conditions that it is free to decide. Said
warranty, and the financial terms and conditions of its application,
shall be subject of a separate instrument executed between the Licensor
and the Licensee.


Article 8 - LIABILITY

8.1 Subject to the provisions of Article 8.2, the Licensee shall be
entitled to claim compensation for any direct loss it may have suffered
from the Software as a result of a fault on the part of the relevant
Licensor, subject to providing evidence thereof.

8.2 The Licensor's liability is limited to the commitments made under
this Agreement and shall not be incurred as a result of in particular:
(i) loss due the Licensee's total or partial failure to fulfill its
obligations, (ii) direct or consequential loss that is suffered by the
Licensee due to the use or performance of the Software, and (iii) more
generally, any consequential loss. In particular the Parties expressly
agree that any or all pecuniary or business loss (i.e. loss of data,
loss of profits, operating loss, loss of customers or orders,
opportunity cost, any disturbance to business activities) or any or all
legal proceedings instituted against the Licensee by a third party,
shall constitute consequential loss and shall not provide entitlement to
any or all compensation from the Licensor.


Article 9 - WARRANTY

9.1 The Licensee acknowledges that the scientific and technical
state-of-the-art when the Software was distributed did not enable all
possible uses to be tested and verified, nor for the presence of
possible defects to be detected. In this respect, the Licensee's
attention has been drawn to the risks associated with loading, using,
modifying and/or developing and reproducing the Software which are
reserved for experienced users.

The Licensee shall be responsible for verifying, by any or all means,
the suitability of the product for its requirements, its good working
order, and for ensuring that it shall not cause damage to either persons
or properties.

9.2 The Licensor hereby represents, in good faith, that it is entitled
to grant all the rights over the Software (including in particular the
rights set forth in Article 5 <#scope>).

9.3 The Licensee acknowledges that the Software is supplied "as is" by
the Licensor without any other express or tacit warranty, other than
that provided for in Article 9.2 <#good-faith> and, in particular,
without any warranty as to its commercial value, its secured, safe,
innovative or relevant nature.

Specifically, the Licensor does not warrant that the Software is free
from any error, that it will operate without interruption, that it will
be compatible with the Licensee's own equipment and software
configuration, nor that it will meet the Licensee's requirements.

9.4 The Licensor does not either expressly or tacitly warrant that the
Software does not infringe any third party intellectual property right
relating to a patent, software or any other property right. Therefore,
the Licensor disclaims any and all liability towards the Licensee
arising out of any or all proceedings for infringement that may be
instituted in respect of the use, modification and redistribution of the
Software. Nevertheless, should such proceedings be instituted against
the Licensee, the Licensor shall provide it with technical and legal
expertise for its defense. Such technical and legal expertise shall be
decided on a case-by-case basis between the relevant Licensor and the
Licensee pursuant to a memorandum of understanding. The Licensor
disclaims any and all liability as regards the Licensee's use of the
name of the Software. No warranty is given as regards the existence of
prior rights over the name of the Software or as regards the existence
of a trademark.


Article 10 - TERMINATION

10.1 In the event of a breach by the Licensee of its obligations
hereunder, the Licensor may automatically terminate this Agreement
thirty (30) days after notice has been sent to the Licensee and has
remained ineffective.

10.2 A Licensee whose Agreement is terminated shall no longer be
authorized to use, modify or distribute the Software. However, any
licenses that it may have granted prior to termination of the Agreement
shall remain valid subject to their having been granted in compliance
with the terms and conditions hereof.


Article 11 - MISCELLANEOUS


11.1 EXCUSABLE EVENTS

Neither Party shall be liable for any or all delay, or failure to
perform the Agreement, that may be attributable to an event of force
majeure, an act of God or an outside cause, such as defective
functioning or interruptions of the electricity or telecommunications
networks, network paralysis following a virus attack, intervention by
government authorities, natural disasters, water damage, earthquakes,
fire, explosions, strikes and labor unrest, war, etc.

11.2 Any failure by either Party, on one or more occasions, to invoke
one or more of the provisions hereof, shall under no circumstances be
interpreted as being a waiver by the interested Party of its right to
invoke said provision(s) subsequently.

11.3 The Agreement cancels and replaces any or all previous agreements,
whether written or oral, between the Parties and having the same
purpose, and constitutes the entirety of the agreement between said
Parties concerning said purpose. No supplement or modification to the
terms and conditions hereof shall be effective as between the Parties
unless it is made in writing and signed by their duly authorized
representatives.

11.4 In the event that one or more of the provisions hereof were to
conflict with a current or future applicable act or legislative text,
said act or legislative text shall prevail, and the Parties shall make
the necessary amendments so as to comply with said act or legislative
text. All other provisions shall remain effective. Similarly, invalidity
of a provision of the Agreement, for any reason whatsoever, shall not
cause the Agreement as a whole to be invalid.


11.5 LANGUAGE

The Agreement is drafted in both French and English and both versions
are deemed authentic.


Article 12 - NEW VERSIONS OF THE AGREEMENT

12.1 Any person is authorized to duplicate and distribute copies of this
Agreement.

12.2 So as to ensure coherence, the wording of this Agreement is
protected and may only be modified by the authors of the License, who
reserve the right to periodically publish updates or new versions of the
Agreement, each with a separate number. These subsequent versions may
address new issues encountered by Free Software.

12.3 Any Software distributed under a given version of the Agreement may
only be subsequently distributed under the same version of the Agreement
or a subsequent version, subject to the provisions of Article 5.3.4
<#compatibility>.


Article 13 - GOVERNING LAW AND JURISDICTION

13.1 The Agreement is governed by French law. The Parties agree to
endeavor to seek an amicable solution to any disagreements or disputes
that may arise during the performance of the Agreement.

13.2 Failing an amicable solution within two (2) months as from their
occurrence, and unless emergency proceedings are necessary, the
disagreements or disputes shall be referred to the Paris Courts having
jurisdiction, by the more diligent Party.
674
License_GPL-3.0.txt
Normal file
674
License_GPL-3.0.txt
Normal file
@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for
software and other kinds of works.

The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.

Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

The precise terms and conditions for copying, distribution and
modification follow.

TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based
on the Program.

To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

The Corresponding Source for a work in source code form is that
same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.

b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".

c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.

d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.

d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or

e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
13
MANIFEST.in
Normal file
@ -0,0 +1,13 @@
graft libLSS
graft extra
graft src
graft build_tools
graft scripts
graft cmake
graft cmake_modules
graft docs
graft examples
graft external
include build.sh CHANGES.rst
include requirements.txt setup.py License*txt VERSION.txt CMakeLists.txt README.rst
prune docs/_build
166
README.rst
Normal file
@ -0,0 +1,166 @@
==================================================
BORG: Bayesian Origin Reconstruction from Galaxies
==================================================

Copyright(c) 2009-2020 Jens Jasche, 2014-2020 Guilhem Lavaux

Version 2.1

Description
-----------

This is the main component of the Bayesian Large Scale Structure inference
pipeline.

A lot of complementary information is available on the wiki
https://www.aquila-consortium.org/.

Cloning all the modules
-----------------------

The ARES software is only the foundation for other modules that add many
more capabilities to the framework.

Notably, the Aquila collaboration has developed the BORG extension, which
encodes advanced forward models and complex likelihoods to run inferences
on galaxy clustering, the Lyman-alpha forest, and more.

To get access to the extra modules, please contact Aquila consortium
members, who will explain the policies in place. Once your account on
bitbucket is authorized you may use the script `get-aquila-modules.sh`.
The procedure is as follows (a worked sketch is given after the note
below):

* first change to the desired branch (e.g. develop/2.1) with `git checkout the_interesting_branch`
* clone all the adequate modules: `get-aquila-modules.sh --clone`
* set up the branches for the modules: `get-aquila-modules.sh --branch-set`
* polish up your environment by installing the git hooks: `get-aquila-modules.sh --hooks`

**NOTE**: the git hooks require `clang-format` to be available to check the
formatting. If it is not present, they will fail.
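
As an illustration, a first-time setup could look like the following
sketch (the branch name `develop/2.1` is only an example; use whichever
branch the consortium tells you to track)::

    # Hypothetical session; adapt the branch name to your needs.
    git checkout develop/2.1
    bash get-aquila-modules.sh --clone        # clone all authorized modules
    bash get-aquila-modules.sh --branch-set   # align each module's branch
    bash get-aquila-modules.sh --hooks        # install git hooks (needs clang-format)
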
Building
--------

There is a helper script that prepares the build system to compile all
tools and libraries. It resides in the root directory of the ares source
tree and is called "build.sh". By default it will build everything in the
"build" subdirectory. To get all the options please run with the option
"--help".

After the tool has been run, you can move to the build directory and execute
"make", which will build everything.

Please pay attention to warnings and error messages; the most important
ones are marked in color. Notably, some problems may occur if two different
versions of the same compiler are used for C and C++. To fix that, it is
sufficient to explicitly specify the compiler paths with the options
'--c-compiler' and '--cxx-compiler' of "build.sh".
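
For instance, a full configure-and-build cycle that pins both compilers
explicitly could look like this sketch (the compiler paths are
illustrative; point them at your own toolchain)::

    # Illustrative invocation; both options are documented in "build.sh --help".
    bash build.sh --c-compiler /usr/bin/gcc --cxx-compiler /usr/bin/g++
    cd build    # the default build directory
    make
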
*Note*: When modules are present in extra/, you may prevent them from being
built by putting an empty file called `DO_NOT_BUILD` in the directory of the
module concerned. For example, to prevent `borg` from building, run `touch
extra/borg/DO_NOT_BUILD` from the present directory and the build system
will ignore `borg`.

Compiler compatibilities
------------------------

Tested on GCC 7.0 - 10.2.
Some performance regressions were noted with gcc 8.1.
Countermeasures have been introduced, though some corner cases
may still be a bit slower. Clang is unaffected by this regression.

Note that GCC <= 6 fails because it does not correctly support C++14
features.


Documentation
-------------

Please refer to `docs/README.txt`.

Modules
-------

The core package supports extensions statically linked to the core.
They have to be put in extra/ and the cmake scripts will automatically link
to them. Check 'extra/demo/' for an example.

Usage policy
------------

If you are using BORG for your project, please cite the following articles
for ARES2, ARES3 and BORG3:

* Jasche, Kitaura, Wandelt, 2010, MNRAS, 406, 1 (arXiv 0911.2493)
* Jasche & Lavaux, 2015, MNRAS, 447, 2 (arXiv 1402.1763)
* Lavaux & Jasche, 2016, MNRAS, 455, 3 (arXiv 1509.05040)
* Jasche & Lavaux, 2019, A&A, 625, A64 (arXiv 1806.11117)

However, bear in mind that depending on the features that you are using you
may want to cite other papers as well. Here is a non-exhaustive list of
those articles:

* Model development:

  * HADES epoch:

    * HMC, exponential transform, linear bias: Jasche, Kitaura, Wandelt, 2010, MNRAS, 406, 1 (arXiv 0911.2493)
    * HMC, exponential transform, power law bias:

      * Jasche, Leclercq, Wandelt, 2015
      * Jasche & Wandelt, 2012, MNRAS, 425, 1042 (arXiv 1106.2757)

  * Foreground/Robustification:

    * Jasche & Lavaux, 2017, A&A (arXiv 1706.08971)
    * Porqueres, Kodi Ramanah, Jasche, Lavaux, 2019, A&A (arXiv 1812.05113)

  * Cosmic expansion model:

    * Kodi Ramanah, Lavaux, Jasche, Wandelt, 2019, A&A (arXiv 1808.07496)

  * Photometric redshifts:

    * HADES with Photo-Z: Jasche & Wandelt, 2012, MNRAS, 425, 1042 (arXiv 1106.2757)

  * Galaxy shear:

    * Porqueres, Heavens, Mortlock & Lavaux, 2021, MNRAS, 502, 3035 (arXiv 2011.07722)
    * Porqueres, Heavens, Mortlock & Lavaux, 2022, MNRAS, 509, 3194 (arXiv 2108.04825)

  * Cosmic velocity field:

    * Prideaux-Ghee, Leclercq, Lavaux, Heavens, Jasche, 2022, MNRAS (arXiv 2204.00023)
    * Boruah, Lavaux, Hudson, 2022, MNRAS (arXiv 2111.15535)

  * BORG-PM:

    * Jasche & Lavaux, 2019, A&A, 625, A64 (arXiv 1806.11117)

  * EFT bias model and likelihood:

    * Schmidt, Elsner, Jasche, Nguyen, Lavaux, JCAP 01, 042 (2019) (arXiv 1808.02002)
    * Schmidt, Cabass, Jasche, Lavaux, JCAP 11, 008 (2020) (arXiv 2004.06707)

* Data applications:

  * SDSS Main Galaxy sample:
  * SDSS3 LRG sample:

    * Lavaux, Jasche & Leclercq, 2019 (arXiv 1909.06396)

  * 2M++ sample:

    * Lavaux & Jasche, 2016, MNRAS, 455, 3 (arXiv 1509.05040)
    * Jasche & Lavaux, 2019, A&A, 625, A64 (arXiv 1806.11117)

For a full listing of publications from the Aquila consortium, please check
https://aquila-consortium.org/publications.html

Acknowledgements
----------------

This work has been funded by the following grants and institutions over the
years:

* the DFG cluster of excellence "Origin and Structure of the Universe"
  (http://www.universe-cluster.de).
* Institut Lagrange de Paris (grant ANR-10-LABX-63, http://ilp.upmc.fr) within
  the context of the Idex SUPER subsidized by the French government through
  the Agence Nationale de la Recherche (ANR-11-IDEX-0004-02).
* BIG4 (ANR-16-CE23-0002) (https://big4.iap.fr)
* The "Programme National de Cosmologie et Galaxies" (PNCG, CNRS/INSU)
* Through the grant code ORIGIN, it has received support from
  the "Domaine d'Interet Majeur (DIM) Astrophysique et Conditions d'Apparitions
  de la Vie (ACAV)" from the Ile-de-France region.
1
VERSION.txt
Normal file
@ -0,0 +1 @@
2.1
24
bitbucket-pipelines.yml
Normal file
@ -0,0 +1,24 @@
image: glvx/bb_pipeline

pipelines:
  custom:
    standard:
      - step:
          name: "Default ARES testing"
          script:
            - git submodule init && git submodule update
            - BOOST_LIB_DIR=/opt/boost1.71/lib bash build.sh --build-dir build-bb --use-system-hdf5 --use-system-boost --use-system-fftw /usr --use-system-gsl
            - cd build-bb
            - make
            - make test
    full:
      - step:
          script:
            - git submodule init && git submodule update
            - bash get-aquila-modules.sh --clone
            - bash get-aquila-modules.sh --branch-set
            - bash build.sh --download-deps
            - BOOST_LIB_DIR=/opt/boost1.71/lib bash build.sh --use-system-hdf5 --use-system-boost --use-system-fftw /usr --use-system-gsl
            - cd build
            - make
            - make test
480
build.sh
Executable file
@ -0,0 +1,480 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#+
|
||||||
|
# ARES/HADES/BORG Package -- ./build.sh
|
||||||
|
# Copyright (C) 2016-2018 Guilhem Lavaux <guilhem.lavaux@iap.fr>
|
||||||
|
# Copyright (C) 2020 Florent Leclercq <florent.leclercq@polytechnique.org>
|
||||||
|
#
|
||||||
|
# Additional contributions from:
|
||||||
|
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
|
||||||
|
#
|
||||||
|
#+
|
||||||
|
|
||||||
|
print_help()
|
||||||
|
{
|
||||||
|
cat <<EOF
|
||||||
|
This is the build helper. The arguments are the following:
|
||||||
|
|
||||||
|
--cmake CMAKE_BINARY instead of searching for cmake in the path,
|
||||||
|
use the indicated binary
|
||||||
|
|
||||||
|
--without-openmp build without openmp support (default with)
|
||||||
|
--with-mpi build with MPI support (default without)
|
||||||
|
--c-compiler COMPILER specify the C compiler to use (default to cc)
|
||||||
|
--cxx-compiler COMPILER specify the CXX compiler to use (default to c++)
|
||||||
|
--julia JULIA_BINARY specify the full path of julia interpreter
|
||||||
|
--build-dir DIRECTORY specify the build directory (default to "build/" )
|
||||||
|
--debug build for full debugging
|
||||||
|
--no-debug-log remove all the debug output to increase speed on parallel
|
||||||
|
filesystem.
|
||||||
|
--perf add timing instructions and report in the log files
|
||||||
|
|
||||||
|
--extra-flags FLAGS extra flags to pass to cmake
|
||||||
|
--download-deps Predownload dependencies
|
||||||
|
--use-predownload Use the predownloaded dependencies. They must be in
|
||||||
|
downloads/
|
||||||
|
--no-predownload Do not use predownloaded dependencies in downloads/
|
||||||
|
--purge Force purging the build directory without asking
|
||||||
|
questions.
|
||||||
|
--native Try to activate all optimizations supported by the
|
||||||
|
running CPU.
|
||||||
|
--python[=PATH] Enable the building of the python extension. If PATH
|
||||||
|
is provided it must point to the executable of your
|
||||||
|
choice for (e.g \`/usr/bin/python3.9\`)
|
||||||
|
--with-julia Build with Julia support (default false)
|
||||||
|
--hades-python Enable hades-python (implies --python)
|
||||||
|
--skip-building-tests Do not build all the tests
|
||||||
|
--install-system-python Install python package in the python system dir
|
||||||
|
--install-user-python Install python package in the user directory [default]
|
||||||
|
|
||||||
|
Advanced usage:
|
||||||
|
|
||||||
|
--eclipse Generate for eclipse use
|
||||||
|
--ninja Use ninja builder
|
||||||
|
--update-tags Update the TAGS file
|
||||||
|
--use-system-boost[=PATH] Use the boost install available from the system. This
|
||||||
|
reduces your footprint but also increases the
|
||||||
|
possibilities of miscompilation and symbol errors.
|
||||||
|
--use-system-fftw[=PATH] Same but for FFTW3. We require the prefix path.
|
||||||
|
--use-system-gsl Same but for GSL
|
||||||
|
--use-system-eigen=PATH Same but for EIGEN. Here we require the prefix path of
|
||||||
|
the installation.
|
||||||
|
--use-system-hdf5[=PATH] Same but for HDF5. Require an HDF5 with C++ support.
|
||||||
|
The path indicate the prefix path of the installation of HDF5
|
||||||
|
(e.g. /usr/local or /usr). By default it will use
|
||||||
|
environment variables to guess it (HDF5_ROOT)
|
||||||
|
|
||||||
|
After the configuration, you can further tweak the configuration using ccmake
|
||||||
|
(if available on your system).
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
add_skip()
|
||||||
|
{
|
||||||
|
if test "x${skip_url}" = x; then
|
||||||
|
skip_url=$1
|
||||||
|
else
|
||||||
|
skip_url="${skip_url}|$1"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
[[ x$ZSH_VERSION == x ]] || setopt local_options BASH_REMATCH
|
||||||
|
C_DEFAULT=$(echo -e "\033[0m")
|
||||||
|
C_WHITE=$(echo -e "\033[1m")
|
||||||
|
C_RED=$(echo -e "\033[91;1m")
|
||||||
|
C_ORANGE=$(echo -e "\033[33m")
|
||||||
|
C_BG_RED=$(echo -e "\033[41m")
|
||||||
|
C_BG_WHITE=$(echo -e "\033[107m")
|
||||||
|
C_BG_GREEN=$(echo -e "\033[42m")
|
||||||
|
|
||||||
|
errormsg() {
|
||||||
|
# explained in
|
||||||
|
# https://stackoverflow.com/questions/44440506/split-string-with-literal-n-in-a-for-loop
|
||||||
|
str=$1
|
||||||
|
while [[ $str ]]; do # iterate as long as we have input
|
||||||
|
if [[ $str = *'\n'* ]]; then # if there's a '\n' sequence later...
|
||||||
|
first=${str%%'\n'*} # put everything before it into 'first'
|
||||||
|
rest=${str#*'\n'} # and put everything after it in 'rest'
|
||||||
|
else # if there's no '\n' later...
|
||||||
|
first=$str # then put the whole rest of the string in 'first'
|
||||||
|
rest='' # and there is no 'rest'
|
||||||
|
fi
|
||||||
|
echo -e "${C_BG_RED}${first}${C_DEFAULT}"
|
||||||
|
str=$rest
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
noticemsg() {
|
||||||
|
str=$1
|
||||||
|
while [[ $str ]]; do # iterate as long as we have input
|
||||||
|
if [[ $str = *'\n'* ]]; then # if there's a '\n' sequence later...
|
||||||
|
first=${str%%'\n'*} # put everything before it into 'first'
|
||||||
|
rest=${str#*'\n'} # and put everything after it in 'rest'
|
||||||
|
else # if there's no '\n' later...
|
||||||
|
first=$str # then put the whole rest of the string in 'first'
|
||||||
|
rest='' # and there is no 'rest'
|
||||||
|
fi
|
||||||
|
echo -e "${C_WHITE}${C_BG_GREEN}${first}${C_DEFAULT}"
|
||||||
|
str=$rest
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
check_command() {
|
||||||
|
cmd="$1"
|
||||||
|
msg="$2"
|
||||||
|
|
||||||
|
if ! command -v "${cmd}" > /dev/null 2>&1; then
|
||||||
|
echo "${cmd} is not available. ${msg}";
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo -e "-- ${C_WHITE}${C_BG_GREEN}Found:${C_DEFAULT} ${C_WHITE}${cmd}${C_DEFAULT}"
|
||||||
|
}
|
||||||
|
|
||||||
|
check_existence() {
|
||||||
|
if test "$1" = "-q"; then
|
||||||
|
quiet=1
|
||||||
|
shift
|
||||||
|
else
|
||||||
|
quiet=0
|
||||||
|
fi
|
||||||
|
file="$1"
|
||||||
|
error_message="$2"
|
||||||
|
if ! test -e "${file}"; then
|
||||||
|
echo "-- ${C_RED}${C_BG_WHITE}Not found:${C_DEFAULT} ${file}"
|
||||||
|
echo "${error_message}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if test $quiet = 0; then
|
||||||
|
echo -e "-- ${C_WHITE}${C_BG_GREEN}Found:${C_DEFAULT} ${file}"
|
||||||
|
fi
|
||||||
|
}

echo "Ensure the current directory is ARES"
check_existence -q "src/ares3.cpp" "Please move the current working directory to the ares3 source directory."
check_existence -q "external/cosmotool/CMakeLists.txt" "Submodules were not cloned. Please run 'git submodule update --init --recursive' (WARNING! You might have to start afresh.)."

srcdir=$(pwd)
build_dir=${srcdir}/build

build_type=Release
cmake=cmake
cmake_flags=()
c_compiler=$(which cc)
cxx_compiler=$(which c++)
USE_PREDOWNLOAD=1
julia_binary=
do_purge=0
cmake_generator=

while test $# -gt 0; do
  key="$1"
  case $key in
  --cmake)
    cmake="$2"
    shift
    ;;
  --extra-flags)
    cmake_flags+=($2)
    shift
    ;;
  --without-openmp)
    cmake_flags+=(-DENABLE_OPENMP:BOOL=OFF)
    ;;
  --with-mpi)
    cmake_flags+=(-DENABLE_MPI:BOOL=ON)
    ;;
  --c-compiler)
    c_compiler=$(which $2)
    shift
    ;;
  --cxx-compiler)
    cxx_compiler=$(which $2)
    shift
    ;;
  --julia)
    julia_binary="$2"
    shift
    ;;
  --build-dir)
    build_dir="$2"
    shift
    ;;
  --debug)
    build_type="Debug"
    ;;
  --no-debug-log)
    cmake_flags+=(-DDISABLE_DEBUG_OUTPUT:BOOL=ON)
    ;;
  --eclipse)
    cmake_generator=eclipse
    ;;
  --native)
    cmake_flags+=(-DUSE_NATIVE_ARCH:BOOL=ON)
    ;;
  --perf)
    cmake_flags+=(-DCONTEXT_TIMER:BOOL=ON)
    ;;
  --with-julia)
    cmake_flags+=(-DBUILD_JULIA:BOOL=ON)
    ;;
  --install-user-python)
    cmake_flags+=(-DINSTALL_PYTHON_LOCAL=ON)
    ;;
  --install-system-python)
    cmake_flags+=(-DINSTALL_PYTHON_LOCAL=OFF)
    ;;
  --python|--python=*)
    if [[ $1 =~ ^--python=(.+)$ ]]; then
      PYTHON_PATH=${BASH_REMATCH[1]}
      cmake_flags+=(-DPYTHON_EXECUTABLE=${PYTHON_PATH})
    fi
    cmake_flags+=(-DBUILD_PYTHON_EXTENSION:BOOL=ON)
    ;;
  --hades-python)
    cmake_flags+=(-DBUILD_PYTHON_EXTENSION:BOOL=ON -DBUILD_PYTHON_EMBEDDER:BOOL=ON)
    ;;
  --skip-building-tests)
    cmake_flags+=(-DBUILD_TESTING:BOOL=OFF)
    ;;
  --ninja)
    cmake_generator=ninja
    ;;
  --no-predownload)
    USE_PREDOWNLOAD=0
    ;;
  --use-predownload)
    USE_PREDOWNLOAD=1
    ;;
  --download-deps)
    # This step requires wget.
    if ! command -v wget > /dev/null 2>&1; then
      echo "The command wget is required to pre-download the dependencies."
      echo "Please install it and make sure it is available from the PATH before retrying."
      exit 1
    fi

    lf=$'\n'
    grep -E "SET\\([a-zA-Z0-9_]+_URL" ${srcdir}/external/external_build.cmake | grep -e 'ftp://' | sed -e "s%^.*(\([a-zA-Z0-9_]*\)_URL[ ]*\"\(ftp.*\)\"[ ]*CACHE.*$%\1_URL \\$lf\2%g" > pre_list
    grep -E "SET\\([a-zA-Z0-9_]+_URL" ${srcdir}/external/external_build.cmake | grep -E 'https?://' | sed -e "s%^.*(\([a-zA-Z0-9_]*\)_URL[ ]*\"\(http.*\)\"[ ]*CACHE.*$%\1_URL \\$lf\2%g" >> pre_list

    test -e ${srcdir}/downloads || mkdir ${srcdir}/downloads;
    ( \
      cd ${srcdir}/downloads; \
      rm -f deps.txt; \
      echo $dlist
      while read url_name; do \
        read d; \
        prename=$(echo $url_name | sed -e 's%^\([a-zA-Z0-9]\+\)_URL%\L\1%g') ; \
        d_tmp=$(echo $d | cut -d/ -f2-); \
        if [[ $d_tmp =~ /.*/([^/]*(tar\.|zip)[^/]*).* ]]; then \
          out_d=${BASH_REMATCH[1]}; \
        else \
          echo "Error matching $d"; \
          exit 1; \
        fi; \
        out_d=${prename}_$out_d; \
        echo "Downloading $d for ${url_name} to ${out_d}"; \
        if ! test -e ${out_d}; then
          wget --no-check-certificate --quiet -O $out_d $d || (echo "${C_RED}Failure to download $d to $out_d${C_DEFAULT}"; exit 1) || exit 1; \
        else
          echo "=> Already downloaded ${out_d}"; \
        fi; \
        echo ${url_name} >> deps.txt; \
        echo ${out_d} >> deps.txt; \
      done \
    ) < pre_list || echo "${C_RED}Error.${C_DEFAULT} "
    rm -f pre_list
    echo "Done. You can now upload the ${srcdir}/downloads/ directory to the remote computer in the source directory and use --use-predownload."
    exit 0
    ;;
  -h|--h|--he|--hel|--help)
    print_help
    exit 1
    ;;
  --use-system-fftw|--use-system-fftw=*)
    if [[ $1 =~ ^--use-system-fftw=(.+)$ ]]; then
      FFTW_PATH=${BASH_REMATCH[1]}
    else
      if [[ $FFTW_INC =~ ^(.+)/include$ ]]; then
        FFTW_PATH=${BASH_REMATCH[1]}
      fi
    fi
    cmake_flags+=(-DINTERNAL_FFTW:BOOL=OFF)
    if [ "x$FFTW_PATH" != x ]; then
      CMAKE_PREFIX_PATH="${FFTW_PATH};${CMAKE_PREFIX_PATH}"
    fi
    add_skip FFTW_URL
    ;;
  --use-system-hdf5|--use-system-hdf5=*)
    if [[ $1 =~ ^--use-system-hdf5=(.+)$ ]]; then
      HDF5_ROOT=${BASH_REMATCH[1]}
      cmake_flags+=(-DINTERNAL_HDF5:BOOL=OFF "-DHDF5_ROOT=${HDF5_ROOT}")
    else
      cmake_flags+=(-DINTERNAL_HDF5:BOOL=OFF)
    fi
    add_skip HDF5_URL
    ;;
  --use-system-boost|--use-system-boost=*)
    cmake_flags+=(-DINTERNAL_BOOST:BOOL=OFF)
    if [[ $1 =~ ^--use-system-boost=(.+)$ ]]; then
      boost_root=${BASH_REMATCH[1]}
      cmake_flags+=("-DBOOST_ROOT=${boost_root}")
    fi
    add_skip BOOST_URL
    ;;
  --use-system-eigen|--use-system-eigen=*)
    cmake_flags+=(-DINTERNAL_EIGEN:BOOL=OFF)
    if [[ $1 =~ ^--use-system-eigen=(.+)$ ]]; then
      EIGEN_PATH=${BASH_REMATCH[1]}
      cmake_flags+=(-DEIGEN_PATH:PATH=${EIGEN_PATH})
    fi
    add_skip EIGEN_URL
    ;;
  --use-system-gsl)
    cmake_flags+=(-DINTERNAL_GSL:BOOL=OFF)
    if ! command -v gsl-config > /dev/null 2>&1; then
      errormsg "Missing 'gsl-config' in the execution path.\n I cannot detect the location of GSL."
      exit 1
    fi
    CMAKE_PREFIX_PATH="$(gsl-config --prefix);${CMAKE_PREFIX_PATH}"
    add_skip GSL_URL
    ;;
  --purge)
    do_purge=1
    ;;
  --update-tags)
    echo "Updating tags file."
    rm -f ctags
    for module in . extra/hades extra/borg extra/virbius extra/hmclet extra/dm_sheet; do
      if test -e ${module}; then
        (cd ${module}; git ls-files '*.[ch]pp' | awk "{ print \"${module}/\" \$0;}") | xargs ctags -a
      fi
    done

    echo "Done. Exiting."
    exit 0
    ;;
  *)
    echo "Unknown option. Abort."
    print_help
    exit 1
    ;;
  esac
  shift
done
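
# Example invocations, as a sketch based on the options parsed above (paths
# and flags are illustrative):
#   bash build.sh --purge --c-compiler gcc --cxx-compiler g++
#   bash build.sh --with-mpi --use-system-fftw=$HOME/opt/fftw --build-dir /tmp/ares-build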


if test ${USE_PREDOWNLOAD} = 1; then
  if ! test -d "${srcdir}/downloads"; then
    echo "--- ${C_RED}No deps predownloaded. Stop${C_DEFAULT} ---"
    exit 1
  fi
  cmd=$( (
    flags=()
    while read url_name; do
      if [[ "${url_name}" =~ ^(${skip_url})$ ]]; then
        read path;
        continue
      fi
      read path;
      path="${srcdir}/downloads/${path}";
      flags+=("-D${url_name}:URL=file://${path}");
    done;
    echo "cmake_flags+=(${flags[@]})"
  ) < ${srcdir}/downloads/deps.txt )
  eval ${cmd}
else
  echo "--- ${C_ORANGE}WARNING: Not using predownloaded deps.${C_DEFAULT} ---"
fi
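
# For reference: ${srcdir}/downloads/deps.txt, written by --download-deps
# above, holds alternating lines with the CMake URL variable name and the
# local file name, e.g. (entries illustrative, not actual content):
#   FFTW_URL
#   fftw_fftw-3.3.8.tar.gz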
export CMAKE_PREFIX_PATH
#CMAKE_PREFIX_PATH=$(printf %q "${CMAKE_PREFIX_PATH}")

cmake_flags+=(-DARES_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DCMAKE_BUILD_TYPE=${build_type} -DCMAKE_C_COMPILER=${c_compiler} -DCMAKE_CXX_COMPILER=${cxx_compiler})
if test x"${julia_binary}" != x""; then
  cmake_flags+=(-DJULIA_EXECUTABLE=${julia_binary})
fi

if test x$cmake_generator = "xninja"; then
  cmake_flags+=("-GNinja")
elif test x$cmake_generator = "xeclipse"; then
  cmake_flags+=("-GEclipse CDT4 - Unix Makefiles")
fi

echo "Summary of CMAKE_FLAGS:"
for f in "${cmake_flags[@]}"; do
  printf "   %s\n" "$f"
done

if test -e ${build_dir}; then
  if test x${do_purge} == x1; then
    rm -f -r ${build_dir}
  else
    while true; do
      echo -n "${build_dir} already exists. Remove ? [y/n] "
      read RESULT
      if test "x${RESULT}" = "xn"; then
        echo "Abort"
        exit 1
      fi
      if test "x${RESULT}" = "xy"; then
        echo "Removing"
        rm -f -r ${build_dir}
        break
      fi
    done
  fi
fi

check_command "${cmake}" "Please install CMake or provide --cmake to build.sh"
#check_command autoconf "Autoconf is missing. Please install it."
#check_command automake "Automake is missing. Please install it."
check_command patch "Patch is missing. Please install it."
check_command pkg-config "Pkgconfig is missing. Please install it."

if ! mkdir -p ${build_dir}; then
  echo -e "${C_WHITE}--------------------------${C_DEFAULT}"
  echo -e "${C_BG_RED}Cannot create build directory.${C_DEFAULT}"
  echo -e "${C_WHITE}--------------------------${C_DEFAULT}"
  echo
  exit 1
fi

if ! ( \
  cd ${build_dir} && \
  ${cmake} "${cmake_flags[@]}" ${srcdir}; \
  exit $? \
); then
  echo -e "${C_WHITE}-------------------------------------------------${C_DEFAULT}"
  echo -e "${C_BG_RED}An error occurred in CMake.${C_DEFAULT}"
  echo -e "${C_BG_RED}Please collect the messages above in your report.${C_DEFAULT}"
  echo -e "${C_WHITE}-------------------------------------------------${C_DEFAULT}"
  echo
  exit 1
fi

cat <<EOF
------------------------------------------------------------------

${C_BG_GREEN}Configuration done.${C_DEFAULT}
Move to ${build_dir} and type 'make' now.
Please check the configuration of your MPI C compiler. You may need
to set an environment variable to use the proper compiler.

Some examples (for SH/BASH shells):
- OpenMPI:
    OMPI_CC=${c_compiler}
    OMPI_CXX=${cxx_compiler}
    export OMPI_CC OMPI_CXX

------------------------------------------------------------------

EOF
# ARES TAG: authors_num = 2
# ARES TAG: name(0) = Guilhem Lavaux
# ARES TAG: email(0) = guilhem.lavaux@iap.fr
# ARES TAG: year(0) = 2016-2018
# ARES TAG: name(1) = Florent Leclercq
# ARES TAG: email(1) = florent.leclercq@polytechnique.org
# ARES TAG: year(1) = 2020

7
build_tools/capture_stats.sh
Normal file
@ -0,0 +1,7 @@
git log | grep Author | awk -F: '{print $2;}' | sed 's%\([ ]*\)\(.*\)<.*>\([ ]*\)%\2%g' | sort | uniq | \
(
  while read; do
    n=$(git log | grep Author | grep "$REPLY" | wc | awk '{print $1;}')
    echo "'$REPLY' $n"
  done
)

243
build_tools/gather_sources.py
Normal file
@ -0,0 +1,243 @@
#+
# ARES/HADES/BORG Package -- ./build_tools/gather_sources.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
#
# Additional contributions from:
#    Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
# ARES TAG: authors_num = 1
# ARES TAG: name(0) = Guilhem Lavaux
# ARES TAG: email(0) = guilhem.lavaux@iap.fr
# ARES TAG: year(0) = 2014-2020
import sys
import shutil
import tempfile
import re
import time
from git import Repo, Tree, Blob, Submodule

distribute_text = ""

def build_slice(start, end):
    if end == start:
        return str(start)
    else:
        return str(start) + "-" + str(end)

def line_injection(tag, text, func):
    output = []
    for line in text.split('\n'):
        if line.find(tag) >= 0:
            line = func(line)
        output.append(line)
    return "\n".join(output)

def build_years(years):
    year_list = []
    start = prev_y = years[0]
    for y in years[1:]:
        if y != prev_y + 1:
            year_list.append(build_slice(start, prev_y))
            start = y
        prev_y = y
    year_list.append(build_slice(start, prev_y))

    return ", ".join(year_list)  #[str(y) for y in years])

class BadFileData(Exception):
    pass

def checked_author(data, i):
    defaults = {
        'name(0)': 'Guilhem Lavaux',
        'email(0)': 'guilhem.lavaux@iap.fr',
        'year(0)': '2014-2020',
        'name(1)': 'Jens Jasche',
        'email(1)': 'jens.jasche@fysik.su.se',
        'year(1)': '2009-2020'
    }
    codes = ['name(%d)' % i, 'email(%d)' % i, 'year(%d)' % i]
    if any(not s in data for s in codes):
        if i <= 1:
            print("  \033[1mWARNING: Using default author data. Please fix file.\033[0m")
        else:
            print("  \033[41mERROR: Need more author data. Please fix file.\033[0m")
            raise BadFileData()

    defaults.update(data)
    return tuple(map(lambda k: defaults[k], codes))

def main_author_handler(line, data, fname):
    num = int(data.get('authors_num', 2))
    output = []
    for i in range(num):
        name, email, year = checked_author(data, i)
        this_line = re.sub('@MAIN_NAME@', name, line)
        this_line = re.sub('@MAIN_EMAIL@', email, this_line)
        output.append(re.sub('@MAIN_YEAR@', year, this_line))
    return "\n".join(output)

discard_set = set(['Temp'])

def apply_license(license, relimit, filename, authors):
    header = re.sub(r'@FILENAME@', filename, license)
    # Look for the AUTHORS tag in the license template; it has to support
    # both types of comments.
    m = re.search(
        r'^([@#/<>()\w\-*+ \t\n:.]+)\n([#/()\w\-* \t]*)@AUTHORS@([#/()\w\-* \t]*)\n([@#/()\w\-*+ \n:.;,<>]+)$',
        header, flags=re.MULTILINE)
    init_header, pre_author, post_author, final_header = m.group(1, 2, 3, 4)
    header = init_header + '\n'
    author_list = list(authors.keys())
    author_list.sort()
    for author in author_list:
        if author in discard_set:
            continue
        a_data = authors[author]
        email = a_data['email']
        years = a_data['years']
        years = build_years(years)
        header += pre_author + ("%(name)s <%(email)s> (%(years)s)" % dict(name=author, email=email, years=years)) + post_author + '\n'
    header += final_header

    m = re.search(
        r'^([@#/<>(),\w\-*+ \t\n:.]+)\n([#/()\w\-* \t]*)@DISTRIBUTE@([#/()\w\-* \t]*)\n([@#/()\w\-*+ \n:.;,<>]+)$',
        header, flags=re.MULTILINE)
    if m is None:
        print("We reached an invalid state.")
        print(f"Header is:\n{header}")
        sys.exit(1)
    init_header, pre_distribute, post_distribute, final_header = m.group(1, 2, 3, 4)
    header = f"{init_header}\n"
    for distribute_line in distribute_text.split('\n'):
        header += f"{pre_distribute}{distribute_line}{post_distribute}\n"
    header += final_header

    with open(filename, mode="rt", encoding="UTF-8") as f:
        lines = f.read()

    # Now look for the tag section
    specials = {}
    for a in re.finditer(r"(#|//) ARES TAG:[ \t]*(?P<tag>[\w()]+)[ \t]*=[ \t]*(?P<value>[\w\t \-_\.@]*)", lines):
        b = a.groupdict()
        specials[b['tag']] = b['value']

    header = line_injection('@MAIN_NAME@', header, lambda l: main_author_handler(l, specials, filename))

    lines = re.sub(relimit, lambda x: (("" if x.group(1) is None else x.group(1)) + header), lines)

    with tempfile.NamedTemporaryFile(delete=False, encoding="UTF-8", mode="wt") as tmp_sources:
        tmp_sources.write(lines)

    shutil.move(tmp_sources.name, filename)

def apply_python_license(filename, authors):
    license = """#+
# ARES/HADES/BORG Package -- @FILENAME@
# Copyright (C) @MAIN_YEAR@ @MAIN_NAME@ <@MAIN_EMAIL@>
#
# Additional contributions from:
# @AUTHORS@
# @DISTRIBUTE@
#+
"""

    print("Shell/Python/Julia file: %s" % filename)
    relimit = r'^(#!.*\n)?#\+\n(#.*\n)*#\+\n'
    apply_license(license, relimit, filename, authors)


def apply_cpp_license(filename, authors):
    license = """/*+
ARES/HADES/BORG Package -- @FILENAME@
Copyright (C) @MAIN_YEAR@ @MAIN_NAME@ <@MAIN_EMAIL@>

Additional contributions from:
@AUTHORS@
@DISTRIBUTE@
+*/
"""
    relimit = r'(?s)^()/\*\+.*\+\*/\n'
    print("C++ file: %s" % filename)
    apply_license(license, relimit, filename, authors)


def patch_author_list(authors):
    patcher = {
        'Guilhem Lavaux': 'guilhem.lavaux@iap.fr',
        'Jens Jasche': 'j.jasche@tum.de'}
    for a in patcher.keys():
        if a in authors:
            data = authors[a]
            data['email'] = patcher[a]

    author_merge(authors, 'MinhMPA', 'Minh Nguyen')
    author_merge(authors, 'Minh MPA', 'Minh Nguyen')
    author_merge(authors, 'flo', 'Florian Führer')
    author_merge(authors, 'LAVAUX Guilhem', 'Guilhem Lavaux')

def author_merge(authors, a_from, a_to):
    if a_from in authors:
        data1 = authors[a_from]
        del authors[a_from]
        if a_to in authors:
            data2 = authors[a_to]
            s = set(data2['years'])
            for y in data1['years']:
                s.add(y)
            s = list(s)
            s.sort()
            data2['years'] = s
        else:
            authors[a_to] = data1

def check_authors(repo, fname):
    authors = {}
    author_names = {}
    for c, _ in repo.blame('HEAD', fname, w=True, M=True):
        if not c.author.name in authors:
            authors[c.author.name] = set()
            author_names[c.author.name] = c.author.email
        authors[c.author.name].add(time.gmtime(c.authored_date)[0])

    for k in authors.keys():
        authors[k] = list(authors[k])
        authors[k].sort()

    authors = {k: dict(email=author_names[k], years=authors[k]) for k in authors.keys()}
    patch_author_list(authors)
    return authors

def manage_file(repo, fname):
    authors = check_authors(repo, fname)
    if re.match(r".*\.(sh|py|pyx|jl)$", fname) != None:
        apply_python_license(fname, authors)
    if re.match(r'.*\.(tcc|cpp|hpp|h)$', fname) != None:
        apply_cpp_license(fname, authors)

def analyze_tree(repo, prefix, t):
    for entry in t:
        if type(entry) == Submodule:
            # analyze_tree(prefix + "/" + entry.path, Repo(entry.path).tree())
            #entry.module())
            print("Seeing a submodule at path " + entry.path)
            continue
        ename = entry.name
        if ename == 'external' or ename == 'cmake_modules':
            continue
        if type(entry) == Tree:
            analyze_tree(repo, prefix + "/" + ename, entry)
        elif type(entry) == Blob:
            fname = prefix + "/" + ename
            manage_file(repo, fname)


if __name__ == "__main__":
    repo = Repo(".")
    assert repo.bare == False
    if len(sys.argv) > 1:
        for f in sys.argv[1:]:
            manage_file(repo, f)
    else:
        t = repo.tree()
        analyze_tree(repo, ".", t)
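
# Usage sketch: run from the repository root, either over the whole tree or
# on explicit files (the file path below is illustrative):
#   python build_tools/gather_sources.py
#   python build_tools/gather_sources.py src/ares3.cpp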

20
build_tools/gen_code_in_header.py
Normal file
@ -0,0 +1,20 @@
#+
# ARES/HADES/BORG Package -- ./build_tools/gen_code_in_header.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
#    Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import sys
import re

code = ""
with open(sys.argv[1], mode="r") as f_in, open(sys.argv[2], mode="w") as f_out:
    for line in f_in:
        line = re.sub(r'\\', r'\\\\', line)
        line = re.sub(r'"', r'\"', line)
        line = line[:-1]
        code += line + "\\n"
    f_out.write('"%s"' % (code,))
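# Worked example for the loop above (hypothetical two-line input file):
#   say "hi"
#   done\
# is emitted as the single C string literal
#   "say \"hi\"\ndone\\\n"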

21
build_tools/gen_splash.py
Normal file
@ -0,0 +1,21 @@
#+
# ARES/HADES/BORG Package -- ./build_tools/gen_splash.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
#    Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import sys
import re

prev_line = None
with open(sys.argv[1], mode="r") as f_in, open(sys.argv[2], mode="w") as f_out:
    for line in f_in:
        if prev_line is not None:
            f_out.write('"' + prev_line + '",\n')
        line = re.sub(r'\\', r'\\\\', line)
        line = re.sub(r'"', r'\"', line)
        prev_line = line[:-1]
    f_out.write('"' + prev_line + '"\n')

20
build_tools/python-builder/Dockerfile
Normal file
@ -0,0 +1,20 @@
#FROM quay.io/pypa/manylinux2014_x86_64
FROM quay.io/pypa/manylinux2010_x86_64
ARG CMAKE=3.17.3

RUN yum install -y git && \
    ( \
      git clone git://github.com/ninja-build/ninja.git && \
      cd ninja && \
      /opt/python/cp36-cp36m/bin/python3 ./configure.py --bootstrap && \
      ./ninja && cp ninja /usr/bin \
    ) && rm -fr ninja && yum clean all

RUN \
    yum install -y wget openssl-devel && \
    wget https://github.com/Kitware/CMake/releases/download/v${CMAKE}/cmake-${CMAKE}.tar.gz && \
    tar zxvf cmake-${CMAKE}.tar.gz && \
    ( \
      cd cmake-${CMAKE} && ./bootstrap && make -j4 && make install \
    ) && rm -fr cmake-${CMAKE}* && \
    yum clean all

32
build_tools/python-builder/build-wheels.sh
Normal file
@ -0,0 +1,32 @@
#!/bin/bash
set -e -x

# Install a system package required by our library
#yum install -y atlas-devel
yum install -y zlib-devel

ln -fs /usr/local/bin/cmake /usr/bin/cmake


ALL_PYTHON="cp36-cp36m" # cp37-cp37m cp38-cp38"

# Compile wheels
for pkg in $ALL_PYTHON; do
    PYBIN=/opt/python/${pkg}/bin
#    "${PYBIN}/pip" install -r /io/dev-requirements.txt
    "${PYBIN}/pip" install -r /io/requirements.txt
    "${PYBIN}/pip" wheel -vvv /io/ -w wheelhouse/
done

rm -f wheelhouse/numpy*

# Bundle external shared libraries into the wheels
for whl in wheelhouse/*.whl; do
    auditwheel repair "$whl" --plat $PLAT -w /io/wheelhouse/
done

# Install packages and test
for pkg in $ALL_PYTHON; do
    PYBIN=/opt/python/${pkg}/bin
    "${PYBIN}/pip" install pyborg --no-index -f /io/wheelhouse
done

13
build_tools/python-builder/start.sh
Normal file
@ -0,0 +1,13 @@
#!/bin/bash

d=$(pwd)
if test x"$(basename $d)" = xbuilder; then
    d=${d}/../
fi
if ! [ -e ${d}/setup.py ] ; then
    echo "Unknown directory. Please move to the root of the cosmotool source tree."
    exit 1
fi

#podman run -ti --rm -e PLAT=manylinux2014_x86_64 -v ${d}:/io:Z pip-builder /io/builder/build-wheels.sh
podman run -ti --rm -e PLAT=manylinux2010_x86_64 -v ${d}:/io:Z pip-builder /io/builder/build-wheels.sh

112
cmake/FindFFTW.cmake
Normal file
@ -0,0 +1,112 @@
# - Find FFTW
# Find the native FFTW includes and library
# This module defines
#   FFTW_INCLUDE_DIR, where to find fftw3.h, etc.
#   FFTW_LIBRARIES, the libraries needed to use FFTW.
#   FFTW_FOUND, If false, do not try to use FFTW.
# also defined, but not for general use, are
#   FFTW_LIBRARY, where to find the FFTW library.

FIND_PATH(FFTW_INCLUDE_DIR fftw3.h)

SET(FFTW_NAMES ${FFTW_NAMES} fftw3 fftw3-3)
FIND_LIBRARY(FFTW_LIBRARY
  NAMES ${FFTW_NAMES}
)

# Find threads part of FFTW

SET(FFTW_THREADS_NAMES ${FFTW_THREADS_NAMES} fftw3_threads fftw3-3_threads)
FIND_LIBRARY(FFTW_THREADS_LIBRARY
  NAMES ${FFTW_THREADS_NAMES}
)

SET(FFTW_OMP_NAMES ${FFTW_OMP_NAMES} fftw3_omp fftw3-3_omp)
FIND_LIBRARY(FFTW_OMP_LIBRARY
  NAMES ${FFTW_OMP_NAMES}
)

SET(FFTW_MPI_NAMES ${FFTW_MPI_NAMES} fftw3_mpi fftw3-3_mpi)
FIND_LIBRARY(FFTW_MPI_LIBRARY
  NAMES ${FFTW_MPI_NAMES}
)


IF (FFTW_THREADS_LIBRARY AND FFTW_INCLUDE_DIR)
  SET(FFTW_THREADS_LIBRARIES ${FFTW_THREADS_LIBRARY})
  SET(FFTW_THREADS_FOUND "YES")
ELSE (FFTW_THREADS_LIBRARY AND FFTW_INCLUDE_DIR)
  SET(FFTW_THREADS_FOUND "NO")
ENDIF (FFTW_THREADS_LIBRARY AND FFTW_INCLUDE_DIR)

IF (FFTW_MPI_LIBRARY AND FFTW_INCLUDE_DIR)
  SET(FFTW_MPI_LIBRARIES ${FFTW_MPI_LIBRARY})
  SET(FFTW_MPI_FOUND "YES")
ELSE()
  SET(FFTW_MPI_FOUND "NO")
ENDIF ()

IF (FFTW_THREADS_FOUND)
  IF (NOT FFTW_THREADS_FIND_QUIETLY)
    MESSAGE(STATUS "Found FFTW threads: ${FFTW_THREADS_LIBRARIES}")
  ENDIF (NOT FFTW_THREADS_FIND_QUIETLY)
ELSE (FFTW_THREADS_FOUND)
  IF (FFTW_THREADS_FIND_REQUIRED)
    MESSAGE(FATAL_ERROR "Could not find FFTW threads library")
  ENDIF (FFTW_THREADS_FIND_REQUIRED)
ENDIF (FFTW_THREADS_FOUND)

IF (FFTW_MPI_FOUND)
  IF (NOT FFTW_MPI_FIND_QUIETLY)
    MESSAGE(STATUS "Found FFTW MPI: ${FFTW_MPI_LIBRARIES}")
  ENDIF()
ELSE()
  IF (FFTW_MPI_FIND_REQUIRED)
    MESSAGE(FATAL_ERROR "Could not find FFTW MPI library")
  ENDIF()
ENDIF ()


IF (FFTW_OMP_LIBRARY AND FFTW_INCLUDE_DIR)
  SET(FFTW_OMP_LIBRARIES ${FFTW_OMP_LIBRARY})
  SET(FFTW_OMP_FOUND "YES")
ELSE (FFTW_OMP_LIBRARY AND FFTW_INCLUDE_DIR)
  SET(FFTW_OMP_FOUND "NO")
ENDIF (FFTW_OMP_LIBRARY AND FFTW_INCLUDE_DIR)

IF (FFTW_OMP_FOUND)
  IF (NOT FFTW_OMP_FIND_QUIETLY)
    MESSAGE(STATUS "Found FFTW OpenMP: ${FFTW_OMP_LIBRARIES}")
  ENDIF (NOT FFTW_OMP_FIND_QUIETLY)
ELSE (FFTW_OMP_FOUND)
  IF (FFTW_OMP_FIND_REQUIRED)
    MESSAGE(FATAL_ERROR "Could not find FFTW OpenMP library")
  ENDIF (FFTW_OMP_FIND_REQUIRED)
ENDIF (FFTW_OMP_FOUND)


IF (FFTW_LIBRARY AND FFTW_INCLUDE_DIR)
  SET(FFTW_LIBRARIES ${FFTW_LIBRARY})
  SET(FFTW_FOUND "YES")
ELSE (FFTW_LIBRARY AND FFTW_INCLUDE_DIR)
  SET(FFTW_FOUND "NO")
ENDIF (FFTW_LIBRARY AND FFTW_INCLUDE_DIR)

IF (FFTW_FOUND)
  IF (NOT FFTW_FIND_QUIETLY)
    MESSAGE(STATUS "Found FFTW: ${FFTW_LIBRARIES}")
  ENDIF (NOT FFTW_FIND_QUIETLY)
ELSE (FFTW_FOUND)
  IF (FFTW_FIND_REQUIRED)
    MESSAGE(FATAL_ERROR "Could not find FFTW library")
  ENDIF (FFTW_FIND_REQUIRED)
ENDIF (FFTW_FOUND)

SET (ON_UNIX ${CMAKE_SYSTEM_NAME} STREQUAL "Linux" OR
     ${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
IF (${ON_UNIX})
  SET (FFTW_EXECUTABLE_LIBRARIES fftw3 fftw3_threads)
ENDIF (${ON_UNIX})
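
# Usage sketch (assuming this module is on CMAKE_MODULE_PATH; the target name
# is illustrative):
#   find_package(FFTW REQUIRED)
#   include_directories(${FFTW_INCLUDE_DIR})
#   target_link_libraries(mytarget ${FFTW_LIBRARIES})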

1061
cmake_modules/FetchContent.cmake
Normal file
File diff suppressed because it is too large

23
cmake_modules/FetchContent/CMakeLists.cmake.in
Normal file
@ -0,0 +1,23 @@
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
# file Copyright.txt or https://cmake.org/licensing for details.

cmake_minimum_required(VERSION ${CMAKE_VERSION})

# We name the project and the target for the ExternalProject_Add() call
# to something that will highlight to the user what we are working on if
# something goes wrong and an error message is produced.

project(${contentName}-populate NONE)

include(ExternalProject)
ExternalProject_Add(${contentName}-populate
                    ${ARG_EXTRA}
                    SOURCE_DIR "${ARG_SOURCE_DIR}"
                    BINARY_DIR "${ARG_BINARY_DIR}"
                    CONFIGURE_COMMAND ""
                    BUILD_COMMAND ""
                    INSTALL_COMMAND ""
                    TEST_COMMAND ""
                    USES_TERMINAL_DOWNLOAD YES
                    USES_TERMINAL_UPDATE YES
)

167
cmake_modules/FindJulia.cmake
Normal file
@ -0,0 +1,167 @@
# Inspiration from https://gist.github.com/JayKickliter/06d0e7c4f84ef7ccc7a9
#

find_program(JULIA_EXECUTABLE julia DOC "Julia executable")
IF (NOT JULIA_EXECUTABLE)
  cmessage(STATUS "Julia executable has not been found")
  return()
endif()

#
# Julia version
#
execute_process(
  COMMAND ${JULIA_EXECUTABLE} --version
  OUTPUT_VARIABLE JULIA_VERSION_STRING
  RESULT_VARIABLE RESULT
)
if(RESULT EQUAL 0)
  string(REGEX REPLACE ".*([0-9]+\\.[0-9]+\\.[0-9]+).*" "\\1"
    JULIA_VERSION_STRING ${JULIA_VERSION_STRING})
  string(REGEX REPLACE "([0-9]+)\\.([0-9]+)\\.([0-9]+)" "JULIA_VERSION_MAJOR=\\1;JULIA_VERSION_MINOR=\\2;JULIA_VERSION_FIX=\\3" JULIA_VERSION_DEFS ${JULIA_VERSION_STRING})
endif()

cmessage(STATUS "Julia version: ${JULIA_VERSION_STRING}")

#
# Julia home
#
IF (JULIA_VERSION_STRING VERSION_GREATER_EQUAL "0.7.0")
  IF (JULIA_VERSION_STRING VERSION_LESS "1.7.0")
    execute_process(
      COMMAND ${JULIA_EXECUTABLE} -E "abspath(Sys.BINDIR)"
      OUTPUT_VARIABLE JULIA_BINDIR
      RESULT_VARIABLE RESULT
    )
    if(RESULT EQUAL 0)
      string(REGEX REPLACE "\"" "" JULIA_BINDIR ${JULIA_BINDIR})
      string(STRIP "${JULIA_BINDIR}" JULIA_BINDIR)
      get_filename_component(JULIA_HOME "${JULIA_BINDIR}/../" ABSOLUTE)
    else()
      cmessage(ERROR "Cannot find JULIA_HOME")
    endif()
  ELSE()
    cmessage(ERROR "Unknown Julia version ${JULIA_VERSION_STRING}")
  ENDIF()
ELSE()
  execute_process(
    COMMAND ${JULIA_EXECUTABLE} -E "abspath(JULIA_HOME)"
    OUTPUT_VARIABLE JULIA_HOME
    RESULT_VARIABLE RESULT
  )
  if(RESULT EQUAL 0)
    string(REGEX REPLACE "\"" "" JULIA_HOME ${JULIA_HOME})
    string(STRIP "${JULIA_HOME}" JULIA_HOME)
    set(JULIA_BINDIR "${JULIA_HOME}")
  else()
    cmessage(ERROR "Cannot find JULIA_HOME")
  endif()
ENDIF()


cmessage(STATUS "Julia: Executable is ${JULIA_EXECUTABLE} (${JULIA_VERSION_STRING})")
cmessage(STATUS "Julia: HOME is ${JULIA_HOME}")
cmessage(STATUS "Julia: BINDIR is ${JULIA_BINDIR}")

#
# Check threading
#
execute_process(
  COMMAND ${JULIA_EXECUTABLE} -E "ccall(:jl_threading_enabled, Cint, ()) != 0"
  OUTPUT_VARIABLE JULIA_THREADING_STATE
  RESULT_VARIABLE RESULT
  OUTPUT_STRIP_TRAILING_WHITESPACE
)
cmessage(STATUS "Julia: threading state is '${JULIA_THREADING_STATE}'")
if(RESULT EQUAL 0)
  string(STRIP "${JULIA_THREADING_STATE}" JULIA_THREADING_STATE)
  if (JULIA_THREADING_STATE STREQUAL "true")
    set(JULIA_DEFS "JULIA_ENABLE_THREADING=1")
  elseif(JULIA_THREADING_STATE STREQUAL "false")
    set(JULIA_DEFS "")
  else()
    cmessage(CWARNING "Julia: unknown return value of threading")
  endif()
endif()

set(JULIA_DEFS ${JULIA_DEFS};JULIA_HOME=\"${JULIA_HOME}\";JULIA_BINDIR=\"${JULIA_BINDIR}\";${JULIA_VERSION_DEFS})


#
# Julia includes
#

IF (JULIA_VERSION_STRING VERSION_GREATER_EQUAL "0.7.0")
  IF (JULIA_VERSION_STRING VERSION_LESS "1.7.0")
    execute_process(
      COMMAND ${JULIA_EXECUTABLE} -E "abspath(Sys.BINDIR, Base.INCLUDEDIR, \"julia\")"
      OUTPUT_VARIABLE JULIA_INCLUDE_DIRS
      RESULT_VARIABLE RESULT
    )
  ELSE()
    cmessage(ERROR "Unknown Julia version ${JULIA_VERSION_STRING}")
  ENDIF()
ELSE()
  execute_process(
    COMMAND ${JULIA_EXECUTABLE} -E "abspath(\"${JULIA_HOME}\", Base.INCLUDEDIR, \"julia\")"
    OUTPUT_VARIABLE JULIA_INCLUDE_DIRS
    RESULT_VARIABLE RESULT
  )
ENDIF()

if(RESULT EQUAL 0)
  string(REGEX REPLACE "\"" "" JULIA_INCLUDE_DIRS ${JULIA_INCLUDE_DIRS})
  string(STRIP "${JULIA_INCLUDE_DIRS}" JULIA_INCLUDE_DIRS)
  set(JULIA_INCLUDE_DIRS ${JULIA_INCLUDE_DIRS}
    CACHE PATH "Location of Julia include files")
ELSE()
  cmessage(ERROR "Cannot find location of Julia header files")
endif()

#
# Julia libs
#
execute_process(
  COMMAND ${JULIA_EXECUTABLE} -E "using Libdl; dirname(abspath(Libdl.dlpath(\"libjulia\")))"
  OUTPUT_VARIABLE JULIA_LIBRARY_DIR
  RESULT_VARIABLE RESULT
)
if(RESULT EQUAL 0)
  string(REGEX REPLACE "\"" "" JULIA_LIBRARY_DIR "${JULIA_LIBRARY_DIR}")
  string(STRIP "${JULIA_LIBRARY_DIR}" JULIA_LIBRARY_DIR)
  cmessage(STATUS "Julia: library dir is ${JULIA_LIBRARY_DIR}")
  set(JULIA_LIBRARY_DIRS ${JULIA_LIBRARY_DIR}
    CACHE PATH "Location of Julia lib dirs")
endif()

execute_process(
  COMMAND ${JULIA_EXECUTABLE} -E "abspath(\"${JULIA_BINDIR}\", Base.PRIVATE_LIBDIR)"
  OUTPUT_VARIABLE JULIA_PRIVATE_LIBRARY_DIR
  RESULT_VARIABLE RESULT
)
if(RESULT EQUAL 0)
  string(REGEX REPLACE "\"" "" JULIA_PRIVATE_LIBRARY_DIR "${JULIA_PRIVATE_LIBRARY_DIR}")
  string(STRIP "${JULIA_PRIVATE_LIBRARY_DIR}" JULIA_PRIVATE_LIBRARY_DIR)
  cmessage(STATUS "Julia: private library dir is ${JULIA_PRIVATE_LIBRARY_DIR}")
  set(JULIA_PRIVATE_LIBRARY_DIRS ${JULIA_PRIVATE_LIBRARY_DIR}
    CACHE PATH "Location of Julia lib dirs")

  SET(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
  SET(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_RPATH} "${JULIA_PRIVATE_LIBRARY_DIRS}")
endif()


find_library( JULIA_LIBRARY
  NAMES julia.${JULIA_VERSION_STRING} julia
  PATHS ${JULIA_LIBRARY_DIRS}
  NO_DEFAULT_PATH
)
cmessage(STATUS "Julia: library is ${JULIA_LIBRARY}")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
  Julia
  REQUIRED_VARS JULIA_LIBRARY JULIA_LIBRARY_DIR JULIA_PRIVATE_LIBRARY_DIR JULIA_INCLUDE_DIRS JULIA_DEFS
  VERSION_VAR JULIA_VERSION_STRING
  FAIL_MESSAGE "Julia not found"
)
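
# Usage sketch: after find_package(Julia), compile against
# ${JULIA_INCLUDE_DIRS}, add ${JULIA_DEFS} as compile definitions and link
# against ${JULIA_LIBRARY}.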
|
70
cmake_modules/GenOptMacro.cmake
Normal file
70
cmake_modules/GenOptMacro.cmake
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
|
||||||
|
find_program(GENGETOPT gengetopt)
|
||||||
|
|
||||||
|
|
||||||
|
macro(add_genopt _sourcelist _ggofile _basefile)
|
||||||
|
|
||||||
|
unset(_structname)
|
||||||
|
unset(_funcname)
|
||||||
|
|
||||||
|
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${_ggofile})
|
||||||
|
set(_ggofile2 ${CMAKE_CURRENT_SOURCE_DIR}/${_ggofile})
|
||||||
|
else(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${_ggofile})
|
||||||
|
set(_ggofile2 ${CMAKE_CURRENT_BINARY_DIR}/${_ggofile})
|
||||||
|
endif(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${_ggofile})
|
||||||
|
|
||||||
|
set(_add_depends "")
|
||||||
|
|
||||||
|
SET(USE_PREBUILT_IF_NECESSARY OFF)
|
||||||
|
foreach(arg ${ARGN})
|
||||||
|
if ("x${arg}" MATCHES "^x(STRUCTNAME|FUNCNAME|DEPENDS|PREBUILT_C|PREBUILT_H)$")
|
||||||
|
SET(doing "${arg}")
|
||||||
|
elseif(doing STREQUAL "STRUCTNAME")
|
||||||
|
SET(_structname ${arg})
|
||||||
|
unset(doing)
|
||||||
|
elseif(doing STREQUAL "FUNCNAME")
|
||||||
|
SET(_funcname ${arg})
|
||||||
|
unset(doing)
|
||||||
|
elseif(doing STREQUAL "DEPENDS")
|
||||||
|
SET(_add_depends ${_add_depends} ${arg})
|
||||||
|
elseif(doing STREQUAL "PREBUILT_C")
|
||||||
|
SET(USE_PREBUILT_IF_NECESSARY ON)
|
||||||
|
SET(_prebuilt_c ${arg})
|
||||||
|
elseif(doing STREQUAL "PREBUILT_H")
|
||||||
|
SET(USE_PREBUILT_IF_NECESSARY ON)
|
||||||
|
SET(_prebuilt_h ${arg})
|
||||||
|
endif()
|
||||||
|
endforeach(arg ${ARGN})
|
||||||
|
|
||||||
|
if(NOT DEFINED _structname)
|
||||||
|
set(_structname ${_basefile})
|
||||||
|
endif(NOT DEFINED _structname)
|
||||||
|
|
||||||
|
if(NOT DEFINED _funcname)
|
||||||
|
set(_funcname ${_basefile})
|
||||||
|
endif(NOT DEFINED _funcname)
|
||||||
|
|
||||||
|
set(_cfile ${CMAKE_CURRENT_BINARY_DIR}/${_basefile}.c)
|
||||||
|
set(_hfile ${CMAKE_CURRENT_BINARY_DIR}/${_basefile}.h)
|
||||||
|
|
||||||
|
include_directories(${CMAKE_CURRENT_BINARY_DIR})
|
||||||
|
|
||||||
|
IF(GENGETOPT)
|
||||||
|
add_custom_command(
|
||||||
|
OUTPUT ${_cfile} ${_hfile}
|
||||||
|
COMMAND ${GENGETOPT} -i ${_ggofile2} -f ${_funcname} -a ${_structname} -F ${_basefile} -C
|
||||||
|
DEPENDS ${_ggofile2} ${_add_depends}
|
||||||
|
)
|
||||||
|
ELSE(GENGETOPT)
|
||||||
|
IF(NOT USE_PREBUILT_IF_NECESSARY)
|
||||||
|
message(FATAL_ERROR "Gengetopt has not been found and is required to build intermediate files")
|
||||||
|
ELSE(NOT USE_PREBUILT_IF_NECESSARY)
|
||||||
|
message(WARNING "Using prebuilt configuration parser")
|
||||||
|
configure_file(${_prebuilt_c} ${_cfile} COPYONLY)
|
||||||
|
configure_file(${_prebuilt_h} ${_hfile} COPYONLY)
|
||||||
|
ENDIF(NOT USE_PREBUILT_IF_NECESSARY)
|
||||||
|
ENDIF(GENGETOPT)
|
||||||
|
|
||||||
|
set(${_sourcelist} ${_cfile} ${${_sourcelist}})
|
||||||
|
|
||||||
|
endmacro(add_genopt)
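
# Usage sketch (file and symbol names illustrative): this appends the
# generated parser source to MY_SOURCES.
#   add_genopt(MY_SOURCES myprog.ggo myprog_cmdline
#              STRUCTNAME my_args FUNCNAME my_parser
#              PREBUILT_C pregen/myprog_cmdline.c PREBUILT_H pregen/myprog_cmdline.h)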

134
cmake_modules/GetGitRevisionDescription.cmake
Normal file
@ -0,0 +1,134 @@
# - Returns a version string from Git
#
# These functions force a re-configure on each git commit so that you can
# trust the values of the variables in your build system.
#
#  get_git_head_revision(<refspecvar> <hashvar> [<additional arguments to git describe> ...])
#
# Returns the refspec and sha hash of the current head revision
#
#  git_describe(<var> [<additional arguments to git describe> ...])
#
# Returns the results of git describe on the source tree, adjusting
# the output so that it tests false if an error occurs.
#
#  git_get_exact_tag(<var> [<additional arguments to git describe> ...])
#
# Returns the results of git describe --exact-match on the source tree,
# adjusting the output so that it tests false if there was no exact
# matching tag.
#
# Requires CMake 2.6 or newer (uses the 'function' command)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

if(__get_git_revision_description)
  return()
endif()
set(__get_git_revision_description YES)

# We must run the following at "include" time, not at function call time,
# to find the path to this module rather than the path to a calling list file
get_filename_component(_gitdescmoddir ${CMAKE_CURRENT_LIST_FILE} PATH)

function(get_git_head_revision _refspecvar _hashvar)
  set(GIT_PARENT_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
  set(GIT_DIR "${GIT_PARENT_DIR}/.git")
  while(NOT EXISTS "${GIT_DIR}") # .git dir not found, search parent directories
    set(GIT_PREVIOUS_PARENT "${GIT_PARENT_DIR}")
    get_filename_component(GIT_PARENT_DIR ${GIT_PARENT_DIR} PATH)
    if(GIT_PARENT_DIR STREQUAL GIT_PREVIOUS_PARENT)
      # We have reached the root directory, we are not in git
      set(${_refspecvar} "GITDIR-NOTFOUND" PARENT_SCOPE)
      set(${_hashvar} "GITDIR-NOTFOUND" PARENT_SCOPE)
      return()
    endif()
    set(GIT_DIR "${GIT_PARENT_DIR}/.git")
  endwhile()
  # check if this is a submodule
  if(NOT IS_DIRECTORY ${GIT_DIR})
    file(READ ${GIT_DIR} submodule)
    string(REGEX REPLACE "gitdir: (.*)\n$" "\\1" GIT_DIR_RELATIVE ${submodule})
    get_filename_component(SUBMODULE_DIR ${GIT_DIR} PATH)
    if (IS_ABSOLUTE ${GIT_DIR_RELATIVE})
      get_filename_component(GIT_DIR ${GIT_DIR_RELATIVE} ABSOLUTE)
    else()
      get_filename_component(GIT_DIR ${SUBMODULE_DIR}/${GIT_DIR_RELATIVE} ABSOLUTE)
    endif()
  endif()
  set(GIT_DATA "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/git-data")
  if(NOT EXISTS "${GIT_DATA}")
    file(MAKE_DIRECTORY "${GIT_DATA}")
  endif()

  if(NOT EXISTS "${GIT_DIR}/HEAD")
    return()
  endif()
  set(HEAD_FILE "${GIT_DATA}/HEAD")
  configure_file("${GIT_DIR}/HEAD" "${HEAD_FILE}" COPYONLY)

  configure_file("${_gitdescmoddir}/GetGitRevisionDescription.cmake.in"
    "${GIT_DATA}/grabRef.cmake"
    @ONLY)
  include("${GIT_DATA}/grabRef.cmake")

  set(${_refspecvar} "${HEAD_REF}" PARENT_SCOPE)
  set(${_hashvar} "${HEAD_HASH}" PARENT_SCOPE)
endfunction()

function(git_describe _var)
  if(NOT GIT_FOUND)
    find_package(Git QUIET)
  endif()
  get_git_head_revision(refspec hash)
  if(NOT GIT_FOUND)
    set(${_var} "GIT-NOTFOUND" PARENT_SCOPE)
    return()
  endif()
  if(NOT hash)
    set(${_var} "HEAD-HASH-NOTFOUND" PARENT_SCOPE)
    return()
  endif()

  # TODO sanitize
  #if((${ARGN}" MATCHES "&&") OR
  #   (ARGN MATCHES "||") OR
  #   (ARGN MATCHES "\\;"))
  #  message("Please report the following error to the project!")
  #  message(FATAL_ERROR "Looks like someone's doing something nefarious with git_describe! Passed arguments ${ARGN}")
  #endif()

  #message(STATUS "Arguments to execute_process: ${ARGN}")

  execute_process(COMMAND
    "${GIT_EXECUTABLE}"
    describe
    ${hash}
    ${ARGN}
    WORKING_DIRECTORY
    "${CMAKE_CURRENT_SOURCE_DIR}"
    RESULT_VARIABLE
    res
    OUTPUT_VARIABLE
    out
    ERROR_QUIET
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(NOT res EQUAL 0)
    set(out "${out}-${res}-NOTFOUND")
  endif()

  set(${_var} "${out}" PARENT_SCOPE)
endfunction()

function(git_get_exact_tag _var)
  git_describe(out --exact-match ${ARGN})
  set(${_var} "${out}" PARENT_SCOPE)
endfunction()

41
cmake_modules/GetGitRevisionDescription.cmake.in
Normal file
@ -0,0 +1,41 @@
#
# Internal file for GetGitRevisionDescription.cmake
#
# Requires CMake 2.6 or newer (uses the 'function' command)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

set(HEAD_HASH)

file(READ "@HEAD_FILE@" HEAD_CONTENTS LIMIT 1024)

string(STRIP "${HEAD_CONTENTS}" HEAD_CONTENTS)
if(HEAD_CONTENTS MATCHES "ref")
  # named branch
  string(REPLACE "ref: " "" HEAD_REF "${HEAD_CONTENTS}")
  if(EXISTS "@GIT_DIR@/${HEAD_REF}")
    configure_file("@GIT_DIR@/${HEAD_REF}" "@GIT_DATA@/head-ref" COPYONLY)
  else()
    configure_file("@GIT_DIR@/packed-refs" "@GIT_DATA@/packed-refs" COPYONLY)
    file(READ "@GIT_DATA@/packed-refs" PACKED_REFS)
    if(${PACKED_REFS} MATCHES "([0-9a-z]*) ${HEAD_REF}")
      set(HEAD_HASH "${CMAKE_MATCH_1}")
    endif()
  endif()
else()
  # detached HEAD
  configure_file("@GIT_DIR@/HEAD" "@GIT_DATA@/head-ref" COPYONLY)
endif()

if(NOT HEAD_HASH)
  file(READ "@GIT_DATA@/head-ref" HEAD_HASH LIMIT 1024)
  string(STRIP "${HEAD_HASH}" HEAD_HASH)
endif()

1504
cmake_modules/UseLATEX.cmake
Normal file
File diff suppressed because it is too large

38
cmake_modules/ares_module.cmake
Normal file
@ -0,0 +1,38 @@
macro(add_liblss_module module)
  set(_module_fname "${CMAKE_SOURCE_DIR}/extra/${module}/libLSS/${module}.cmake")
  if (EXISTS ${_module_fname})
    set(BUILD_ARES_MODULE_${module} ON)
    set(_ARES_current_parse_module ${module})
    set(ARES_MODULE_DIR "${CMAKE_SOURCE_DIR}/extra/${module}")
    # Add the libLSS in the module to the search path
    SET(ARES_INCLUDE_PATH ${ARES_INCLUDE_PATH} ${CMAKE_SOURCE_DIR}/extra/${module})
    include(${_module_fname})
  endif()
endmacro()

macro(add_liblss_test_module module)
  set(_module_fname_base "${CMAKE_SOURCE_DIR}/extra/${module}/libLSS/${module}.cmake")
  set(_module_fname "${CMAKE_SOURCE_DIR}/extra/${module}/libLSS/tests/tests.cmake")
  if (EXISTS ${_module_fname_base} AND EXISTS ${_module_fname})
    set(_ARES_current_parse_module ${module})
    set(ARES_MODULE_DIR "${CMAKE_SOURCE_DIR}/extra/${module}")
    include(${_module_fname})
  endif()
endmacro()

function(check_ares_module _my_var)
  set(${_my_var} TRUE PARENT_SCOPE)
  foreach(module IN LISTS ARGN)
    list(FIND ARES_MODULES ${module} _module_found)
    if(${_module_found} EQUAL -1)
      set(${_my_var} FALSE PARENT_SCOPE)
    endif()
  endforeach()
endfunction()

function(require_ares_module)
  check_ares_module(_result ${ARGV})
  if (NOT ${_result})
    cmessage(FATAL_ERROR "Module(s) ${ARGV} are necessary to build ${_ARES_current_parse_module}")
  endif()
endfunction()
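
# Usage sketch (module names follow the extra/ layout used elsewhere in this
# repository):
#   add_liblss_module(borg)
#   require_ares_module(hades borg)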

32
cmake_modules/clang-format.cmake
Normal file
@ -0,0 +1,32 @@
find_program(CLANG_FORMAT clang-format)

function(setup_formatter MODULES)

  if(CLANG_FORMAT)

    SET(_glob_pattern
      ${CMAKE_SOURCE_DIR}/libLSS/*.cpp
      ${CMAKE_SOURCE_DIR}/libLSS/*.hpp
    )
    foreach(module IN LISTS ${MODULES})
      set(_glob_module
        ${CMAKE_SOURCE_DIR}/extra/${module}/libLSS/*.cpp
        ${CMAKE_SOURCE_DIR}/extra/${module}/libLSS/*.hpp
      )
      SET(_glob_pattern ${_glob_pattern} ${_glob_module})

      file(GLOB_RECURSE module_sources ${_glob_module})
      add_custom_target(clangformat-${module}
        COMMAND ${CLANG_FORMAT} -style=file -i ${module_sources}
      )
    endforeach()

    file(GLOB_RECURSE ALL_SOURCE_FILES ${_glob_pattern})

    add_custom_target(clangformat
      COMMAND ${CLANG_FORMAT} -style=file -i ${ALL_SOURCE_FILES}
    )

  endif()

endfunction()
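
# Usage sketch: pass the *name* of the list variable holding the module list
# (names illustrative); this defines the targets clangformat and
# clangformat-<module>.
#   set(MY_MODULES hades borg)
#   setup_formatter(MY_MODULES)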

42
cmake_modules/color_msg.cmake
Normal file
@ -0,0 +1,42 @@
if(NOT WIN32)
  string(ASCII 27 Esc)
  set(ColourReset "${Esc}[m")
  set(ColourBold  "${Esc}[1m")
  set(Red         "${Esc}[31m")
  set(Green       "${Esc}[32m")
  set(Yellow      "${Esc}[33m")
  set(Blue        "${Esc}[34m")
  set(Magenta     "${Esc}[35m")
  set(Cyan        "${Esc}[36m")
  set(White       "${Esc}[37m")
  set(BoldRed     "${Esc}[1;31m")
  set(BoldGreen   "${Esc}[1;32m")
  set(BoldYellow  "${Esc}[1;33m")
  set(BoldBlue    "${Esc}[1;34m")
  set(BoldMagenta "${Esc}[1;35m")
  set(BoldCyan    "${Esc}[1;36m")
  set(BoldWhite   "${Esc}[1;37m")
endif()

function(cmessage)
  list(GET ARGV 0 MessageType)
  if(MessageType STREQUAL FATAL_ERROR OR MessageType STREQUAL SEND_ERROR)
    list(REMOVE_AT ARGV 0)
    message(${MessageType} "${BoldRed}${ARGV}${ColourReset}")
  elseif(MessageType STREQUAL CWARNING)
    list(REMOVE_AT ARGV 0)
    message(STATUS "${BoldYellow}${ARGV}${ColourReset}")
  elseif(MessageType STREQUAL WARNING)
    list(REMOVE_AT ARGV 0)
    message(${MessageType} "${BoldYellow}${ARGV}${ColourReset}")
  elseif(MessageType STREQUAL AUTHOR_WARNING)
    list(REMOVE_AT ARGV 0)
    message(${MessageType} "${BoldCyan}${ARGV}${ColourReset}")
  elseif(MessageType STREQUAL STATUS)
    list(REMOVE_AT ARGV 0)
    message(${MessageType} "${Green}${ARGV}${ColourReset}")
  else()
    message("${ARGV}")
  endif()
endfunction()
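# Usage sketch for cmessage():
#   cmessage(STATUS "Detected FFTW")            # green status line
#   cmessage(CWARNING "Optional dep missing")   # yellow, non-fatal
#   cmessage(FATAL_ERROR "Cannot continue")     # bold red, aborts configure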

243
cmake_modules/git-archive-all.sh
Normal file
@ -0,0 +1,243 @@
#+
# This is ABYSS (./cmake_modules/git-archive-all.sh) -- Copyright (C) Guilhem Lavaux (2009-2014)
#
# guilhem.lavaux@gmail.com
#
# This software is a computer program whose purpose is to provide tools to do
# full-sky bayesian analysis of random fields (e.g., non exhaustively,
# wiener filtering, power spectra, lens reconstruction, template fitting).
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#+
#!/bin/bash -
#
# File:        git-archive-all.sh
#
# Description: A utility script that builds an archive file(s) of all
#              git repositories and submodules in the current path.
#              Useful for creating a single tarfile of a git super-
#              project that contains other submodules.
#
# Examples:    Use git-archive-all.sh to create archive distributions
#              from git repositories. To use, simply do:
#
#                  cd $GIT_DIR; git-archive-all.sh
#
#              where $GIT_DIR is the root of your git superproject.
#
# License:     GPL3
#
###############################################################################
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
###############################################################################

# DEBUGGING
set -e
set -C # noclobber

# TRAP SIGNALS
trap 'cleanup' QUIT EXIT

# For security reasons, explicitly set the internal field separator
# to newline, space, tab
OLD_IFS=$IFS
IFS='
 	'

function cleanup () {
    rm -f $TMPFILE
    rm -f $TOARCHIVE
    IFS="$OLD_IFS"
}

function usage () {
    echo "Usage is as follows:"
    echo
    echo "$PROGRAM <--version>"
    echo "    Prints the program version number on a line by itself and exits."
    echo
    echo "$PROGRAM <--usage|--help|-?>"
    echo "    Prints this usage output and exits."
    echo
    echo "$PROGRAM [--format <fmt>] [--prefix <path>] [--separate|-s] [output_file]"
    echo "    Creates an archive for the entire git superproject, and its submodules"
    echo "    using the passed parameters, described below."
    echo
    echo "    If '--format' is specified, the archive is created with the named"
    echo "    git archiver backend. Obviously, this must be a backend that git-archive"
    echo "    understands. The format defaults to 'tar' if not specified."
    echo
    echo "    If '--prefix' is specified, the archive's superproject and all submodules"
    echo "    are created with the <path> prefix named. The default is to not use one."
    echo
    echo "    If '--separate' or '-s' is specified, individual archives will be created"
    echo "    for each of the superproject itself and its submodules. The default is to"
    echo "    concatenate individual archives into one larger archive."
    echo
    echo "    If 'output_file' is specified, the resulting archive is created as the"
    echo "    file named. This parameter is essentially a path that must be writeable."
    echo "    When combined with '--separate' ('-s') this path must refer to a directory."
    echo "    Without this parameter or when combined with '--separate' the resulting"
    echo "    archive(s) are named with a dot-separated path of the archived directory and"
    echo "    a file extension equal to their format (e.g., 'superdir.submodule1dir.tar')."
}

function version () {
    echo "$PROGRAM version $VERSION"
}

# Internal variables and initializations.
readonly PROGRAM=`basename "$0"`
readonly VERSION=0.2

OLD_PWD="`pwd`"
TMPDIR=${TMPDIR:-/tmp}
TMPFILE=`mktemp "$TMPDIR/$PROGRAM.XXXXXX"` # Create a place to store our work's progress
TOARCHIVE=`mktemp "$TMPDIR/$PROGRAM.toarchive.XXXXXX"`
OUT_FILE=$OLD_PWD # assume "this directory" without a name change by default
SEPARATE=0

FORMAT=tar
PREFIX=
TREEISH=HEAD

# RETURN VALUES/EXIT STATUS CODES
readonly E_BAD_OPTION=254
readonly E_UNKNOWN=255

# Process command-line arguments.
while test $# -gt 0; do
    case $1 in
        --format )
            shift
            FORMAT="$1"
            shift
            ;;

        --prefix )
            shift
            PREFIX="$1"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
|
||||||
|
--separate | -s )
|
||||||
|
shift
|
||||||
|
SEPARATE=1
|
||||||
|
;;
|
||||||
|
|
||||||
|
--version )
|
||||||
|
version
|
||||||
|
exit
|
||||||
|
;;
|
||||||
|
|
||||||
|
-? | --usage | --help )
|
||||||
|
usage
|
||||||
|
exit
|
||||||
|
;;
|
||||||
|
|
||||||
|
-* )
|
||||||
|
echo "Unrecognized option: $1" >&2
|
||||||
|
usage
|
||||||
|
exit $E_BAD_OPTION
|
||||||
|
;;
|
||||||
|
|
||||||
|
* )
|
||||||
|
break
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ ! -z "$1" ]; then
|
||||||
|
OUT_FILE="$1"
|
||||||
|
shift
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Validate parameters; error early, error often.
|
||||||
|
if [ $SEPARATE -eq 1 -a ! -d $OUT_FILE ]; then
|
||||||
|
echo "When creating multiple archives, your destination must be a directory."
|
||||||
|
echo "If it's not, you risk being surprised when your files are overwritten."
|
||||||
|
exit
|
||||||
|
elif [ `git config -l | grep -q '^core\.bare=false'; echo $?` -ne 0 ]; then
|
||||||
|
echo "$PROGRAM must be run from a git working copy (i.e., not a bare repository)."
|
||||||
|
exit
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create the superproject's git-archive
|
||||||
|
git archive --format=$FORMAT --prefix="$PREFIX" $TREEISH > $TMPDIR/$(basename $(pwd)).$FORMAT
|
||||||
|
echo $TMPDIR/$(basename $(pwd)).$FORMAT >| $TMPFILE # clobber on purpose
|
||||||
|
superfile=`head -n 1 $TMPFILE`
|
||||||
|
|
||||||
|
# find all '.git' dirs, these show us the remaining to-be-archived dirs
|
||||||
|
find . -name '.git' -type d -print | sed -e 's/^\.\///' -e 's/\.git$//' | grep -v '^$' >> $TOARCHIVE
|
||||||
|
|
||||||
|
while read path; do
|
||||||
|
TREEISH=$(git submodule | grep "^ .*${path%/} " | cut -d ' ' -f 2) # git-submodule does not list trailing slashes in $path
|
||||||
|
cd "$path"
|
||||||
|
git archive --format=$FORMAT --prefix="${PREFIX}$path" ${TREEISH:-HEAD} > "$TMPDIR"/"$(echo "$path" | sed -e 's/\//./g')"$FORMAT
|
||||||
|
if [ $FORMAT == 'zip' ]; then
|
||||||
|
# delete the empty directory entry; zipped submodules won't unzip if we don't do this
|
||||||
|
zip -d "$(tail -n 1 $TMPFILE)" "${PREFIX}${path%/}" >/dev/null # remove trailing '/'
|
||||||
|
fi
|
||||||
|
echo "$TMPDIR"/"$(echo "$path" | sed -e 's/\//./g')"$FORMAT >> $TMPFILE
|
||||||
|
cd "$OLD_PWD"
|
||||||
|
done < $TOARCHIVE
|
||||||
|
|
||||||
|
# Concatenate archives into a super-archive.
|
||||||
|
if [ $SEPARATE -eq 0 ]; then
|
||||||
|
if [ $FORMAT == 'tar' ]; then
|
||||||
|
sed -e '1d' $TMPFILE | while read file; do
|
||||||
|
tar --concatenate -f "$superfile" "$file" && rm -f "$file"
|
||||||
|
done
|
||||||
|
elif [ $FORMAT == 'zip' ]; then
|
||||||
|
sed -e '1d' $TMPFILE | while read file; do
|
||||||
|
# zip incorrectly stores the full path, so cd and then grow
|
||||||
|
cd `dirname "$file"`
|
||||||
|
zip -g "$superfile" `basename "$file"` && rm -f "$file"
|
||||||
|
done
|
||||||
|
cd "$OLD_PWD"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "$superfile" >| $TMPFILE # clobber on purpose
|
||||||
|
fi
|
||||||
|
|
||||||
|
while read file; do
|
||||||
|
mv "$file" "$OUT_FILE"
|
||||||
|
done < $TMPFILE
|
36
cmake_modules/run_test.cmake
Normal file
@ -0,0 +1,36 @@
# https://cmake.org/pipermail/cmake/2009-July/030788.html
#-------------------------------------------------
# some argument checking:
# test_cmd is the command to run with all its arguments
if( NOT test_cmd )
   message( FATAL_ERROR "Variable test_cmd not defined" )
endif( NOT test_cmd )

# output_blessed contains the name of the "blessed" output file
if( NOT output_blessed )
   message( FATAL_ERROR "Variable output_blessed not defined" )
endif( NOT output_blessed )

# output_test contains the name of the output file the test_cmd will produce
if( NOT output_test )
   message( FATAL_ERROR "Variable output_test not defined" )
endif( NOT output_test )

# convert the space-separated string to a list
separate_arguments( test_args )
message( "ARGUMENTS: ${test_cmd} ${test_args}" )

execute_process(
   COMMAND ${test_cmd} ${test_args}
   OUTPUT_FILE ${output_test}
)

execute_process(
   COMMAND ${CMAKE_COMMAND} -E compare_files ${output_blessed} ${output_test}
   RESULT_VARIABLE test_not_successful
)

if( test_not_successful )
   message( SEND_ERROR "${output_test} does not match ${output_blessed}!" )
endif( test_not_successful )
#-------------------------------------------------
10
cmake_modules/test_compile_template.cmake
Normal file
@ -0,0 +1,10 @@
include_directories(@TEST_INCLUDE_DIRS@)
try_compile(COMPILE_SUCCEEDED
    ${CMAKE_BINARY_DIR}/compile_tests
    @COMPILE_SOURCE@
)

if(COMPILE_SUCCEEDED)
  message("Success!")
else()
endif()
51
cmake_modules/test_macros.cmake
Normal file
@ -0,0 +1,51 @@
SET(TEST_DIR ${CMAKE_BINARY_DIR}/_test_dir)

execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${TEST_DIR})

macro(ADD_FAILING_TEST NAME SOURCE_FILE )
  set(NAME_BIN ${NAME}.exe)

  add_executable(${NAME_BIN} ${SOURCE_FILE})

  set_target_properties(${NAME_BIN} PROPERTIES
    EXCLUDE_FROM_ALL TRUE
    EXCLUDE_FROM_DEFAULT_BUILD TRUE)

  add_test(NAME ${NAME}
    COMMAND ${CMAKE_COMMAND} --build . --target ${NAME_BIN} --config $<CONFIGURATION>
    WORKING_DIRECTORY ${CMAKE_BINARY_DIR})

  set_tests_properties(${NAME} PROPERTIES WILL_FAIL TRUE)
endmacro()

macro(add_test_to_run NAME bin)
  add_test(NAME ${NAME} COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${bin} WORKING_DIRECTORY ${TEST_DIR})
endmacro()

macro(add_direct_test NAME SOURCE_FILE)
  set(NAME_BIN ${NAME}_exe)

  add_executable(${NAME_BIN} ${SOURCE_FILE})
  target_link_libraries(${NAME_BIN} test_library_LSS LSS ${LIBS})

  add_test(NAME ${NAME} COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${NAME_BIN} WORKING_DIRECTORY ${TEST_DIR})
endmacro()

macro(add_check_output_test NAME SOURCE_FILE ARG)
  set(NAME_BIN ${NAME}_exe)

  add_executable(${NAME_BIN} ${SOURCE_FILE})
  target_link_libraries(${NAME_BIN} test_library_LSS LSS ${LIBS})

  # The output must match
  add_test(NAME ${NAME_BIN}.output
    COMMAND ${CMAKE_COMMAND}
      -D test_cmd=${CMAKE_CURRENT_BINARY_DIR}/${NAME_BIN}
      -D test_args:string=${ARG}
      -D output_blessed=${SOURCE_FILE}.expected
      -D output_test=${TEST_DIR}/${NAME_BIN}.out
      -P ${CMAKE_SOURCE_DIR}/cmake_modules/run_test.cmake
    WORKING_DIRECTORY ${TEST_DIR})

endmacro()
40
codemeta.json
Normal file
@ -0,0 +1,40 @@
{
    "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
    "@type": "SoftwareSourceCode",
    "identifier": "ARES",
    "description": "CodeMeta is a concept vocabulary that can be used to standardize the exchange of software metadata across repositories and organizations.",
    "name": "ARES3: Algorithm for REconstruction and Sampling",
    "codeRepository": "https://bitbucket.org/bayesian_lss_team/ares",
    "issueTracker": "https://bitbucket.org/bayesian_lss_team/ares/issues",
    "license": "https://spdx.org/licenses/CECILL-2.1",
    "version": "0.1",
    "author": [{
        "@id": "https://orcid.org/0000-0003-0143-8891",
        "@type": "Person",
        "email": "guilhem.lavaux@iap.fr",
        "givenName": "Guilhem",
        "familyName": "Lavaux",
        "affiliation": "CNRS / Sorbonne Université"
    },
    {
        "@id": "https://orcid.org/0000-0002-4677-5843",
        "@type": "Person",
        "email": "jens.jasche@fysik.su.se",
        "givenName": "Jens",
        "familyName": "Jasche",
        "affiliation": "Stockholm University"
    }
    ],
    "copyrightHolder": {"Organization": "multiple"},
    "creator": {},
    "datePublished": "2018-06-16",
    "dateModified": "2018-07-08",
    "dateCreated": "2018-07-08",
    "publisher": "",
    "keywords": [],
    "programmingLanguage": {
        "name": "C++"
    },
    "downloadUrl": "https://bitbucket.org/bayesian_lss_team/ares/get/cb741d97113e.zip",
    "softwareRequirements": ["https://cmake.org/"]
}
1
docs/.gitignore
vendored
Normal file
@ -0,0 +1 @@
!source/user/building/
20
docs/Makefile
Normal file
@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
9
docs/README.txt
Normal file
@ -0,0 +1,9 @@

* Install python3 sphinx. You can use your favoured package manager (e.g. dnf install python3-sphinx)

* Install doxygen (dnf install doxygen)

* pip3 install --user -r requirements.txt

* "make html"
14
docs/build_python_doc.patch
Normal file
@ -0,0 +1,14 @@
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 411d36bc..3fbb72e1 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -89,3 +89,9 @@ years:
 ~~~~~~~~~ subsubsections
 ^^^^^^^^^
 '''''''''
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Python reference documentation
+
+   pythonref.rst
35
docs/make.bat
Normal file
@ -0,0 +1,35 @@
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build

if "%1" == "" goto help

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
BIN
docs/notes/lightcone_considerations_BLSS.pdf
Normal file
Binary file not shown.
11
docs/outputs.rst
Normal file
@ -0,0 +1,11 @@

# BORG output files

## hmc_performance.txt

[FS: in particular what the sign of Delta H is]

## mcmc_??.h5

## restart_??.h5

14
docs/requirements.txt
Normal file
@ -0,0 +1,14 @@
exhale
sphinx_rtd_theme
sphinx==2.2.2
jinja2<3.0
ipython
nbsphinx
setuptools
disttools
sphinx_copybutton
pandoc
numpy
nbconvert
docutils<0.17
markupsafe==2.0.1
0
docs/source/_static/.empty
Normal file
6
docs/source/_static/css/custom.css
Normal file
@ -0,0 +1,6 @@
.math {
    text-align: left;
}
.eqno {
    float: right;
}
7
docs/source/_templates/autosummary/base.rst
Normal file
@ -0,0 +1,7 @@
.. default-domain:: py

{{ name | escape | underline }}

.. currentmodule:: {{ module }}

.. auto{{ objtype }}:: {{ module }}.{{ objname }}
16
docs/source/_templates/autosummary/class.rst
Normal file
@ -0,0 +1,16 @@
.. default-domain:: py

{{ objname | escape | underline }}

.. currentmodule:: {{ module }}

.. autoclass:: {{ module }}.{{ objname }}
   :members:


   .. rubric:: Methods

   .. autosummary::
   {% for item in methods %}
      ~{{ module }}.{{ objname }}.{{ item }}
   {%- endfor %}
38
docs/source/_templates/autosummary/module.rst
Normal file
@ -0,0 +1,38 @@
.. default-domain:: py

{{ fullname | escape | underline }}

.. automodule:: {{ fullname }}

   {% block functions %}
   {% if functions %}
   .. rubric:: Functions

   .. autosummary::
   {% for item in functions %}
      {{ item }}
   {%- endfor %}
   {% endif %}
   {% endblock %}

   {% block classes %}
   {% if classes %}
   .. rubric:: Classes

   .. autosummary::
   {% for item in classes %}
      {{ item }}
   {%- endfor %}
   {% endif %}
   {% endblock %}

   {% block exceptions %}
   {% if exceptions %}
   .. rubric:: Exceptions

   .. autosummary::
   {% for item in exceptions %}
      {{ item }}
   {%- endfor %}
   {% endif %}
   {% endblock %}
3
docs/source/changes.rst
Normal file
@ -0,0 +1,3 @@
.. _CHANGES overview:

.. include:: ../../CHANGES.rst
128
docs/source/conf.py
Normal file
@ -0,0 +1,128 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('../sphinx_ext/'))
import datetime
now = datetime.datetime.now()
year = '{:02d}'.format(now.year)

# -- Project information -----------------------------------------------------
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.autosummary',
    'sphinx.ext.napoleon', 'sphinx_rtd_theme', 'sphinx.ext.mathjax',
    'sphinx.ext.todo', 'nbsphinx',
    'IPython.sphinxext.ipython_console_highlighting', 'sphinx_copybutton',
    'toctree_filter'
]
master_doc = 'index'
source_suffix = '.rst'
rst_prolog = '''
.. |a| replace:: *ARES*
'''

# General information about the project.
project = u'ARES-HADES-BORG'
author = u'the Aquila Consortium'
copyright = u"""
2009-""" + year + """, the Aquila Consortium
"""
#version = "latest"

autosummary_generate = True

todo_include_todos = True

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = [
#    'breathe',
#    'exhale',
#]

nbsphinx_execute = 'never'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.

exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.inc.rst']
# Excluding the extension .inc.rst avoids compiling "included" rst files
# (otherwise the corresponding .html is produced) and avoids the "duplicate label"
# warning in case a label is found there (Florent Leclercq, 24-10-2020)

#html_extra_path = [os.path.abspath('../_build/html')]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

html_context = {
    'theme_vcs_pageview_mode': 'view&spa=0'
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_css_files = [
    'css/custom.css',
]

# --- Breathe/Exhale options

breathe_projects = {"ARES libLSS": "./doxyoutput/xml"}

breathe_default_project = "ARES libLSS"

exhale_args = {
    "containmentFolder": "./api",
    "rootFileName": "library_root.rst",
    "rootFileTitle": "Library API",
    "doxygenStripFromPath": "..",
    "createTreeView": True,
    "exhaleExecutesDoxygen": True,
    "exhaleUseDoxyfile": True
}

primary_domain = 'py'
highlight_language = 'py'

# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    toc_filter_exclude = []
    meta = {"bitbucket_url": 'https://www.bitbucket.org/bayesian_lss_team/ares'}
    bitbucket_url = 'https://www.bitbucket.org/bayesian_lss_team/ares'
else:
    toc_filter_exclude = ["aquila"]

import subprocess

os.environ["ARES_BASE"] = os.path.abspath(os.path.join(os.getcwd(), ".."))
#subprocess.call('doxygen Doxyfile', shell=True)
@ -0,0 +1,695 @@
Adding a new likelihood in C++
==============================

Steps to wire a C++ likelihood in hades3.

Preamble
--------

Forward models can now self-register. Unfortunately, likelihoods cannot, so
more work is required. First, be aware that there are three ways of
implementing a new likelihood. Which of the three options is appropriate
depends on the complexity and the level of code reuse that is sought (from
more abstract/more code reuse to less abstract/more flexible):

1. rely on the generic framework (see
   ``extra/borg/libLSS/physics/likelihoods/gaussian.hpp`` for example)
2. use the base class of HADES
   ``extra/hades/libLSS/samplers/hades/base_likelihood.hpp``
3. implement a full likelihood from scratch

Use generic framework
---------------------

The generic framework provides more *turnkey* models at the price of
more programming abstraction.

*Warning! The following was written by Fabian. To be checked by
Guilhem.*

This works best by copying some existing classes using the generic
framework. The generic framework separates the posterior into "bias
model" and "likelihood", which then form a "bundle". Two basic working examples
can be checked to give a better impression:

- *bias:* e.g., ``extra/borg/libLSS/physics/bias/power_law.hpp`` (the power-law
  bias model)
- *likelihood:* e.g., ``extra/borg/libLSS/physics/likelihoods/gaussian.hpp``
  (the per-voxel Gaussian likelihood)

Note that you do not need to recreate both likelihood and bias if one is
sufficient for your needs (e.g., you can bundle a new bias model to an existing
likelihood). Of course, your classes can be defined with additional template
parameters, although we shall assume there are none here.

We will now see the three steps involved in the creation and linking of a
generic bias model.

Writing a bias model
~~~~~~~~~~~~~~~~~~~~

We will consider the noop (for "no operation") bias model, which does nothing to
the input density contrast, to demonstrate the steps involved in the modification
and development of a bias model. The full code is available in
``extra/borg/libLSS/physics/bias/noop.hpp``. The model requires an ample use of
templates. The reason is that a number of the exchanged arrays in the process
have very complicated types: they are not necessarily simple
``boost::multi_array_ref``, they can also be expressions. The advantage of using
expressions is the global reduction of the number of mathematical operations if
the data is masked, and the strong reduction of input/output memory operations,
which is generally a bottleneck in modern computers. The disadvantage is that
compilation becomes longer and compilation errors may become obscure.

Here is a simplification of the NoopBias class (defined as a ``struct`` here,
which gives default public visibility to all members):

.. code:: c++

    struct Noop {

      static constexpr const bool NmeanIsBias = true;
      static const int numParams = 1;

      selection::SimpleAdaptor selection_adaptor;

      double nmean;

      // Default constructor
      Noop(LikelihoodInfo const& = LikelihoodInfo()) {}

      // Setup the default bias parameters
      template <typename B>
      static inline void setup_default(B &params) {}

      // Prepare the bias model for computations
      template <
          class ForwardModel, typename FinalDensityArray,
          typename BiasParameters, typename MetaSelect = NoSelector>
      inline void prepare(
          ForwardModel &fwd_model, const FinalDensityArray &final_density,
          double const _nmean, const BiasParameters &params,
          bool density_updated, MetaSelect _select = MetaSelect()) {
        nmean = params[0];
      }

      // Cleanup the bias model
      void cleanup() {}

      // This function is a relic required by the API. You can return 1 and it
      // will be fine.
      inline double get_linear_bias() const { return 1; }

      // Check whether the given array-like object passes the constraints of
      // the bias model.
      template <typename Array>
      static inline bool check_bias_constraints(Array &&a) {
        return true;
      }

      // Compute a tuple of biased densities. The computation may be lazy or not.
      template <typename FinalDensityArray>
      inline auto compute_density(const FinalDensityArray &array) {
        return std::make_tuple(b_va_fused<double>(
            [nmean = this->nmean](double delta) { return nmean * (1 + delta); },
            array));
      }

      // Compute a tuple of adjoint gradients on the biased densities.
      template <
          typename FinalDensityArray, typename TupleGradientLikelihoodArray>
      inline auto apply_adjoint_gradient(
          const FinalDensityArray &array,
          TupleGradientLikelihoodArray grad_array) {
        return std::make_tuple(b_va_fused<double>(
            [](double g) { return g; },
            std::move(std::get<0>(grad_array))));
      }
    };


The bias model can be decomposed into:

1. a setup phase, with the constructor, ``setup_default`` and ``get_linear_bias``
2. a sanity-check phase with ``check_bias_constraints``
3. a pre-computation/cleanup phase with ``prepare`` and ``cleanup``
4. the actual computation in ``compute_density`` and ``apply_adjoint_gradient``.

The life cycle of a computation roughly follows the above steps:

1. construct
2. setup
3. prepare computation
4. compute density
5. (optionally) compute adjoint gradient
6. cleanup
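
To make this life cycle concrete, here is a minimal sketch of how a caller
could drive the ``Noop`` model defined above. The ``fwd_model``,
``final_density`` and ``like_gradient`` objects are stand-ins for what the
sampler infrastructure normally provides, so treat this as an illustration
rather than a compilable unit:

.. code:: c++

    LikelihoodInfo info;
    Noop bias(info);                                    // 1. construct

    boost::multi_array<double, 1> params(boost::extents[Noop::numParams]);
    Noop::setup_default(params);                        // 2. setup

    bias.prepare(fwd_model, final_density,
                 /*nmean=*/1.0, params,
                 /*density_updated=*/true);             // 3. prepare computation

    auto biased = bias.compute_density(final_density);  // 4. compute density
    auto grad = bias.apply_adjoint_gradient(
        final_density, std::move(like_gradient));       // 5. adjoint gradient

    bias.cleanup();                                     // 6. cleanup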

As you can see above, most functions are templatized, for the reasons given
before the code. As a reminder, each template parameter introduced by the
keyword ``typename X`` stands for a potentially different type, which is
resolved where the specific function or class is used.

Let us focus on ``compute_density``:

.. code:: c++

      // Compute a tuple of biased densities. The computation may be lazy or not.
      template <typename FinalDensityArray>
      inline auto compute_density(const FinalDensityArray &array) {
        return std::make_tuple(b_va_fused<double>(
            [nmean = this->nmean](double delta) { return nmean * (1 + delta); },
            array));
      }

Conventionally, it accepts an object which must behave, **syntactically**, like
a ``boost::multi_array``. In case a concrete, memory-backed array is needed,
one has to allocate it and copy the content of ``array`` to the newly allocated
array. The member function must return a tuple (type ``std::tuple<T1, T2,
...>``) of array-like objects. As this type is complicated, we leverage a C++14
feature which allows the compiler to deduce the return type of the function by
inspecting the value provided to ``return``. Here, this is the value returned by
``make_tuple``, which is built out of a single "fused" array. The fused array is
built out of a function that is evaluated for each element of the array provided
as the second argument to ``b_va_fused``. In practice, if we call that array
``a``, its element at (i, j, k), i.e. ``a[i][j][k]``, is strictly equal to
``nmean*(1+delta[i][j][k])``.
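
If the "fused array" machinery feels abstract, the following self-contained toy
(plain C++, deliberately *not* the LibLSS implementation) mimics what
``b_va_fused`` does for a 1d array: elements are computed on demand from a
function, so nothing is precomputed or stored.

.. code:: c++

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <vector>

    // Toy 1d lazy array: each read evaluates a function, the way a fused
    // array evaluates its lambda per element. Illustration only.
    struct LazyArray {
      std::function<double(std::size_t)> eval;
      double operator[](std::size_t i) const { return eval(i); }
    };

    int main() {
      std::vector<double> delta{-0.5, 0.0, 2.0};
      double nmean = 3.0;
      LazyArray a{[&](std::size_t i) { return nmean * (1 + delta[i]); }};
      // Reading a[2] computes nmean*(1+delta[2]) on the fly.
      std::cout << a[2] << std::endl; // prints 9
    }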

Writing a likelihood model
~~~~~~~~~~~~~~~~~~~~~~~~~~


Linking your bias/likelihood bundle to BORG
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Suppose then that you have ``mybias.hpp`` and ``mylike.hpp``, which define the
classes ``MyBias`` and ``MyLikelihood``. If you have encapsulated the classes in
their own namespace, make sure they are visible in the ``bias::`` namespace
(in the case of MyBias) and the root namespace (in the case of MyLikelihood).
The rationale behind this is to avoid polluting namespaces and name collisions
while combining different headers/C++ modules.

1. each bias class has to declare the following two parameters in
   ``extra/borg/physics/bias/biases.cpp`` (which are defined in
   ``mybias.hpp``; make sure to also ``#include "mybias.hpp"``):

.. code:: c++

    const int LibLSS::bias::mynamespace::MyBias::numParams;
    const bool LibLSS::bias::mynamespace::MyBias::NmeanIsBias;

2. Then, you have to *register your bundle:* in
   ``extra/hades/src/hades_bundle_init.hpp``, under

.. code:: c++

    std::map<
        std::string,
        std::function<std::shared_ptr<VirtualGenericBundle>(
            ptree &, std::shared_ptr<GridDensityLikelihoodBase<3>> &,
            markov_ptr &, markov_ptr &, markov_ptr &,
            std::function<MarkovSampler *(int, int)> &, LikelihoodInfo &)>>
        generic_map{ // ...

add your bundle:

.. code:: c++

    {"MY_BIAS_LIKE", create_generic_bundle<bias::MyBias, MyLikelihood, ptree &>}

In addition, in
``extra/borg/libLSS/samplers/generic/impl_gaussian.cpp``, add

.. code:: c++

    #include "mybias.hpp"
    #include "mylike.hpp"

as well as

.. code:: c++

    FORCE_INSTANCE(bias::MyBias, MyLikelihood, number_of_parameters);

where ``number_of_parameters`` stands for the number of free parameters
this bundle expects (i.e. bias as well as likelihood parameters). *(FS:
always impl\_gaussian?)*

*(FS: I am interpolating here...)* If on the other hand you want to
bundle your bias model with an existing likelihood, register it in
``extra/borg/src/bias_generator.cpp`` under
``LibLSS::setup_biased_density_generator``; e.g. for the Gaussian
likelihood:

.. code:: c++

    {"GAUSSIAN_MYBIAS",
     mt(generate_biased_density<AdaptBias_Gauss<bias::MyBias>>, nullMapper)},


.. todo::

    A global registry (like ``ForwardRegistry``) would be needed for this
    mechanism as well. That would save compilation time and avoid modifying the
    different bundles that rely on the generic framework.

Make an automatic test case
~~~~~~~~~~~~~~~~~~~~~~~~~~~

In order to enable the *gradient test* for your bias/likelihood combination, add
a section to ``extra/borg/libLSS/tests/borg_gradients.py_config``:

.. code:: python

    'mybundle': {
        'includes':
        inc + [
            "libLSS/samplers/generic/generic_hmc_likelihood.hpp",
            "libLSS/physics/bias/mybias.hpp",
            # FS: not sure how generic this is
            "libLSS/physics/adapt_classic_to_gauss.hpp",
            "libLSS/physics/likelihoods/mylike.hpp"
        ],
        'likelihood':
        'LibLSS::GenericHMCLikelihood<LibLSS::bias::MyBias, LibLSS::MyLikelihood>',
        'model':
        default_model,
        'model_args': 'comm, box, 1e-5'
    },


Define new configuration options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you want to read **custom fields from the ini file**, you should edit
``extra/hades/src/likelihood_info.cpp``. Also, set default values in
``extra/hades/libLSS/tests/generic_gradient_test.cpp`` and
``extra/hades/libLSS/tests/setup_hades_test_run.cpp``.

Bonus point: map the bundle to a forward model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Since 2.1, all the generic bias models can be mapped to a standard
``BORGForwardModel``. The advantage is that they can be recombined in different
ways, and notably that bias can be applied before specific transforms such as
redshift-space distortions.

This can be done easily by adding a new line in
``extra/borg/libLSS/physics/forwards/adapt_generic_bias.cpp``, in the function
``bias_registrator()``. Here is, for example, the case of the linear bias model:

.. code:: c++

    ForwardRegistry::instance().registerFactory("bias::Linear", create_bias<bias::LinearBias>);

This call creates a new forward model element called ``bias::Linear`` which can
be created dynamically. The bias parameters can then be set through
``BORGForwardModel::setModelParams`` with the dictionary entry
``biasParameters``, which must point to a 1d ``boost::multi_array`` of the
adequate size. By default, the adopted bias parameters are provided by the
underlying generic bias model class through ``setup_default()``.
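
As a sketch, setting the bias amplitude on such a forward model element could
look like the following. The confirmed pieces are the ``biasParameters`` entry
and ``BORGForwardModel::setModelParams``; the dictionary type name and the way
``model`` was obtained are assumptions for illustration only.

.. code:: c++

    // "model" is assumed to be a std::shared_ptr<BORGForwardModel> obtained
    // from the forward model registry or a model builder (assumption).
    boost::multi_array<double, 1> bias_params(boost::extents[1]);
    bias_params[0] = 1.5; // e.g. a linear bias amplitude

    ModelDictionnary params; // assumed dictionary type used by setModelParams
    params["biasParameters"] = bias_params;
    model->setModelParams(params);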

Of course, the amount of information that can be transferred this way is much
more limited. For example, the bias model cannot at the moment produce more
than one field; all the others will be ignored. Supporting that would mean
transforming the forward model into an object with :math:`N` output pins
(:math:`N \geq 2`).

As a final note, a forward model created that way becomes immediately
available in Python through the mechanism provided by
:meth:`aquila_borg.forward.models.newModel`. In C++ it can be accessed through
the ``ForwardRegistry`` (defined in
``extra/hades/libLSS/physics/forwards/registry.hpp``).

Use HADES base class
--------------------

This framework assumes that the model is composed of a set of bias
coefficients in ``galaxy_bias_XXX`` (XXX being the catalog number) and that the
likelihood only depends on the final matter state. An example of a
likelihood implemented on top of it is
``extra/hades/libLSS/samplers/hades/hades_linear_likelihood.cpp``, which
is a basic Gaussian likelihood.

The mechanism for applying selection effects, however, has to be provided by
the new implementation.

With this framework one has to override a number of virtual functions. I
will discuss this for the specific case of ``MyNewLikelihood``, which
will implement a very rudimentary Gaussian likelihood:

.. code:: c++

    class MyNewLikelihood : public HadesBaseDensityLikelihood {
    public:
      // Type alias for the supertype of this class
      typedef HadesBaseDensityLikelihood super_t;
      // Type alias for the supertype of the base class
      typedef HadesBaseDensityLikelihood::super_t grid_t;

    public:
      // One has to define a constructor which takes a LikelihoodInfo.
      MyNewLikelihood(LikelihoodInfo &info);
      virtual ~MyNewLikelihood();

      // This is called to setup the default bias parameters of a galaxy catalog
      void setupDefaultParameters(MarkovState &state, int catalog) override;

      // This is called when a mock catalog is required. The function
      // receives the matter density from the forward model and the state
      // that needs to be filled with mock data.
      void
      generateMockSpecific(ArrayRef const &matter_density, MarkovState &state) override;

      // This evaluates the likelihood based solely on the matter field
      // that is provided (as well as the eventual bias parameters). One
      // cannot interrogate the forward model for more fields.
      // This function must return the *negative* of the log likelihood.
      double logLikelihoodSpecific(ArrayRef const &matter_field) override;

      // This computes the gradient of the function implemented in
      // logLikelihoodSpecific
      void gradientLikelihoodSpecific(
          ArrayRef const &matter_field, ArrayRef &gradient_matter) override;

      // This is called before having resumed or initialized the chain.
      // One should create and allocate all auxiliary fields that are
      // required to run the chain at that moment, and mark the fields
      // of interest to be stored in the mcmc_XXXX.h5 files.
      void initializeLikelihood(MarkovState &state) override;
    };

The above declaration must go in a ``.hpp`` file such as
``my_new_likelihood.hpp``, which would customarily be placed in
``libLSS/samplers/fancy_likelihood``. The source code itself will be
placed in ``my_new_likelihood.cpp`` in the same directory.

Constructor
~~~~~~~~~~~

The first function to implement is the constructor of the class.

.. code:: c++

    MyNewLikelihood::MyNewLikelihood(LikelihoodInfo &info)
        : super_t(info, 1 /* number of bias parameters */) {}

The constructor has to provide the ``info`` to the base class and
indicate the number of bias parameters that will be needed.

Setup default parameters
~~~~~~~~~~~~~~~~~~~~~~~~

The second function allows the developer to fill in the default values
for bias parameters and other auxiliary parameters. They are auxiliary
with respect to the density field inference; in the Bayesian framework,
they are just regular parameters.

.. code:: c++

    void MyNewLikelihood::setupDefaultParameters(MarkovState &state, int catalog) {
      // Retrieve the bias array from the state dictionary.
      // This returns an "ArrayStateElement *" object.
      // Note that "formatGet" applies string formatting. No need to
      // call boost::format.
      auto bias = state.formatGet<ArrayType1d>("galaxy_bias_%d", catalog);
      // This extracts the actual boost::multi_array from the state element.
      // We take a reference here.
      auto &bias_c = *bias->array;
      // Similarly, if needed, we can retrieve the nmean
      auto &nmean_c = state.formatGetScalar<double>("galaxy_nmean_%d", catalog);

      // Now we can fill up the array and value.
      bias_c[0] = 1.0;
      nmean_c = 1;
    }

Note in the above that we asked for ``auto&`` reference types for
``bias_c`` and ``nmean_c``. The ``auto`` keyword asks the compiler to figure
out the type by itself; however, it will not build a reference by default.
This is achieved by adding the ``&`` symbol. That way, any value written
into this variable will be reflected in the original container, which
**would not** be the case without the reference. Also note that
``galaxy_bias_%d`` is already allocated to hold the number of parameters
indicated to the constructor of the base class.
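
The difference is worth a tiny, self-contained illustration (plain C++,
independent of ARES):

.. code:: c++

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<double> v{1.0, 2.0};
      auto copy = v[0]; // independent copy; writes do not reach v
      auto &ref = v[0]; // reference; writes go through to v
      copy = 10.0;
      ref = 20.0;
      assert(v[0] == 20.0); // only the write through the reference is visible
    }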
|
||||||
|
|
||||||
|
Initialize the likelihood
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The initialization done by the base class already takes care of
|
||||||
|
allocating ``galaxy_bias_%d``, ``BORG_final_density``, checking on the
|
||||||
|
size of ``galaxy_data_%d``. One could then do the minimum amount of
|
||||||
|
work, i.e. not even override that function or putting a single statement
|
||||||
|
like this:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
void MyNewLikelihood::initializeLikelihood(MarkovState &state) {
|
||||||
|
super_t::initializeLikelihood(state);
|
||||||
|
}
|
||||||
|
|
||||||
|
If more fields are required to be saved/dumped and allocated, this would
|
||||||
|
otherwise be the perfect place for it. However keep in mind that it is
|
||||||
|
possible that the content of fields in ``MarkovState`` is not
|
||||||
|
initialized. You may rely on the info provided to the constructor in
|
||||||
|
``LikelihoodInfo`` for such cases.
|
||||||
|
|
||||||
|
Evaluate the log likelihood
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Now we arrive at the last piece. The class
|
||||||
|
``HadesBaseDensityLikelihood`` offers a great simplification compared to
|
||||||
|
recoding everything including the management of the forward model for
|
||||||
|
the evaluation of the log likelihood and its adjoint gradient.
|
||||||
|
|
||||||
|
.. warning::
|
||||||
|
|
||||||
|
The function is called logLikelihoodSpecific but it is actually the
|
||||||
|
negative of the log likelihood.
|
||||||
|
|
||||||
|
.. math:: \mathrm{logLikelihoodSpecific}(\delta_\mathrm{m}) = -\log \mathcal{L}(\delta_\mathrm{m})
|
||||||
|
|
||||||
|
This sign is for historical reason as the Hamiltonian Markov Chain
|
||||||
|
algorithm requires the gradient of that function to proceed.
|
||||||
|
|
||||||
|
**[FS: actually when using the generic framework, it seems
|
||||||
|
log\_probability actually returns log( P )...]**
|
||||||
|
|
||||||
|
As an example we will consider here the case of the Gaussian likelihood.
|
||||||
|
The noise in each voxel are all i.i.d. thus we can factorize the
|
||||||
|
likelihood into smaller pieces, one for each voxel:
|
||||||
|
|
||||||
|
.. math:: \mathcal{L}(\{N_{i,g}\}|\{\delta_{i,\text{m}}\}) = \prod \mathcal{L}(N_{i,g}|\delta_{i,\text{m}})
|
||||||
|
|
||||||
|
The likelihood for each voxel is:
|
||||||
|
|
||||||
|
.. math:: \mathcal{L}(N_g|\delta_\text{m},b,\bar{N}) \propto \frac{1}{\sqrt{R\bar{N}}} \exp\left(-\frac{1}{2 R\bar{N}} \left(N_g - R \bar{N}(1+b\delta_m\right)^2 \right)
|
||||||
|
|
||||||
|
We will implement that computation. The first function that we will
|
||||||
|
consider is the evaluation of the log likelihood itself.
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
double
|
||||||
|
MyNewLikelihood::logLikelihoodSpecific(ArrayRef const &delta) {
|
||||||
|
// First create a variable to accumulate the log-likelihood.
|
||||||
|
double logLikelihood = 0;
|
||||||
|
// Gather the information on the final output sizes of the gridded
|
||||||
|
// density.
|
||||||
|
// "model" is provided by the base class, which is of type
|
||||||
|
// std::shared_ptr<BORGForwardModel>, more details in the text
|
||||||
|
size_t const startN0 = model->out_mgr->startN0;
|
||||||
|
size_t const endN0 = startN0 + model->out_mgr->localN0;
|
||||||
|
size_t const N1 = model->out_mgr->N1;
|
||||||
|
size_t const N2 = model->out_mgr->N2;
|
||||||
|
|
||||||
|
// Now we may loop on all catalogs, "Ncat" is also provided
|
||||||
|
// by the base class as well as "sel_field", "nmean", "bias" and
|
||||||
|
// "data"
|
||||||
|
for (int c = 0; c < Ncat; c++) {
|
||||||
|
// This extract the 3d selection array of the catalog "c"
|
||||||
|
// The arrays follow the same scheme as "setupDefaultParameters"
|
||||||
|
auto &sel_array = *(sel_field[c]);
|
||||||
|
// Here we do not request a Read/Write access to nmean. We can copy
|
||||||
|
// the value which is more efficient.
|
||||||
|
double nmean_c = nmean[c];
|
||||||
|
double bias_c = (*(bias[c]))[0];
|
||||||
|
auto &data_c = *(data[c]);
|
||||||
|
|
||||||
|
// Once a catalog is selected we may start doing work on voxels.
|
||||||
|
// The openmp statement is to allow the collapse of the 3-loops
|
||||||
|
#pragma omp parallel for collapse(3) reduction(+:logLikelihood)
|
||||||
|
for (size_t n0 = startN0; n0 < endN0; n0++) {
|
||||||
|
for (size_t n1 = 0; n1 < N1; n1++) {
|
||||||
|
for (size_t n2 = 0; n2 < N2; n2++) {
|
||||||
|
// Grab the selection value in voxel n0xn1xn2
|
||||||
|
double selection = sel_array[n0][n1][n2];
|
||||||
|
|
||||||
|
// if the voxel is non-zero, it must be counted
|
||||||
|
if (selection > 0) {
|
||||||
|
double Nobs = data_c[n0][n1][n2];
|
||||||
|
// bias the matter field
|
||||||
|
double d_galaxy = bias_c * delta[n0][n1][n2];
|
||||||
|
|
||||||
|
// Here is the argument of the exponential
|
||||||
|
logLikelihood += square(selection * nmean_c * (1 + d_galaxy) - Nobs) /
|
||||||
|
(selection * nmean_c) + log(R nmean_c);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return logLikelihood;
|
||||||
|
}
|
||||||
|
|
||||||
|
This completes the likelihood. As one can see there is not much going
|
||||||
|
on. It is basically a sum of squared differences in a triple loop.
|
||||||
|
|
||||||
|
The adjoint gradient defined as
|
||||||
|
|
||||||
|
.. math:: \mathrm{adjoint\_gradient}(\delta_\mathrm{m}) = -\nabla \log \mathcal{L}(\delta_\mathrm{m})
|
||||||
|
|
||||||
|
follows the same logic, except that instead of a scalar, the function
|
||||||
|
returns a vector under the shape of a mesh. Note that ``ArrayRef`` is
|
||||||
|
actually a ``boost::multi_array_ref`` with the adequate type.
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
void MyNewLikelihood::gradientLikelihoodSpecific(
|
||||||
|
ArrayRef const &delta, ArrayRef &grad_array) {
|
||||||
|
// Grab the mesh description as for the likelihood
|
||||||
|
size_t const startN0 = model->out_mgr->startN0;
|
||||||
|
size_t const endN0 = startN0 + model->out_mgr->localN0;
|
||||||
|
size_t const N1 = model->out_mgr->N1;
|
||||||
|
size_t const N2 = model->out_mgr->N2;
|
||||||
|
|
||||||
|
// A shortcut to put zero in all entries of the array.
|
||||||
|
// "fwrap(array)" becomes a vectorized expression
|
||||||
|
fwrap(grad_array) = 0;
|
||||||
|
|
||||||
|
for (int c = 0; c < Ncat; c++) {
|
||||||
|
auto &sel_array = *(sel_field[c]);
|
||||||
|
auto &data_c = *(data[c]);
|
||||||
|
double bias_c = (*bias[c])[0];
|
||||||
|
double nmean_c = nmean[c];
|
||||||
|
|
||||||
|
#pragma omp parallel for collapse(3)
|
||||||
|
for (size_t n0 = startN0; n0 < endN0; n0++) {
|
||||||
|
for (size_t n1 = 0; n1 < N1; n1++) {
|
||||||
|
for (size_t n2 = 0; n2 < N2; n2++) {
|
||||||
|
double deltaElement = delta[n0][n1][n2];
|
||||||
|
double d_galaxy = bias_c * deltaElement;
|
||||||
|
double d_galaxy_prime = bias_c;
|
||||||
|
double response = sel_array[n0][n1][n2];
|
||||||
|
double Nobs = data_c[n0][n1][n2];
|
||||||
|
|
||||||
|
// If selection/mask is zero, we can safely skip that
|
||||||
|
// particular voxel. It will not produce any gradient value.
|
||||||
|
if (response == 0)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
// Otherwise, we accumulate the gradient
|
||||||
|
grad_array[n0][n1][n2] +=
|
||||||
|
(nmean_c * response * (1 + d_galaxy) - Nobs) * d_galaxy_prime
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Adding the code to the build infrastructure
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If you are in the ``borg`` module, you must open the file named
|
||||||
|
``libLSS/borg.cmake``. It contains the instructions to compile the
``borg`` module into ``libLSS``. To do that it is sufficient to add the
new source files to the ``EXTRA_LIBLSS`` cmake variable. As one can see
from the cmake file there is a variable to indicate the directory of
``libLSS`` in ``borg``: it is called ``BASE_BORG_LIBLSS``. One can then
add the new source file like this:

.. code:: CMake

   SET(EXTRA_LIBLSS ${EXTRA_LIBLSS}
       ${BASE_BORG_LIBLSS}/samplers/fancy_likelihood/my_new_likelihood.cpp
       # The rest is left out only for the purpose of this documentation
   )

Then the new file will be built into ``libLSS``.

Linking the new likelihood to hades
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For this it is unfortunately necessary to hack into
``extra/hades/src/hades_bundle_init.hpp``, which holds the
initialization logic for the ``hades3``-specific set of likelihoods,
biases, and forward models. The relevant lines in the source code are
the following ones:

.. code:: c++

   if (lh_type == "LINEAR") {
     bundle.hades_bundle = std::make_unique<LinearBundle>(like_info);
     likelihood = bundle.hades_bundle->likelihood;
   }
   #ifdef HADES_SUPPORT_BORG
   else if (lh_type == "BORG_POISSON") {

In the above, ``lh_type`` is a ``std::string`` containing the value of
the field ``likelihood`` in the ini file. Here we check whether it is
``"LINEAR"`` or ``"BORG_POISSON"``.

To add a new likelihood ``"NEW_LIKELIHOOD"`` we shall add the following
lines:

.. code:: c++

   if (lh_type == "LINEAR") {
     bundle.hades_bundle = std::make_unique<LinearBundle>(like_info);
     likelihood = bundle.hades_bundle->likelihood;
   }
   #ifdef HADES_SUPPORT_BORG
   else if (lh_type == "NEW_LIKELIHOOD") {
     typedef HadesBundle<MyNewLikelihood> NewBundle;
     bundle.hades_bundle = std::make_unique<NewBundle>(like_info);
     likelihood = bundle.hades_bundle->likelihood;
   }
   else if (lh_type == "BORG_POISSON") {

while also adding

.. code:: c++

   #include "libLSS/samplers/fancy_likelihood/my_new_likelihood.hpp"

towards the top of the file.

The above piece of code defines a new bundle using the template class
``HadesBundle<T>``. ``T`` can be any class that derives from
``HadesBaseDensityLikelihood``. This bundle is then constructed,
providing the likelihood info object ``like_info``. Finally the built
likelihood object is copied into ``likelihood`` for further processing
by the rest of the code.

.. note::

   If you need to query more parameters from the ini file (for example
   in the ``[likelihood]`` section), you need to look for them using
   ``params``. For example
   ``params.template get<float>("likelihood.k_max")`` will retrieve a
   float value from the field ``k_max`` in the ``[likelihood]``
   section. You can then store it in ``like_info`` (which is in
   practice a `std::map <http://www.cplusplus.com/reference/map/map/>`__):

   .. code:: c++

      like_info["good_k_max"] = params.template get<float>("likelihood.k_max");

   In your constructor you can then retrieve the value from the new
   entry as:

   .. code:: c++

      boost::any_cast<float>(like_info["good_k_max"])

And now you are done! You can now set ``likelihood=NEW_LIKELIHOOD`` in
the ini file and your new code will be used by hades.
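
To make the wiring above concrete, here is a minimal sketch of what the
header of such a likelihood could look like. This is only an
illustration: ``MyNewLikelihood`` is the hypothetical class used above,
and the include path of the base class, the ``LikelihoodInfo`` type
name, and the exact constructor signature expected by
``HadesBaseDensityLikelihood`` are assumptions to be checked against
your source tree.

.. code:: c++

   // Hypothetical header: libLSS/samplers/fancy_likelihood/my_new_likelihood.hpp
   #pragma once

   #include <boost/any.hpp>
   #include "libLSS/samplers/hades/base_likelihood.hpp" // assumed location of the base class

   namespace LibLSS {

     class MyNewLikelihood : public HadesBaseDensityLikelihood {
     public:
       // like_info is the dictionary filled in hades_bundle_init.hpp
       explicit MyNewLikelihood(LikelihoodInfo &like_info)
           : HadesBaseDensityLikelihood(like_info) // base-call arguments are illustrative
       {
         // Retrieve the custom entry stored from the ini file (see note above).
         k_max = boost::any_cast<float>(like_info["good_k_max"]);
       }

     private:
       float k_max;
     };

   } // namespace LibLSS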

Implement from scratch
----------------------

*to be written even later*

docs/source/developer/Code_tutorials/CPP_Multiarray.inc.rst (new file, 201 lines)

.. _multi_dimensional_array_management:

Multi-dimensional array management
==================================

Allocating arrays
-----------------

There are several ways of allocating multidimensional arrays depending
on the effect one wants to achieve.

.. _for_use_with_fftwmpi:

For use with FFTW/MPI
~~~~~~~~~~~~~~~~~~~~~

It is **strongly** recommended to use the class ``FFTW_Manager<T,N>``
(see documentation :ref:`here <fftw_manager>`; most of BORG assumes
T=double, N=3, i.e. 3D arrays) to allocate arrays, as MPI and FFTW
need some specific padding and over-allocation of memory which are
difficult to get right at first. Assuming ``mgr`` is such an object,
you can allocate an array like this:

.. code:: c++

   auto array_p = mgr.allocate_array();
   auto& a = array_p.get_array();

   // a is now a boost::multi_array_ref;
   // a0..a1, b0..b1, c0..c1 are the local (MPI-dependent) index bounds
   for (int i = a0; i < a1; i++)
     for (int j = b0; j < b1; j++)
       for (int k = c0; k < c1; k++)
         std::cout << "a has some value " << a[i][j][k] << std::endl;

With the above statement, keep in mind that the array will be destroyed
at the **exit of the context**. It is possible to have more permanent
arrays with the following statement:

.. code:: c++

   auto array_p = mgr.allocate_ptr_array();
   auto& a = array_p->get_array();

   // array_p is a shared_ptr that can be saved elsewhere
   // a is now a boost::multi_array_ref

.. _uninitialized_array:

Uninitialized array
~~~~~~~~~~~~~~~~~~~

Generally it is advised to allocate the array with the type
``LibLSS::U_Array<T,N>``. It creates an array that is much faster to
initialize, and statistics on memory allocation are gathered.

The typical usage is the following:

.. code:: c++

   using namespace LibLSS;

   U_Array<double, 2> x_p(boost::extents[N][M]);
   auto& x = x_p.get_array();

The line with ``U_Array`` will allocate the array (at the same time
gathering the statistics); the second line provides you with a
``boost::multi_array_ref`` object that can directly access all elements
as usual (see previous section).

.. _dumping_an_array_of_scalars:

Dumping an array of scalars
---------------------------

A significant amount of abstraction has been coded in to make dumping
arrays into HDF5 files as painless as possible. Typically, to dump an
array you would write the following code.

.. code:: c++

   #include <H5Cpp.h>
   #include <CosmoTool/hdf5_array.hpp>
   #include <boost/multi_array.hpp>

   void myfunction() {
     boost::multi_array<double, 2> a(boost::extents[10][4]);

     // Do some initialization of a

     {
       // Open and truncate myfile.h5 (i.e. removes everything in it)
       H5::H5File f("myfile.h5", H5F_ACC_TRUNC);
       // Save 'a' into the dataset "myarray" in the file f.
       CosmoTool::hdf5_write_array(f, "myarray", a);
     }
   }

But you need to have your array either be a ``multi_array`` or mapped
to one through ``multi_array_ref``. Usual types (float, double, int,
...) are supported, as well as their complex counterparts. There is
also a mechanism to allow for the description of more complex compound
types.
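
For completeness, reading an array back follows the same pattern with
the read counterpart ``CosmoTool::hdf5_read_array`` (the same helper is
used in the MCMC-reading tutorial later in this document). A minimal
sketch:

.. code:: c++

   #include <H5Cpp.h>
   #include <CosmoTool/hdf5_array.hpp>
   #include <boost/multi_array.hpp>

   void myreader() {
     // The target array is sized to match the stored dataset.
     boost::multi_array<double, 2> a(boost::extents[10][4]);

     // Open the file created in the previous example, read-only.
     H5::H5File f("myfile.h5", H5F_ACC_RDONLY);
     // Load the dataset "myarray" from f into 'a'.
     CosmoTool::hdf5_read_array(f, "myarray", a);
   }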

.. _fuse_array_mechanism:

FUSE array mechanism
--------------------

The FUSE subsystem is made available through the includes
``libLSS/tools/fused_array.hpp`` and ``libLSS/tools/fuse_wrapper.hpp``.
They define wrappers and operators to make the writing of expressions
on arrays relatively trivial, parallelized and possibly vectorized if
the arrays permit. To illustrate this there are two testcases in the
library: ``test_fused_array.cpp`` and ``test_fuse_wrapper.cpp``.

We will start from a most basic example:

.. code:: c++

   boost::multi_array<double, 1> a(boost::extents[N]);
   auto w_a = LibLSS::fwrap(a);

   w_a = 1;

These few lines create a one-dimensional array of length N. Then this
array is wrapped in the seamless FUSE expression system. It is quite
advised to use ``auto`` here as the types can be complex and difficult
to guess for newcomers. Finally, the last line fills the array with the
value 1. This is a trivial example but we can do better:

.. code:: c++

   w_a = std::pow(std::cos(w_a*2*M_PI), 2);

This transforms the content of ``a`` by evaluating
:math:`\cos(2\pi x)^2` for each element :math:`x` of the array wrapped
in ``w_a``. This is done without copy using the lazy expression
mechanism. It is possible to save the expression for later:

.. code:: c++

   auto b = std::pow(std::cos(w_a*2*M_PI), 2);

Note that nothing is evaluated here; evaluation only occurs at the
assignment phase. This wrap also behaves mostly like a virtual array:

.. code:: c++

   (*b)[i]

computes the i-th value of the expression and nothing else.

Some other helpers in libLSS natively support the FUSE mechanism. That
is the case for ``RandomNumber::poisson`` for example:

.. code:: c++

   auto c = fwrap(...);
   c = rgen.poisson(b);

This piece of code computes a Poisson realization for a mean value
given by the elements of the ``b`` expression (which must be a wrapped
array or an expression built from one) and stores this into ``c``.

The ``sum`` reduce (parallel reduction) operation is supported by the
wrapper:

.. code:: c++

   double s = c.sum();

Some arrays can be entirely virtual, i.e. derived from C++
expressions. This needs to invoke a lower layer of the FUSE mechanism.
Creating a pure virtual array looks like this:

.. code:: c++

   auto d = LibLSS::fwrap(LibLSS::b_fused_idx<double, 2>(
     [](size_t i, size_t j)->double {
       return sqrt(i*i + j*j);
     }
   ));

This operation creates a virtual array and wraps it immediately. The
virtual array is a bidimensional array of double (the two template
parameters), and infinite. Its elements are computed using the provided
lambda function, which must take exactly two parameters. It is possible
to make finite virtual arrays by adding an extent parameter:

.. code:: c++

   auto d = LibLSS::fwrap(LibLSS::b_fused_idx<double, 2>(
     [](size_t i, size_t j)->double {
       return sqrt(i*i + j*j);
     },
     boost::extents[N][N]
   ));

Only in that case is it possible to query the dimensions of the array.

Finally, the **FUSED mechanism does not yet support automatic
dimensional broadcast!**
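
Putting these pieces together, here is a small self-contained sketch
combining a finite virtual array with the wrapper and the parallel
``sum`` reduction. Only ``fwrap``, ``b_fused_idx`` and ``sum()`` from
the text above are used; the function name and ``N`` are illustrative,
and we assume the reduction applies to purely virtual arrays as the
text implies.

.. code:: c++

   #include <cmath>
   #include <boost/multi_array.hpp>
   #include "libLSS/tools/fused_array.hpp"
   #include "libLSS/tools/fuse_wrapper.hpp"

   double radial_sum(size_t N) {
     // Finite virtual array: element (i,j) is sqrt(i^2 + j^2).
     auto d = LibLSS::fwrap(LibLSS::b_fused_idx<double, 2>(
       [](size_t i, size_t j) -> double {
         return std::sqrt(double(i * i + j * j));
       },
       boost::extents[N][N]
     ));

     // Nothing has been computed yet; the evaluation happens lazily
     // inside the parallel reduction below.
     return d.sum();
   }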

docs/source/developer/Code_tutorials/FFTW_manager.inc.rst (new file, 47 lines)

.. _fftw_manager:

FFTW manager
============

Using FFTW, particularly with MPI, is generally delicate and requires a
lot of intermediate steps. A specific class was created to handle a
good fraction of the code patterns that are often used. The class is
named ``LibLSS::FFTW_Manager_3d`` and is defined in
``libLSS/tools/mpi_fftw_helper.hpp``. That class is limited to the
management of 3d transforms. A generalization for :math:`N` dimensions
is also available: ``LibLSS::FFTW_Manager<T,Nd>``. We will only discuss
the generalized version here.

.. _initializing_the_manager:

Initializing the manager
------------------------

The constructor is fairly straightforward to use. It has :math:`N+1`
parameters: the first :math:`N` parameters specify the grid dimensions
and the last one the MPI communicator.

.. _allocating_arrays:

Allocating arrays
-----------------

The manager provides a very quick way to allocate arrays that are
padded correctly and incorporate the appropriate limits for MPI. The
two functions are ``allocate_array()`` and
``allocate_complex_array()``. The first one allocates the array with
the real representation and the second with the complex representation.
The returned values are of type ``UninitializedArray``. A typical usage
is the following:

.. code:: c++

   FFTW_Manager<double, 3> mgr(N0, N1, N2, comm);
   {
     auto array = mgr.allocate_array();
     auto& real_array = array.get_array();

     real_array[i][j][k] = something;
     // The array is totally destroyed when exiting here.
   }

Arrays allocated that way are designed to be temporary.
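
The complex-side allocation follows the same pattern. A short sketch
using ``allocate_complex_array()`` named above, in the same style as
the previous snippet (``N0``, ``N1``, ``N2``, ``comm`` and the indices
are assumed to be defined as before; the element type follows the
manager's scalar type):

.. code:: c++

   #include <complex>

   FFTW_Manager<double, 3> mgr(N0, N1, N2, comm);
   {
     // Complex (half-spectrum) representation, padded for FFTW/MPI.
     auto c_array = mgr.allocate_complex_array();
     auto& complex_array = c_array.get_array();

     complex_array[i][j][k] = std::complex<double>(1.0, 0.0);
     // As before, the storage is released when leaving this scope.
   }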

(new file, 415 lines)

.. _julia_and_tensorflow:

Julia and TensorFlow
====================

The ``julia`` language can be used within ``HADES``. It is
automatically installed if ``julia`` (at least ``v0.7.0``) is available
on the machine and if the ``hmclet`` is pulled into ``extra/``. Note
that ``julia`` is a relatively new language and develops quickly - it
is also 1-indexed!

hmclet
------

At the moment, the ``julia`` core is available as part of ``hmclet`` -
a small HMC sampler which can be used to sample external parameters,
such as bias parameters.

.. _jl_files:

.jl files
---------

The ``julia`` code is contained in ``.jl`` files which must contain
several things to be used by the ``hmclet``. An example of a linear
bias test likelihood can be found in
``extra/hmclet/example/test_like.jl``.

.. _initialisation_file:

Initialisation file
~~~~~~~~~~~~~~~~~~~

The ``.ini`` file needs a few lines added to describe the ``julia``
file to use, the name of the module defined in the ``julia`` file, and
whether to use a ``slice`` sampler or the ``hmclet``. They are added to
the ``.ini`` file as

.. code:: ini

   [julia]
   likelihood_path=test_like.jl
   likelihood_module=julia_test
   bias_sampler_type=hmclet

.. _module_name_and_importing_from_liblss:

Module name and importing from libLSS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Each ``julia`` file must contain a module (whose name is entered in the
``.ini`` file)

.. code:: julia

   module julia_test

To be able to import from libLSS (including the state and the print
functions) the ``julia`` module needs to contain the ``using``
statement, including the leading dots.

.. code:: julia

   using ..libLSS

   import ..libLSS.State
   import ..libLSS.GhostPlanes, ..libLSS.get_ghost_plane
   import ..libLSS.print, ..libLSS.LOG_INFO, ..libLSS.LOG_VERBOSE, ..libLSS.LOG_DEBUG

The dots are necessary: the first dot refers to the current module and
the second one moves up to the enclosing level where ``libLSS`` lives.

.. _importing_modules:

Importing modules
~~~~~~~~~~~~~~~~~

Any other ``julia`` module can be included in this ``julia`` code by
using

.. code:: julia

   using MyModule

where ``MyModule`` can be self-defined or installed before calling it
in HADES using

.. code:: julia

   using Pkg
   Pkg.add("MyModule")

in a ``julia`` terminal.

.. _necessary_functions:

Necessary functions
~~~~~~~~~~~~~~~~~~~

A number of functions must be defined in the ``julia`` code for it to
be used by the ``hmclet``. These are:

.. code:: julia

   function initialize(state)
       print(LOG_INFO, "Likelihood initialization in Julia")
       # This is where hmclet parameters can be initialised in the state
       NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true) # Number of catalogs
       number_of_parameters = 2 # Number of parameters
       for i=1:NCAT
           hmclet_parameters = libLSS.resize_array(state, "galaxy_bias_"*repr(i - 1), number_of_parameters, Float64)
           hmclet_parameters[:] = 1
       end
   end

   function get_required_planes(state::State)
       print(LOG_INFO, "Check required planes")
       # This is where the planes are gathered when they live on different mpi nodes
       return Array{UInt64,1}([])
   end

   function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
       print(LOG_INFO, "Likelihood evaluation in Julia")
       # Here is where the likelihood is calculated and returned.
       # This can be a call to likelihood_bias() which is also a necessary function
       NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
       L = Float64(0.)
       for i=1:NCAT
           hmclet_parameters = libLSS.get_array_1d(state, "galaxy_bias_"*repr(i - 1), Float64)
           L += likelihood_bias(state, ghosts, array, i, hmclet_parameters)
       end
       return L
   end

   function generate_mock_data(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
       print(LOG_INFO, "Generate mock")
       # Mock data needs to be generated also
       NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
       for c=1:NCAT
           data = libLSS.get_array_3d(state, "galaxy_data_"*repr(c - 1), Float64)
           generated_data = function_to_generate_data() # We can use other functions which are defined within the julia module
           for i=1:size(data)[1],j=1:size(data)[2],k=1:size(data)[3]
               data[i, j, k] = generated_data[i, j, k] + libLSS.gaussian(state) # We can use functions defined in libLSS
           end
       end
   end

   function adjoint_gradient(state::State, array::AbstractArray{Float64,3}, ghosts::GhostPlanes, ag::AbstractArray{Float64,3})
       print(LOG_VERBOSE, "Adjoint gradient in Julia")
       # The gradient of the likelihood with respect to the input array
       NCAT = libLSS.get(state, "NCAT", Int64, synchronous=true)
       ag[:,:,:] .= 0 # Watch out - this . before the = is necessary... extremely necessary!
       for i=1:NCAT
           # Calculate the adjoint gradient here and update ag
           # Make sure not to update any gradients which are not in the selection
           selection = libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(i - 1), Float64)
           mask = selection .> 0
           adjoint_gradient = function_to_calculate_adjoint_gradient()
           ag[mask] += adjoint_gradient[mask]
       end
   end

   function likelihood_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias_tilde)
       # The likelihood after biasing the input array
       L = function_to_calculate_likelihood()
       return L
   end

   function get_step_hint(state, catalog_id, bias_id)
       # Guess for the initialisation of the hmclet mass matrix or the slice sample step size
       return 0.1
   end

   function log_prior_bias(state, catalog_id, bias_tilde)
       # Prior for the bias parameters
       return 0.
   end

   function adjoint_bias(state::State, ghosts::GhostPlanes, array, catalog_id, catalog_bias_tilde, adjoint_gradient_bias)
       # Calculate the gradient of the likelihood with respect to the parameters in the hmclet
       adjoint_gradient_bias[:] .= function_to_calculate_gradient_with_respect_to_bias()
   end

.. _tensorflow_in_julia:

TensorFlow in julia
-------------------

One amazing advantage of having ``julia`` built into ``HADES`` is that
we can now use ``TensorFlow``. ``TensorFlow`` is a very powerful
tensor-based computational language which has the exact same syntax for
running on GPUs and CPUs. The ``TensorFlow.jl`` package is not
officially supported, but is relatively well maintained, although it is
based on ``v1.4`` whilst the current version is well beyond that. One
can use a newer version of ``TensorFlow`` by installing it from source
and placing it in the ``julia`` ``TensorFlow`` directory; however,
doing this does not give you access to all the commands available in
``TensorFlow``. For example, ``TensorFlow.subtract()`` and
``TensorFlow.divide()`` do not exist. Fortunately, a lot of ``julia``
functions work on ``TensorFlow`` tensors (such as ``-``, ``.-``, ``/``
and ``./``).

There is a ``TensorFlow`` implementation of ``test_like.jl`` (discussed
above) in ``extra/hmclet/example/test_like_TF.jl``.

The essence of ``TensorFlow`` is to build a graph of tensors connected
by computations. Once the graph is built, results are accessed by
passing values through the graph. An example graph could be:

.. code:: julia

   using TensorFlow
   using Distributions # To be used for initialising variable values

   a = TensorFlow.placeholder(Float64, shape = [100, 1], name = "a") # This is a tensor which contains no value and has a shape
                                                                     # of [100, 1]
   b = TensorFlow.placeholder(Float64, shape = (), name = "b")       # This is a tensor which contains no value or shape

   c = TensorFlow.placeholder(Float64, shape = [1, 10], name = "c")  # This is a tensor which has no value and has a shape of [1, 10]

   variable_scope("RandomVariable"; initializer=Normal(0., 0.1)) do
       global d = TensorFlow.get_variable("d", Int64[10], Float64)   # This is a variable tensor which can be initialised to a value
   end                                                               # and has a shape of [10]. It must be global so that it persists
                                                                     # outside of the scope
   e = TensorFlow.constant(1.:10., dtype = Float64, name = "e")      # This is a tensor of constant value with shape [10]

   f = TensorFlow.matmul(a, c, name = "f")                           # Matrix multiplication of a and c with output shape [100, 10]

   #g = TensorFlow.matmul(b, c, name = "g")                          # Matrix multiplication of b and c
                                                                     # !THIS WILL FAIL SINCE b HAS NO SHAPE! Instead one can use
   g = TensorFlow.identity(b .* c, name = "g")                       # Here we make use of the overloaded elementwise multiplication
                                                                     # in julia; the tensor will say it has shape [1, 10]
                                                                     # but this might not be true. We use identity() to give the
                                                                     # tensor a name.

   h = TensorFlow.add(f, e, name = "h")                              # Addition of f and e

   i = TensorFlow.identity(f - e, name = "i")                        # Subtraction of f and e

   j = TensorFlow.identity(f / e, name = "j")                        # Matrix division of f and e

   k = TensorFlow.identity(j ./ i, name = "k")                       # Elementwise division of j by i

We now have lots of tensors defined, but notice that these are tensors
and are not available as valued quantities until they are run. For
example, inspecting these tensors gives

.. code:: julia

   a
   > <Tensor a:1 shape=(100, 1) dtype=Float64>
   b
   > <Tensor b:1 shape=() dtype=Float64> # Note this is not the real shape of this tensor
   c
   > <Tensor c:1 shape=(1, 10) dtype=Float64>
   d
   > <Tensor d:1 shape=(10) dtype=Float64>
   e
   > <Tensor e:1 shape=(10) dtype=Float64>
   f
   > <Tensor f:1 shape=(100, 10) dtype=Float64>
   g
   > <Tensor g:1 shape=(1, 10) dtype=Float64> # Note this is not the real shape of this tensor either
   h
   > <Tensor h:1 shape=(100, 10) dtype=Float64>
   i
   > <Tensor i:1 shape=(100, 10) dtype=Float64>
   j
   > <Tensor j:1 shape=(100, 10) dtype=Float64>
   k
   > <Tensor k:1 shape=(100, 10) dtype=Float64>

To actually run any computations a session is needed

.. code:: julia

   sess = Session(allow_growth = true)

The ``allow_growth`` option prevents ``TensorFlow`` from taking up the
entire memory of a GPU.

Any constant value tensors can now be accessed by running the tensor in
the session

.. code:: julia

   run(sess, TensorFlow.get_tensor_by_name("e"))
   > 10-element Array{Float64,1}:
   >  1.0
   >  2.0
   >  3.0
   >  4.0
   >  5.0
   >  6.0
   >  7.0
   >  8.0
   >  9.0
   > 10.0
   run(sess, e)
   > 10-element Array{Float64,1}:
   >  1.0
   >  2.0
   >  3.0
   >  4.0
   >  5.0
   >  6.0
   >  7.0
   >  8.0
   >  9.0
   > 10.0

Notice how we can call the tensor by its name in the graph (which is
the proper way to do things) or by its variable name. If we want to
call an output of a computation we need to supply all necessary input
tensors

.. code:: julia

   distribution = Normal()
   onehundredbyone = reshape(rand(distribution, 100), (100, 1))
   onebyten = reshape(rand(distribution, 10), (1, 10))

   run(sess, TensorFlow.get_tensor_by_name("f"), Dict(TensorFlow.get_tensor_by_name("a")=>onehundredbyone, TensorFlow.get_tensor_by_name("c")=>onebyten))
   > 100×10 Array{Float64,2}:
   > ... ...
   run(sess, f, Dict(a=>onehundredbyone, c=>onebyten))
   > 100×10 Array{Float64,2}:
   > ... ...
   run(sess, TensorFlow.get_tensor_by_name("k"), Dict(TensorFlow.get_tensor_by_name("a")=>onehundredbyone, TensorFlow.get_tensor_by_name("c")=>onebyten))
   > 100×10 Array{Float64,2}:
   > ... ...
   run(sess, k, Dict(a=>onehundredbyone, c=>onebyten))
   > 100×10 Array{Float64,2}:
   > ... ...

Any unknown-shape tensor needs to be fed in with the correct shape, but
can in principle be any shape. If there are any uninitialised values in
the graph they need initialising, otherwise the code will output an
error

.. code:: julia

   run(sess, TensorFlow.get_tensor_by_name("RandomVariable/d"))
   > Tensorflow error: Status: Attempting to use uninitialized value RandomVariable/d

Notice that the variable built within ``variable_scope`` has the scope
name prepended to the tensor name. The initialisation of the tensor can
be done with ``TensorFlow.global_variables_initializer()``:

.. code:: julia

   run(sess, TensorFlow.global_variables_initializer())

Once this has been run, the tensor ``d`` will have a value. This value
can only be accessed by running the tensor in the session

.. code:: julia

   run(sess, TensorFlow.get_tensor_by_name("RandomVariable/d"))
   > 1×10 Array{Float64,2}:
   > 0.0432947  -0.208361  0.0554441  …  -0.017653  -0.0239981  -0.0339648
   run(sess, d)
   > 1×10 Array{Float64,2}:
   > 0.0432947  -0.208361  0.0554441  …  -0.017653  -0.0239981  -0.0339648

This is a brief overview of how to use ``TensorFlow``. The ``HADES``
``hmclet`` likelihood code sets up all of the graph in the
initialisation phase

.. code:: julia

   function setup(N0, N1, N2)
       global adgrad, wgrad
       p = [TensorFlow.placeholder(Float64, shape = (), name = "bias"), TensorFlow.placeholder(Float64, shape = (), name = "noise")]
       δ = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "density")
       g = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "galaxy")
       s = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "selection")
       gaussian = TensorFlow.placeholder(Float64, shape = Int64[N0, N1, N2], name = "gaussian_field")
       mask = TensorFlow.placeholder(Bool, shape = Int64[N0, N1, N2], name = "mask")
       mask_ = TensorFlow.reshape(mask, N0 * N1 * N2, name = "flat_mask")
       g_ = TensorFlow.identity(TensorFlow.boolean_mask(TensorFlow.reshape(g, N0 * N1 * N2), mask_), name = "flat_masked_galaxy")
       s_ = TensorFlow.identity(TensorFlow.boolean_mask(TensorFlow.reshape(s, N0 * N1 * N2), mask_), name = "flat_masked_selection")
       output = TensorFlow.add(1., TensorFlow.multiply(p[1], δ), name = "biased_density")
       mock = TensorFlow.multiply(s, output, name = "selected_biased_density")
       mock_ = TensorFlow.identity(TensorFlow.boolean_mask(TensorFlow.reshape(mock, N0 * N1 * N2), mask_), name = "flat_masked_selected_biased_density")
       mock_galaxy = TensorFlow.add(mock, TensorFlow.multiply(TensorFlow.multiply(TensorFlow.sqrt(TensorFlow.exp(p[2])), s), gaussian), name = "mock_galaxy")
       ms = TensorFlow.reduce_sum(TensorFlow.cast(mask, Float64), name = "number_of_voxels")
       loss = TensorFlow.identity(TensorFlow.add(TensorFlow.multiply(0.5, TensorFlow.reduce_sum(TensorFlow.square(g_ - mock_) / TensorFlow.multiply(TensorFlow.exp(p[2]), s_))), TensorFlow.multiply(0.5, TensorFlow.multiply(ms, p[2]))) - TensorFlow.exp(p[1]) - TensorFlow.exp(p[2]), name = "loss")
       adgrad = TensorFlow.gradients(loss, δ)
       wgrad = [TensorFlow.gradients(loss, p[i]) for i in range(1, length = size(p)[1])]
   end

Notice here that in ``TensorFlow`` the gradients are *super* easy to
calculate, since it amounts to a call to ``TensorFlow.gradients(a, b)``,
which is equivalent to da/db (it is actually sum(da/db), so sometimes
you have to do a bit more legwork).

Now, whenever the likelihood needs to be calculated whilst running
``HADES``, the syntax is as simple as

.. code:: julia

   function likelihood(state::State, ghosts::GhostPlanes, array::AbstractArray{Float64,3})
       print(LOG_INFO, "Likelihood evaluation in Julia")
       L = Float64(0.)
       for catalog=1:libLSS.get(state, "NCAT", Int64, synchronous=true)
           L += run(sess, TensorFlow.get_tensor_by_name("loss"),
                    Dict(TensorFlow.get_tensor_by_name("bias")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[1],
                         TensorFlow.get_tensor_by_name("noise")=>libLSS.get_array_1d(state, "galaxy_bias_"*repr(catalog - 1), Float64)[2],
                         TensorFlow.get_tensor_by_name("density")=>array,
                         TensorFlow.get_tensor_by_name("galaxy")=>libLSS.get_array_3d(state, "galaxy_data_"*repr(catalog - 1), Float64),
                         TensorFlow.get_tensor_by_name("selection")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(catalog - 1), Float64),
                         TensorFlow.get_tensor_by_name("mask")=>libLSS.get_array_3d(state, "galaxy_sel_window_"*repr(catalog - 1), Float64).>0.))
       end
       print(LOG_VERBOSE, "Likelihood is " * repr(L))
       return L
   end

If ``TensorFlow`` is installed with GPU support, then this code will
automatically run on the GPU.

docs/source/developer/Code_tutorials/New_core_program.inc.rst (new file, 135 lines)

.. _new_core_program:

Writing a new ARES core program
===============================

.. _what_is_a_core_program:

What is a core program ?
------------------------

A core program is in charge of initializing the sampling machine,
loading the data into its structures and running the main sampling
loop. There are two default core programs at the moment: ARES3 (in
``src/ares3.cpp``) and HADES3 (``extra/hades/src/hades3.cpp``). ARES3
implements the classical ARES sampling framework, which includes linear
modeling, bias, foreground and powerspectrum sampling. HADES3
implements the non-linear density inference machine: classical HADES
likelihood, BORG LPT, BORG 2LPT, BORG PM, and different variants of
bias functions.

.. _why_write_a_new_one:

Why write a new one ?
---------------------

Because you are thinking of a radically different way of presenting the
data, or because your model is based on different assumptions, you may
have to redesign the way data are loaded and initialized. Also, if you
are thinking of a different way of sampling the different parameters
(or more than usual) then you may have to implement a new bundle.

.. _prepare_yourself:

Prepare yourself
----------------

A core program is composed of different elements that can be taken from
different existing parts. We can look at ares3.cpp for an example. The
main part (except the splash screen) is:

.. code:: c++

   #define SAMPLER_DATA_INIT "../ares_init.hpp"
   #define SAMPLER_BUNDLE "../ares_bundle.hpp"
   #define SAMPLER_BUNDLE_INIT "../ares_bundle_init.hpp"
   #define SAMPLER_NAME "ARES3"
   #define SAMPLER_MOCK_GENERATOR "../ares_mock_gen.hpp"
   #include "common/sampler_base.cpp"

As you can see, a number of defines are set up before including the
common part, called "common/sampler_base.cpp". These defines do the
following (a combined sketch is given after this list):

- ``SAMPLER_DATA_INIT`` specifies the include file that holds the
  definition of the data initializer. This corresponds to two
  functions:

  - ::

       template<typename PTree>
       void sampler_init_data(MPI_Communication *mpi_world, MarkovState& state, PTree& params);

    which is in charge of allocating the adequate arrays for storing
    input data into the ``state`` dictionary. The actual names of
    these fields are sampler dependent. In ares and hades, they are
    typically called "galaxy_catalog_%d" and "galaxy_data_%d" (with %d
    being replaced by an integer). This function is always called, even
    in the case the code is being resumed from a former run.

  - ::

       template<typename PTree>
       void sampler_load_data(MPI_Communication *mpi_world, MarkovState& state, PTree& params, MainLoop& loop);

    which is in charge of loading the data into the structures. This
    function is only called during the first initialization of the
    chain.

- ``SAMPLER_BUNDLE`` defines the sampler bundle which is going to be
  used. Only the structure definition of ``SamplerBundle`` should be
  given here.
- ``SAMPLER_BUNDLE_INIT`` defines two functions working on initializing
  the bundle:

  - ::

       template<typename ptree>
       void sampler_bundle_init(MPI_Communication *mpi_world, ptree& params, SamplerBundle& bundle, MainLoop& loop);

    which does the real detailed initialization, including the
    sampling loop program.

  - ::

       void sampler_setup_ic(SamplerBundle& bundle, MainLoop& loop);

    which allows for more details on the initial conditions to be set
    up.

- ``SAMPLER_NAME`` must be a static C string giving the name of this
  core program.
- ``SAMPLER_MOCK_GENERATOR`` specifies a filename where

  .. code:: c++

     template<typename PTree>
     void prepareMockData(PTree& ptree, MPI_Communication *comm, MarkovState& state, CosmologicalParameters& cosmo_params, SamplerBundle& bundle)

  is defined. "ares_mock_gen.hpp" implements a simple Gaussian random
  field generator with the selection effects applied to the data.
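
Putting these pieces together, a new core program is mostly a set of
defines pointing at your own headers, followed by the common driver. A
minimal sketch (all the ``my_sampler*`` file names below are
hypothetical placeholders; each referenced header must provide the
functions described in the list above):

.. code:: c++

   // my_sampler3.cpp -- hypothetical new core program
   #define SAMPLER_DATA_INIT "../my_sampler_init.hpp"
   #define SAMPLER_BUNDLE "../my_sampler_bundle.hpp"
   #define SAMPLER_BUNDLE_INIT "../my_sampler_bundle_init.hpp"
   #define SAMPLER_NAME "MY_SAMPLER3"
   #define SAMPLER_MOCK_GENERATOR "../my_sampler_mock_gen.hpp"
   #include "common/sampler_base.cpp"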

.. _creating_a_new_one:

Creating a new one
------------------

.. _create_the_skeleton:

Create the skeleton
~~~~~~~~~~~~~~~~~~~

.. _create_the_sampler_bundle:

Create the sampler bundle
~~~~~~~~~~~~~~~~~~~~~~~~~

.. _initializing_data_structures:

Initializing data structures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. _filling_data_structures:

Filling data structures
~~~~~~~~~~~~~~~~~~~~~~~

.. _attach_the_core_program_to_cmake:

Attach the core program to cmake
--------------------------------

Build
-----

docs/source/developer/Code_tutorials/Types.inc.rst (new file, 122 lines)

.. _ares_types:

Types used in the ARES code
===========================

A lot of the useful type 'aliases' are actually defined in
``libLSS/samplers/core/types_samplers.hpp``. We can discuss a few of
those types here.

LibLSS::multi_array
-------------------

.. code:: c++

   template<typename T, size_t N>
   using multi_array = boost::multi_array<T, N, LibLSS::track_allocator<T>>;

This is a type alias for ``boost::multi_array`` which uses the default
allocator provided by LibLSS to track allocations. It is advised to use
it, so that it is possible to investigate memory consumption
automatically in the future. It is perfectly legal not to use it;
however, you will lose those features in your report.
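
As a quick illustration, the alias is used exactly like a plain
``boost::multi_array``; a minimal sketch (the array shape is
arbitrary):

.. code:: c++

   #include <boost/multi_array.hpp>
   #include "libLSS/samplers/core/types_samplers.hpp"

   void example() {
     // Allocation goes through LibLSS::track_allocator, so this array
     // is accounted for in the memory statistics.
     LibLSS::multi_array<double, 3> rho(boost::extents[16][16][16]);
     rho[0][0][0] = 1.0;
   }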

LibLSS::ArrayType
-----------------

This is a type to hold, and store in the MCMC file, a 3d array targeted
to be used in FFT transforms. The definition is

.. code:: c++

   typedef ArrayStateElement<double, 3, FFTW_Allocator<double>, true > ArrayType;

It happens that ArrayType is misnamed, as it is only a shell for the
type. In the future we can expect it to be renamed to something else
like ArrayTypeElement. We can see that it is a double array, with 3
dimensions. It requires an FFTW_Allocator and it is a spliced array to
be reconstructed for mcmc files (last 'true').

Allocating the element requires the array to be allocated at the same
time. An example for that is as follows:

.. code:: c++

   s_field = new ArrayType(extents[range(startN0,startN0+localN0)][N1][N2], allocator_real);
   s_field->setRealDims(ArrayDimension(N0, N1, N2));

To access the underlying ``multi_array`` one needs to access the member
variable ``array``. In the case of the above ``s_field``, it would be:

.. code:: c++

   auto& my_array = *s_field->array;
   // Now we can access the array
   std::cout << my_array[startN0][0][0] << std::endl;

.. warning::

   Do not store a pointer to the above ``my_array``. The ``array``
   member variable is a shared pointer which can be safely stored with
   the following type: ``std::shared_ptr<LibLSS::ArrayType::ArrayType>``.

LibLSS::CArrayType
------------------

This is a type to hold, and store in the MCMC file, a 3d complex array
targeted to be used in FFT transforms. The definition is

.. code:: c++

   typedef ArrayStateElement<std::complex<double>, 3, FFTW_Allocator<std::complex<double> >, true > CArrayType;

It happens that CArrayType is misnamed, as it is only a shell for the
type. In the future we can expect it to be renamed to something else
like CArrayTypeElement. We can see that it is a complex double array,
with 3 dimensions. It requires an FFTW_Allocator and it is a spliced
array to be reconstructed for mcmc files (last 'true').

Allocating the element requires the array to be allocated at the same
time. An example for that is as follows:

.. code:: c++

   s_hat_field = new CArrayType(base_mgr->extents_complex(), allocator_complex);
   s_hat_field->setRealDims(ArrayDimension(N0, N1, N2_HC));

LibLSS::Uninit_FFTW_Complex_Array
---------------------------------

The types above are for arrays designated to be saved in the MCMC file.
To allocate *temporary* arrays that still need to be run through FFTW,
the adequate type is:

.. code:: c++

   typedef UninitializedArray<FFTW_Complex_Array, FFTW_Allocator<std::complex<double> > > Uninit_FFTW_Complex_Array;

This is a helper type, because ``boost::multi_array`` wants to do
**slow** preinitialization of the large arrays that we use. To
circumvent that initialization, the trick is to create a
``boost::multi_array_ref`` on memory allocated by a helper class.
UninitializedArray is built for that; however, it comes at the cost of
adding one step before using the array:

.. code:: c++

   Uninit_FFTW_Complex_Array gradient_psi_p(extents[range(startN0,startN0+localN0)][N1][N2_HC],
                                            allocator_complex);
   Uninit_FFTW_Complex_Array::array_type& gradient_psi = gradient_psi_p.get_array();

Here 'gradient_psi_p' is the holder of the array (i.e. if it gets
destroyed, the array itself is destroyed). But if you want to use the
array you need to first get it with 'get_array'.

docs/source/developer/ares_modules.rst (new file, 9 lines)

ARES modules
============

ARES is typically the root project of many other sub-projects or
sub-modules. This is notably the case of the following modules:

- **hades**: this module declares and implements some of the
  fundamental API for manipulating general likelihoods and
  deterministic forward models in the ARES/BORG framework. Notably,
  important posterior samplers like the Hamiltonian Markov Chain
  algorithm are implemented there.
- **borg**: this module deals more with the physical aspects and the
  statistics of large scale structures. As a highlight, it holds the
  code implementing first and second order Lagrangian perturbation
  theory, and the particle mesh (with tCOLA) model.
- **python**: this module implements the python bindings, both as an
  external module for another python VM, and with an embedded python
  VM to interpret likelihoods and configurations written in python.
- **hmclet**:

docs/source/developer/code_architecture.rst (new file, 171 lines)

Code architecture
=================

Slides of the tutorial
----------------------

See `this file <https://www.aquila-consortium.org/wiki/index.php/File:ARES_code.pdf>`__.
Some of these slides are starting to get outdated. Check the doc pages
in case of doubt.

Overall presentation
--------------------

The ARES3 framework is divided into a main library (libLSS) and several
core programs (ares3 and hades3 at the moment).

A step-by-step tutorial on how to create a new core program is
described :ref:`here <new_core_program>`.

Code units
----------

The units of the code are, whenever possible, "physical" units (i.e.
velocities often in km/s, density contrasts, ...). The rationale is
that theory papers are often expressed, or easily expressible, in those
units, while it can be hard to follow all the steps required to make
the units work in the global numerical schemes of ARES. This way the
equations in the code are more easily readable and matched to the
papers. As an example, the Fourier transform of a density contrast must
have the unit of a volume. The density fluctuation power spectrum is
also a volume.

That can, however, also introduce some unexpected complexity.

ares3
~~~~~

All the code relies on the ARES3 code framework. At its basis it is a
library (libLSS) and a common code base for different sampling schemes
(e.g. ARES, ARES-foreground, ATHENA, HADES, BORG). The sampling schemes
being quite sensitive to the implementation details, they are not yet
fully parametrizable by the user, and only a few degrees of freedom are
exposed through the configuration file. The configuration file comes as
a Windows INI file, though that may evolve later.

libLSS
~~~~~~

The libLSS library provides different elements to build a full sampling
scheme and the description of a posterior. The different components are
organized in a hierarchical tree. C++ templates are used quite heavily,
though classical C++ virtual inheritance is also present to make the
code more digestible without loss of performance. Some tests are
present in libLSS/tests. They are useful both to check that the library
behaves as it should and to serve as an entry point for newbies.

The LibLSS library is itself divided into several big branches:

- data: holds the framework data model; it holds the description of
  galaxy surveys in their individual components
- mcmc: holds the abstract description of elements that can be
  serialized into an MCMC file or the restart file. There is no
  specific implementation here, only the definition of what is an
  array, a random number generator, etc.
- physics: contains modules for handling more specific physics
  computations like cosmology or dynamics.
- samplers: generic branch that holds the different samplers of libLSS
- tools: a mixed bag of tools that have different uses in libLSS

data
^^^^

- ``spectro_gals.hpp``: Abstract definition of a galaxy survey
  (spectroscopic, but photo-z is also possible).
- ``window3d.hpp``: Algorithm to compute the selection in a 3d volume
  from 2d+1d information.
- ``galaxies.hpp``: Defines the structure describing a galaxy in a
  survey.
- ``projection.hpp``: Nearest-grid-point projection of galaxies from a
  survey to a 3d grid.
- ``linear_selection.hpp``: Implements a radial selection function
  defined piecewise, with linear interpolation.
- ``schechter_completeness.hpp``

tools
^^^^^

"tools" is a grab-all bag of tools and core infrastructure that allows
writing the rest of the code. In particular it contains the definition
of the ``console`` object. Among the most useful tools are the
following:

- the :ref:`FFTW manager <fftw_manager>` class, to help with the
  management of parallelism, plan creation, etc. with FFTW
- the :ref:`FUSEd array subsystem <fuse_array_mechanism>`, which
  enables lazy evaluation of multi-dimensional arrays.

mpi
^^^

libLSS provides an MPI class interface which reduces to dummy function
calls when no MPI is present. This allows writing the code once for MPI
and avoids ifdefs spoiling the source code.
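
As a sketch of what this buys you, the same code compiles and runs with
or without MPI. The class name ``MPI_Communication`` appears in the
core-program signatures earlier in this document; the header path and
the ``rank()``/``size()`` accessors below are assumptions to be checked
against the sources:

.. code:: c++

   #include "libLSS/mpi/generic_mpi.hpp" // assumed header location

   void report(MPI_Communication *comm) {
     // With MPI compiled in, these wrap MPI_Comm_rank/MPI_Comm_size;
     // without MPI they reduce to trivial stubs (rank 0, size 1).
     // (rank()/size() are assumed accessor names)
     int task = comm->rank();
     int ntasks = comm->size();
     (void)task;
     (void)ntasks;
   }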

"State" Dictionary information
------------------------------

libLSS/samplers/core/types_samplers.hpp gives all the default class
specializations and types used in ARES/HADES/BORG.

- (ArrayType) ``galaxy_data_%d``: stores the binned observed galaxy
  density or luminosity density.
- (SelArrayType) ``galaxy_sel_window_%d``: 3d selection window
- (SelArrayType) ``galaxy_synthetic_sel_window_%d``: 3d selection
  window with foreground corrections applied (ARES)
- (synchronized double) ``galaxy_nmean_%d``: normalization factor of
  the bias function (can be the mean density; it can be ignored for
  some bias models like the ManyPower bias model in the generic
  framework)
- (ArrayType1d) ``galaxy_bias_%d``: stores the bias parameters
- (ArrayType) ``s_field``: stores the real representation of the
  Gaussian random initial conditions, scaled to :math:`z=0`.
- (CArrayType) ``s_hat_field``: stores the complex representation of
  ``s_field``
- (ArrayType1d) ``powerspectrum``: finite-resolution power spectrum in
  physical units (Mpc/h)^3
- (ArrayType1d) ``k_modes``: :math:`k (h/\text{Mpc})` modes
  corresponding to the power spectrum stored in ``powerspectrum``. The
  exact meaning is sampler dependent.
- (ArrayType) ``k_keys``: a 3d array indicating, for each element of
  the Fourier representation of a field, how it is related to the
  power spectrum. That allows for doing something like
  ``abs(s_field[i][j][k])^2/P[k_keys[i][j][k]]`` to get the prior value
  associated with the mode in ``i, j, k`` (see the sketch after this
  list).
- (SLong) ``N0``,\ ``N1``,\ ``N2``: base size of the 3d grid, i.e.
  parameter space dimensions
- (SDouble) ``L0``,\ ``L1``,\ ``L2``: physical size of the 3d grid, in
  units of Mpc/h, comoving length.
- (ObjectStateElement) ``cosmology``: holds a structure giving the
  currently assumed cosmology.
- (ArrayType) ``foreground_3d_%d``: a 3d grid corresponding to the
  extruded foreground contamination in the data. The '%d' runs across
  all possible foregrounds specified in the configuration file.
- (SLong) ``MCMC_STEP``: the identifier of the current MCMC element.
- (RandomStateElement) ``random_generator``: the common, multi-threaded
  and multi-tasked, random number generator.
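
The ``k_keys`` entry above translates almost literally into code. A
minimal sketch of the prior-energy computation it enables (the function
name is hypothetical; the three arrays stand for ``s_hat_field``,
``k_keys`` and ``powerspectrum``, assumed already loaded, with an
integer key array used here for clarity):

.. code:: c++

   #include <complex>
   #include <boost/multi_array.hpp>

   double prior_energy(
       boost::multi_array<std::complex<double>, 3> const &s_hat,
       boost::multi_array<int, 3> const &keys,
       boost::multi_array<double, 1> const &P) {
     double E = 0;
     for (size_t i = 0; i < s_hat.shape()[0]; i++)
       for (size_t j = 0; j < s_hat.shape()[1]; j++)
         for (size_t k = 0; k < s_hat.shape()[2]; k++)
           // |s_hat|^2 / P, with the power-spectrum bin looked up via k_keys
           E += std::norm(s_hat[i][j][k]) / P[keys[i][j][k]];
     return E;
   }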

**BORG specific**

- (ArrayType) ``BORG_final_density``: final result of the forward model
  before likelihood comparison to data
- (ArrayType1d) ``BORG_vobs``: 3-component 1d array that contains the
  three components of the additional velocity vector required to fit
  the redshift density of galaxies.
- (ObjectStateElement) ``BORG_model``
- (double) ``hmc_Elh``: minus log-likelihood evaluated by the HMC
- (double) ``hmc_Eprior``: minus log-prior evaluated by the HMC
- (bool) ``hmc_force_save_final``: force the saving of the next final
  density
- (int) ``hmc_bad_sample``: the number of bad HMC samples since the
  last saved MCMC
- (SLong) ``hades_attempt_count``: number of attempted HMC moves since
  the last saved MCMC
- (SLong) ``hades_accept_count``: number of accepted HMC moves since
  the last saved MCMC
- (ArrayType) ``hades_mass``: diagonal mass matrix for the HMC

**ARES specific**

- (ArrayType) ``messenger_field``: stores the messenger field array
- (SDouble) ``messenger_tau``: stores the scalar value giving the
  covariance of the messenger field.
341
docs/source/developer/code_tutorials.rst
Normal file
341
docs/source/developer/code_tutorials.rst
Normal file
@ -0,0 +1,341 @@
|
|||||||
|
Code tutorials
|
||||||
|
##############
|
||||||
|
|
||||||
|
.. include:: Code_tutorials/Types.inc.rst
|
||||||
|
|
||||||
|
.. include:: Code_tutorials/FFTW_manager.inc.rst
|
||||||
|
|
||||||
|
.. _reading_in_meta_parameters_and_arrays:
|
||||||
|
|
||||||
|
Reading in meta-parameters and arrays
|
||||||
|
=====================================
|
||||||
|
|
||||||
|
If one wishes to access the the content of ARES MCMC files in C++,
|
||||||
|
functions are available in CosmoTool and LibLSS. For example:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include <boost/multi_array.hpp> //produce arrays
|
||||||
|
#include "CosmoTool/hdf5_array.hpp" //read h5 atributes as said arrays
|
||||||
|
#include "libLSS/tools/hdf5_scalar.hpp" //read h5 attributes as scalars
|
||||||
|
#include <H5Cpp.h> //access h5 files
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
using namespace LibLSS;
|
||||||
|
|
||||||
|
int main()
|
||||||
|
{
|
||||||
|
typedef boost::multi_array<double, 3> array3_type;
|
||||||
|
|
||||||
|
//access mcmc and restart files
|
||||||
|
H5::H5File meta("restart.h5_0", H5F_ACC_RDONLY);
|
||||||
|
H5::H5File f("mcmc_0.h5", H5F_ACC_RDONLY);
|
||||||
|
|
||||||
|
//read the number of pixels of the cube as integrer values (x,y,z)
|
||||||
|
int N0 = LibLSS::hdf5_load_scalar<int>(meta, "scalars/N0");
|
||||||
|
int N1 = LibLSS::hdf5_load_scalar<int>(meta, "scalars/N1");
|
||||||
|
int N2 = LibLSS::hdf5_load_scalar<int>(meta, "scalars/N2");
|
||||||
|
|
||||||
|
array3_type density(boost::extents[N0][N1][N2]);
|
||||||
|
|
||||||
|
//read the density field as a 3d array
|
||||||
|
CosmoTool::hdf5_read_array(f, "scalars/s_field", density);
|
||||||
|
}
|
||||||
|
|
||||||
|
.. _obtaining_timing_statistics:
|
||||||
|
|
||||||
|
Obtaining timing statistics
|
||||||
|
===========================
|
||||||
|
|
||||||
|
By default the statistics are not gathered. It is possible (and advised
|
||||||
|
during development and testing) to activate them through a build.sh
|
||||||
|
option ``--perf``. In that case, each "ConsoleContext" block is timed
|
||||||
|
separately. In the C++ code, a console context behaves like this:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
/* blabla */
|
||||||
|
{
|
||||||
|
LibLSS::ConsoleContext<LOG_DEBUG> ctx("costly computation");
|
||||||
|
|
||||||
|
/* Computations */
|
||||||
|
ctx.print("Something I want to say");
|
||||||
|
} /* Exiting context */
|
||||||
|
/* something else */
|
||||||
|
|
||||||
|
Another variant that automatically notes down the function name and the
|
||||||
|
filename is
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
/* blabla */
|
||||||
|
{
|
||||||
|
LIBLSS_AUTO_CONTEXT(LOG_DEBUG, ctx);
|
||||||
|
/* Computations */
|
||||||
|
ctx.print("Something I want to say");
|
||||||
|
} /* Exiting context */
|
||||||
|
/* something else */
|
||||||
|
|
||||||
|
A timer is started at the moment the ConsoleContext object is created.
|
||||||
|
The timer is destroyed at the "Exiting context" stage. The result is
|
||||||
|
marked in a separate hash table. Be aware that in production mode you
|
||||||
|
should turn off the performance measurements as they take time for
|
||||||
|
functions that are called very often. You can decide on a log level
|
||||||
|
different than LOG_DEBUG (it can be LOG_VERBOSE, LOG_INFO, ...), it is
|
||||||
|
the default level for any print call used with the context.
|
||||||
|
|
||||||
|
The string given to console context is used as an identifier, so please
|
||||||
|
use something sensible. At the moment the code gathering performances is
|
||||||
|
not aware of how things are recursively called. So you will only get one
|
||||||
|
line per context. Once you have run an executable based on libLSS it
|
||||||
|
will produce a file called "timing_stats.txt" in the current working
|
||||||
|
directory. It is formatted like this:
|
||||||
|
|
||||||
|
.. code:: text
|
||||||
|
|
||||||
|
Cumulative timing spent in different context
|
||||||
|
--------------------------------------------
|
||||||
|
Context, Total time (seconds)
|
||||||
|
|
||||||
|
BORG LPT MODEL 2 0.053816
|
||||||
|
BORG LPT MODEL SIMPLE 2 0.048709
|
||||||
|
BORG forward model 2 0.047993
|
||||||
|
Classic CIC projection 2 0.003018
|
||||||
|
(...)
|
||||||
|
|
||||||
|
It consists of three columns, separated by a tab. The first column is
|
||||||
|
the name of the context. The second column is the number of times this
|
||||||
|
context has been called. The third and last column is the cumulative
|
||||||
|
time taken by this context, in seconds. At the moment the output is not
|
||||||
|
sorted, but it may be in the future. You want the total time to be as small
|
||||||
|
as possible. This time may be large for two reasons: you call the
|
||||||
|
context an insane number of times, or you call it a few times but each
|
||||||
|
call is very costly. How to optimize is then up to you.
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: Code_tutorials/CPP_Multiarray.inc.rst
|
||||||
|
|
||||||
|
|
||||||
|
MPI tools
|
||||||
|
=========
|
||||||
|
|
||||||
|
Automatic particle exchange between MPI tasks
|
||||||
|
---------------------------------------------
|
||||||
|
|
||||||
|
It is often useful for codes doing N-body simulations to exchange the
|
||||||
|
ownership of particles and all their attributes. The BORG submodule has
|
||||||
|
a generic framework to handle these cases. It is composed of the
|
||||||
|
following parts:
|
||||||
|
|
||||||
|
- a ``BalanceInfo`` structure (in
|
||||||
|
``libLSS/physics/forwards/particle_balancer/particle_distribute.hpp``)
|
||||||
|
which holds temporary information required to do the balancing, and
|
||||||
|
undo it if needed for adjoint gradients. It has an empty constructor
|
||||||
|
and a special function ``allocate`` which must take an MPI
|
||||||
|
communicator and the number of particles that are to be considered
|
||||||
|
(including extra buffering).
|
||||||
|
- generic distribute / undistribute functions called respectively
|
||||||
|
``particle_redistribute`` and ``particle_undistribute``.
|
||||||
|
- a generic attribute management system to remove buffer copies.
|
||||||
|
|
||||||
|
We can start from an example taken from ``test_part_swapper.cpp``:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
BalanceInfo info;
|
||||||
|
NaiveSelector selector;
|
||||||
|
boost::multi_array<double, 2> in_positions;
|
||||||
|
size_t numRealPositions, Nparticles;
|
||||||
|
|
||||||
|
/* Fill in_positions... */
|
||||||
|
|
||||||
|
info.allocate(comm, Nparticles);
|
||||||
|
|
||||||
|
info.localNumParticlesBefore = numRealPositions;
|
||||||
|
particle_redistribute(info, in_positions, selector);
|
||||||
|
/* info.localNumParticlesAfter is filled */
|
||||||
|
|
||||||
|
In the code above all the initializations are skipped. The load balancer
|
||||||
|
is initialized with ``allocate``. Then the actual number of particles
|
||||||
|
used in the input buffer is indicated by filling
|
||||||
|
``localNumParticlesBefore``. Then ``particle_redistribute`` is invoked.
|
||||||
|
The particles may be completely reshuffled in that operation. The real
|
||||||
|
number of viable particles is indicated in ``localNumParticlesAfter``.
|
||||||
|
Finally, but importantly, the balancing decision is taken by
|
||||||
|
``selector``, which at the moment must be a functor and bases its
|
||||||
|
decision on the position alone. In the future it may be possible to use an
|
||||||
|
attribute instead.
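For the adjoint gradient pass the operation can be undone later; a minimal sketch, assuming the undistribute call mirrors the distribute call:

.. code:: c++

/* sketch: undo the redistribution using the information kept in 'info'
   (the signature is assumed symmetric to particle_redistribute) */
particle_undistribute(info, in_positions);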
|
||||||
|
|
||||||
|
Now it is possible to pass an arbitrary number of attributes, living in
|
||||||
|
separate array-like objects. The example is similar to the previous one:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
BalanceInfo info;
|
||||||
|
NaiveSelector selector;
|
||||||
|
boost::multi_array<double, 2> in_positions;
|
||||||
|
boost::multi_array<double, 2> velocities;
|
||||||
|
size_t numRealPositions, Nparticles;
|
||||||
|
|
||||||
|
/* Fill in_positions... */
|
||||||
|
|
||||||
|
info.allocate(comm, Nparticles);
|
||||||
|
|
||||||
|
info.localNumParticlesBefore = numRealPositions;
|
||||||
|
particle_redistribute(info, in_positions, selector,
|
||||||
|
make_attribute_helper(Particles::vector(velocities))
|
||||||
|
);
|
||||||
|
/* info.localNumParticlesAfter is filled */
|
||||||
|
|
||||||
|
The code will automatically allocate a small amount of temporary memory
|
||||||
|
to accommodate I/O operations. Two kinds of attributes are supported
|
||||||
|
by default, though the mechanism can be extended by creating new classes:
|
||||||
|
|
||||||
|
- scalar: a simple 1d array of single elements (float, double, whatever
|
||||||
|
is supported by the automatic MPI translation layer and does not rely
|
||||||
|
on dynamic allocations).
|
||||||
|
- vector: a simple 2d array of shape Nx3 of whatever element type is
|
||||||
|
supported by the automatic MPI translation layer.
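As an illustration of the scalar kind, a per-particle mass array could be exchanged alongside the positions. This is a sketch which assumes a ``Particles::scalar`` helper analogous to the ``Particles::vector`` helper shown above:

.. code:: c++

boost::multi_array<double, 1> masses(boost::extents[Nparticles]);

/* 'Particles::scalar' is assumed by analogy with 'Particles::vector' */
particle_redistribute(info, in_positions, selector,
    make_attribute_helper(Particles::scalar(masses))
);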
|
||||||
|
|
||||||
|
.. _ghost_planes:
|
||||||
|
|
||||||
|
Ghost planes
|
||||||
|
------------
|
||||||
|
|
||||||
|
The BORG module has special capabilities to handle ghost planes, i.e.
|
||||||
|
(N-1)d planes of an Nd cube that is split for MPI work. This happens
|
||||||
|
typically when using FFTW, for which only a slab of planes is available
|
||||||
|
locally and the code needs some other information from the other planes
|
||||||
|
to do local computation. An example of this case is the computation of
|
||||||
|
a gradient: one needs one extra plane at each edge of the slab to be able
|
||||||
|
to compute the gradient. The ghost plane mechanism tries to automate the
|
||||||
|
boring part of gathering information and, when needed, redistributing the
|
||||||
|
adjoint gradient of that same operation. The header is
|
||||||
|
``libLSS/tools/mpi/ghost_planes.hpp`` and is exporting one templated
|
||||||
|
structure:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
template<typename T, size_t Nd>
|
||||||
|
struct GhostPlanes: GhostPlaneTypes<T, Nd> {
|
||||||
|
template<typename PlaneList,typename PlaneSet, typename DimList>
|
||||||
|
void setup(
|
||||||
|
MPI_Communication* comm_,
|
||||||
|
PlaneList&& planes, PlaneSet&& owned_planes,
|
||||||
|
DimList&& dims,
|
||||||
|
size_t maxPlaneId_);
|
||||||
|
|
||||||
|
void clear_ghosts();
|
||||||
|
|
||||||
|
template<typename T0, size_t N>
|
||||||
|
void synchronize(boost::multi_array_ref<T0,N> const& planes);
|
||||||
|
|
||||||
|
template<typename T0, size_t N>
|
||||||
|
void synchronize_ag(boost::multi_array_ref<T0,N>& ag_planes);
|
||||||
|
|
||||||
|
ArrayType& ag_getPlane(size_t i);
|
||||||
|
ArrayType& getPlane(size_t i);
|
||||||
|
};
|
||||||
|
|
||||||
|
Many comments are written in the code. Note that ``Nd`` above designates
|
||||||
|
the number of dimensions of a **plane**. So if you manipulate 3d boxes,
|
||||||
|
you want to indicate ``Nd=2``. The typical workflow of using
|
||||||
|
ghost planes is the following:
|
||||||
|
|
||||||
|
- GhostPlanes object creation
|
||||||
|
- call the setup method to indicate the provided data and the
|
||||||
|
requirements
|
||||||
|
- do stuff
|
||||||
|
- call synchronize before needing the ghost planes
|
||||||
|
- use the ghost planes with getPlane()
|
||||||
|
- Repeat synchronize if needed
|
||||||
|
|
||||||
|
There is an adjoint gradient variant of the synchronization step which
|
||||||
|
performs a sum reduction of the adjoint gradient arrays corresponding to the
|
||||||
|
ghost planes.
|
||||||
|
|
||||||
|
An example C++ code is:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
std::vector<size_t> here_planes{/* list of the planes that are on the current MPI node */};
|
||||||
|
std::vector<size_t> required_planes{/* list of the planes that you need to do computation on this node */};
|
||||||
|
ghosts.setup(comm, required_planes, here_planes, std::array<int,2>{128,128} /* That's the dimension of the plane, here 2d */, 64 /* That's the total number of planes over all nodes */);
|
||||||
|
|
||||||
|
/* A is a slab with range in [startN0,startN0+localN0]. This function will synchronize the data over all nodes. */
|
||||||
|
ghosts.synchronize(A);
|
||||||
|
|
||||||
|
/* ghosts.getPlane(plane_id) will return a 2d array containing the data of the ghost plane 'plane_id'. Note that the data of A are not accessible through that function. */
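As an illustration, a centered finite difference at the lower edge of the slab can use the ghost plane just below it. This is a sketch: ``gradient``, ``dx`` and the dimensions ``N1``, ``N2`` are assumptions, and bounds handling is elided.

.. code:: c++

/* sketch: difference along the slab direction at the first local plane,
   using the ghost plane startN0-1 owned by a neighboring task */
auto const &below = ghosts.getPlane(startN0 - 1);
for (size_t j = 0; j < N1; j++)
  for (size_t k = 0; k < N2; k++)
    gradient[startN0][j][k] =
        (A[startN0 + 1][j][k] - below[j][k]) / (2 * dx);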
|
||||||
|
|
||||||
|
|
||||||
|
The ``synchronize`` and ``synchronize_ag`` functions accept an optional argument
|
||||||
|
to indicate what kind of synchronization the user wants. At the moment
|
||||||
|
two synchronizations are supported: GHOST_COPY and GHOST_ACCUMULATE.
|
||||||
|
GHOST_COPY is the classic mode, which indicates that the missing planes have
|
||||||
|
to be copied from a remote task to local memory. It also specifies that
|
||||||
|
the adjoint gradient will accumulate information from the different
|
||||||
|
tasks. Note that the array ``A`` is a slab. This means that if you do not use
|
||||||
|
the FFTW helper mechanism you should allocate it using the following
|
||||||
|
pattern for 3d arrays:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
// Some alias for convenience
|
||||||
|
using boost::extents;
|
||||||
|
typedef boost::multi_array_types::extent_range e_range;
|
||||||
|
|
||||||
|
/* To use a classical multi_array allocation, may be slow */
|
||||||
|
boost::multi_array<double, 3> A(extents[e_range(startN0, localN0)][N1][N2]);
|
||||||
|
|
||||||
|
/* To allocate using the uninitialized array mechanism */
|
||||||
|
U_Array A_p(extents[e_range(startN0, localN0)][N1][N2]);
|
||||||
|
auto& A = A_p.get_array();
|
||||||
|
// Note that A_p is destroyed at the end of the current context if you
|
||||||
|
// use that.
|
||||||
|
|
||||||
|
/* To allocate using the uninitialized array mechanism, and shared_ptr */
|
||||||
|
std::shared_ptr<U_Array> A_p = std::make_shared<U_Array>(extents[e_range(startN0, localN0)][N1][N2]);
|
||||||
|
auto& A = A_p->get_array();
|
||||||
|
|
||||||
|
// If A_p is transferred somewhere else, then it will not be deallocated.
|
||||||
|
|
||||||
|
For 2d arrays, just remove one dimension in all the above code.
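For instance, with the same aliases as above:

.. code:: c++

/* 2d variant of the classical multi_array allocation */
boost::multi_array<double, 2> A(extents[e_range(startN0, localN0)][N1]);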
|
||||||
|
|
||||||
|
The use of the adjoint gradient part is very similar:
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
ghosts.clear_ghosts();
|
||||||
|
|
||||||
|
/* declare gradient, fill up with the local information on the slab */
|
||||||
|
/* if there is information to deposit on 'plane', use the special array as follows */
|
||||||
|
ghosts.ag_getPlane(plane)[j][k] = some_value;
|
||||||
|
|
||||||
|
/* finish the computation with synchronize_ag; the complete gradient is then computed */
|
||||||
|
ghosts.synchronize_ag(gradient);
|
||||||
|
|
||||||
|
/* now 'gradient' holds the complete gradient that must reside on the local slab and the computation may continue */
|
||||||
|
|
||||||
|
You can check ``extra/borg/libLSS/samplers/julia/julia_likelihood.cpp``
|
||||||
|
for a more detailed usage in the Julia binding. This tool is also used
|
||||||
|
by the ManyPower bias model though in a much more complicated fashion
|
||||||
|
(``extra/borg/libLSS/physics/bias/many_power.hpp``).
|
||||||
|
|
||||||
|
.. include:: Code_tutorials/Julia_and_TensorFlow.inc.rst
|
||||||
|
|
||||||
|
.. include:: Code_tutorials/New_core_program.inc.rst
|
||||||
|
..
|
||||||
|
.. include:: Code_tutorials/Adding_a_new_likelihood_in_C++.inc.rst
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Adding a new likelihood/bias combination in BORG
|
||||||
|
================================================
|
||||||
|
|
||||||
|
*To be written...*
|
||||||
|
|
||||||
|
Useful resources
|
||||||
|
================
|
||||||
|
|
||||||
|
- `Google C++ Style Guide <https://google.github.io/styleguide/cppguide.html>`__
|
120
docs/source/developer/contributing_to_this_documentation.rst
Normal file
120
docs/source/developer/contributing_to_this_documentation.rst
Normal file
@ -0,0 +1,120 @@
|
|||||||
|
Contributing to this documentation
|
||||||
|
==================================
|
||||||
|
|
||||||
|
The present documentation for *ARES*-*HADES*-*BORG* is a joint endeavour from many members of the `Aquila Consortium <https://aquila-consortium.org/>`_.
|
||||||
|
|
||||||
|
The purpose of this page is to describe some technical aspects that are specific to our documentation. Useful general links are provided in the :ref:`last section <useful_resources_documentation>`.
|
||||||
|
|
||||||
|
|
||||||
|
Source files, Sphinx, and Read the Docs
|
||||||
|
---------------------------------------
|
||||||
|
|
||||||
|
Source files and online edition
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Source files of the present documentation are located in the `public ARES repository on Bitbucket <https://bitbucket.org/bayesian_lss_team/ares/>`_, in a subdirectory called ``docs/``. Their extension is ``.rst``.
|
||||||
|
|
||||||
|
The easiest way to contribute to the documentation is to directly edit source files online with Bitbucket, by navigating to them in the git repository and using the button `edit` in the top right-hand corner. Alternatively, clicking on the link `Edit on Bitbucket` on Read the Docs will take you to the same page. Editing online with Bitbucket will automatically create a pull request to the branch that is shown in the top left-hand corner of the editor.
|
||||||
|
|
||||||
|
Sphinx and Read the Docs
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The present documentation is based on **Sphinx**, a powerful documentation generator using python. The source format is **reStructuredText** (RST). It is hosted by **Read the Docs** (https://readthedocs.org), which provides some convenient features:
|
||||||
|
|
||||||
|
- the documentation is built every time a commit is pushed to the |a| repository,
|
||||||
|
- documentation for several versions is maintained (the current version is visible in green at the bottom of left bar in Read the Docs pages),
|
||||||
|
- automatic code documentation can be generated (in the future).
|
||||||
|
|
||||||
|
|
||||||
|
Off-line edition and creation of a pull request
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To build the documentation locally, go to ``docs/`` and type
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
make html
|
||||||
|
|
||||||
|
You will need a python environment with Sphinx; see for example `this page on how to get started with Sphinx <https://docs.readthedocs.io/en/stable/intro/getting-started-with-sphinx.html>`_. Output HTML pages are generated in ``docs/_build/html``.
|
||||||
|
|
||||||
|
You can edit or add any file in ``docs/source/`` locally. Once you have finished preparing your edits of the documentation, please make sure to resolve any Sphinx warnings.
|
||||||
|
|
||||||
|
You can then commit your changes to a new branch (named for instance ``yourname/doc``) and create a pull request as usual (see :ref:`development_with_git`). Please make sure to create a pull request to the correct branch, corresponding to the version of the code that you are documenting.
|
||||||
|
|
||||||
|
Once your pull request is merged, the documentation will be automatically built on Read the Docs.
|
||||||
|
|
||||||
|
|
||||||
|
Contributing new pages
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
reStructuredText files
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The easiest way to contribute a new page is to directly write a reStructuredText document and place it somewhere in ``docs/source``. Give it a ``.rst`` extension and add it somewhere in the table of contents in ``docs/source/index.rst`` or in sub-files such as ``docs/source/user/extras.rst``.
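For instance, a hypothetical page saved as ``docs/source/user/my_new_page.rst`` would be referenced in the ``toctree`` as follows (a sketch; the file name is made up for illustration):

.. code:: rst

.. toctree::
   :maxdepth: 1

   user/my_new_page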
|
||||||
|
|
||||||
|
To include figures, add the image (jpg, png, etc.) in a subdirectory of ``docs/source``. As all images are ultimately included in the |a| repository, please be careful with image sizes.
|
||||||
|
|
||||||
|
reStructuredText syntax
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
A reStructuredText primer is available `here <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_.
|
||||||
|
|
||||||
|
The order of headings used throughout the |a| documentation is the following:
|
||||||
|
|
||||||
|
.. code:: text
|
||||||
|
|
||||||
|
######### part
|
||||||
|
********* chapter
|
||||||
|
========= sections
|
||||||
|
--------- subsections
|
||||||
|
~~~~~~~~~ subsubsections
|
||||||
|
^^^^^^^^^
|
||||||
|
'''''''''
|
||||||
|
|
||||||
|
Included reStructuredText files
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
- **Extension**. If you write a page that is included in another page (using the RST directive ``.. include::``), make sure that its extension is ``.inc.rst``, not simply ``.rst`` (otherwise Sphinx will generate an undesired HTML page and may throw warnings).
|
||||||
|
- **Figures**. If there are figures in your "included" pages, use the "absolute" path (in the Sphinx sense, i.e. relative to ``docs/source/``) instead of the relative path, otherwise Sphinx will throw warnings and may not properly display your figures on Read the Docs (even if they are properly displayed on your local machine). For instance, in ``docs/source/user/postprocessing/ARES_basic_outputs.inc.rst``, one shall use
|
||||||
|
|
||||||
|
.. code:: rst
|
||||||
|
|
||||||
|
.. image:: /user/postprocessing/ARES_basic_outputs_files/ares_basic_outputs_12_1.png
|
||||||
|
|
||||||
|
instead of
|
||||||
|
|
||||||
|
.. code:: rst
|
||||||
|
|
||||||
|
.. image:: ARES_basic_outputs_files/ares_basic_outputs_12_1.png
|
||||||
|
|
||||||
|
Markdown pages
|
||||||
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If you have a page in Markdown format (for example, created in the **Aquila CodiMD**) that you wish to include in the documentation, you shall convert it to reStructuredText format. There exist automatic tools to do so, for instance `CloudConvert <https://cloudconvert.com/md-to-rst>`_ (online) or `M2R <https://github.com/miyakogi/m2r>`_ (on Github). It is always preferable to check the reStructuredText output.
|
||||||
|
|
||||||
|
Jupyter notebooks
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
- **Conversion to RST**. If you have Jupyter/IPython notebooks that you wish to include in the documentation, Jupyter offers a `command <https://nbconvert.readthedocs.io>`_ to convert to reStructuredText:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
jupyter nbconvert --to rst your_notebook.ipynb
|
||||||
|
|
||||||
|
The output will be named ``your_notebook.rst`` and any image will be placed in ``your_notebook_files/*.png``. These files can be directly included in ``docs/source/`` after minimal editing.
|
||||||
|
|
||||||
|
- **nbsphinx**. Alternatively, you can use the nbsphinx extension for Sphinx (https://nbsphinx.readthedocs.io/) which allows you to directly add the names of ``*.ipynb`` files to the `toctree`, but offers less flexibility.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
.. _useful_resources_documentation:
|
||||||
|
|
||||||
|
Useful resources
|
||||||
|
----------------
|
||||||
|
|
||||||
|
- `Read the Docs documentation <https://docs.readthedocs.io/en/stable/index.html>`__
|
||||||
|
- `Installing Sphinx <https://www.sphinx-doc.org/en/master/usage/installation.html>`__
|
||||||
|
- `Getting Started with Sphinx <https://docs.readthedocs.io/en/stable/intro/getting-started-with-sphinx.html>`__
|
||||||
|
- `reStructuredText Primer <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`__
|
||||||
|
- Markdown conversion: `CloudConvert <https://cloudconvert.com/md-to-rst>`__, `M2R <https://github.com/miyakogi/m2r>`__
|
||||||
|
- `Jupyter nbconvert <https://nbconvert.readthedocs.io>`_, `nbsphinx <https://nbsphinx.readthedocs.io/>`__
|
45
docs/source/developer/copyright_and_authorship.rst
Normal file
45
docs/source/developer/copyright_and_authorship.rst
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
Copyright and authorship
|
||||||
|
========================
|
||||||
|
|
||||||
|
ARES/BORG is developed under the CeCILL v2.1 license, which is compatible
|
||||||
|
with the GNU General Public License (GPL). The GPL is fundamentally based on
|
||||||
|
Anglo-Saxon law and is not fully compatible with European laws. However
|
||||||
|
CeCILL implies GPL protections and it is available in at least two
|
||||||
|
European languages, French and English. Keep in mind that in principle
|
||||||
|
your moral rights on the software that you write are your sole ownership,
|
||||||
|
while the exploitation rights may belong to the entity which has paid
|
||||||
|
your salary/equipment during the development phase. An interesting
|
||||||
|
discussion on French/European author protection is given
|
||||||
|
`here <http://isidora.cnrs.fr/IMG/pdf/2014-07-07_-_Droit_d_auteur_des_chercheurs_Logiciels_Bases_de_Donne_es_et_Archives_Ouvertes_-_Grenoble_ssc.pdf>`__
|
||||||
|
(unfortunately only in French; if anybody finds an equivalent in English
|
||||||
|
please post it here).
|
||||||
|
|
||||||
|
How to specify copyright info in source code?
|
||||||
|
----------------------------------------------
|
||||||
|
|
||||||
|
As the authorship of the code is becoming diverse, it is important to
|
||||||
|
mark fairly who is/are the main author(s) of a specific part of the
|
||||||
|
code. The current situation is the following:
|
||||||
|
|
||||||
|
- if an "ARES TAG" is found in the source code, it is used to fill up
|
||||||
|
copyright information. For example
|
||||||
|
|
||||||
|
.. code:: c++
|
||||||
|
|
||||||
|
// ARES TAG: authors_num = 2
|
||||||
|
// ARES TAG: name(0) = Guilhem Lavaux
|
||||||
|
// ARES TAG: email(0) = guilhem.lavaux@iap.fr
|
||||||
|
// ARES TAG: year(0) = 2014-2018
|
||||||
|
// ARES TAG: name(1) = Jens Jasche
|
||||||
|
// ARES TAG: email(1) = jens.jasche@fysik.su.se
|
||||||
|
// ARES TAG: year(1) = 2009-2018
|
||||||
|
|
||||||
|
this indicates that two authors are principal authors, with their names,
|
||||||
|
emails and years of writing.
|
||||||
|
|
||||||
|
- In addition to the principal authors, minor modifications are noted
|
||||||
|
by the script and additional names/emails are put in the 'Additional
|
||||||
|
Contributions' section.
|
||||||
|
- by default Guilhem Lavaux and Jens Jasche are marked as the main
|
||||||
|
authors. When all the files are marked correctly this default will
|
||||||
|
disappear and an error will be raised when no tag is found.
|
245
docs/source/developer/development_with_git.rst
Normal file
245
docs/source/developer/development_with_git.rst
Normal file
@ -0,0 +1,245 @@
|
|||||||
|
.. _development_with_git:
|
||||||
|
|
||||||
|
Development with git
|
||||||
|
====================
|
||||||
|
|
||||||
|
In case you are not familiar with the git version control system please
|
||||||
|
also consult the corresponding tutorial on git for bitbucket/atlassian
|
||||||
|
`here <https://www.atlassian.com/git/tutorials/what-is-version-control>`__.
|
||||||
|
|
||||||
|
In the following we will assume that your working branch is called
|
||||||
|
"my_branch". In addition the "master" branch should reflect the "master"
|
||||||
|
of the "blss" repository (the reference repository). Further in the
|
||||||
|
following we will consider the ARES main infrastructure.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
:code:`get-aquila-modules.sh` sets up git hooks to verify the quality of the code
|
||||||
|
that is committed to the repository. It relies in particular on :code:`clang-format`. On GNU/Linux systems,
|
||||||
|
you may download static binaries of clang-format `here <https://aur.archlinux.org/packages/clang-format-static-bin/>`__.
|
||||||
|
|
||||||
|
|
||||||
|
Slides of the tutorial
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
See `this file <https://www.aquila-consortium.org/wiki/index.php/File:ARES_git.pdf>`__.
|
||||||
|
|
||||||
|
Finding the current working branch
|
||||||
|
----------------------------------
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git branch
|
||||||
|
|
||||||
|
Branching (and creating a new branch) from current branch
|
||||||
|
---------------------------------------------------------
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git checkout -b new_branch
|
||||||
|
|
||||||
|
This will create a branch from the current state and move to the new branch
|
||||||
|
"new_branch"
|
||||||
|
|
||||||
|
Setting up remote
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
First we add the remote:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git remote add blss git@bitbucket.org:bayesian_lss_team/ares.git
|
||||||
|
|
||||||
|
Next we can fetch:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git fetch blss
|
||||||
|
|
||||||
|
Pulling updates
|
||||||
|
---------------
|
||||||
|
|
||||||
|
Be sure that you are in the master branch
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git checkout master
|
||||||
|
|
||||||
|
Pull any updates from blss
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git pull blss master
|
||||||
|
|
||||||
|
Here you may get merge problems due to submodules if you have touched the
|
||||||
|
.gitmodules of your master branch. In that case you should revert the
|
||||||
|
.gitmodules to its pristine status:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git checkout blss/master -- .gitmodules
|
||||||
|
|
||||||
|
This line has checked out the file .gitmodules from the blss/master
|
||||||
|
branch and has overwritten the current file.
|
||||||
|
|
||||||
|
And then do a submodule sync:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git submodule sync
|
||||||
|
|
||||||
|
And an update:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git submodule update
|
||||||
|
|
||||||
|
Now your master branch is up to date with blss. You can push it to
|
||||||
|
bitbucket:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git push
|
||||||
|
|
||||||
|
This will update the master branch of *your fork* on bitbucket. Now you
|
||||||
|
can move to your private branch (e.g. "my_branch").
|
||||||
|
|
||||||
|
Rebase option for adjusting
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Rebasing is better if you intend to create a pull request for the
|
||||||
|
feature branch to the master. That ensures that no spurious patch will
|
||||||
|
be present coming from the main branch which would create a merge
|
||||||
|
conflict.
|
||||||
|
|
||||||
|
Now you can rebase your branch on the new master using:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git rebase master
|
||||||
|
|
||||||
|
Merging option
|
||||||
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If you want to merge between two branches (again you should not merge
|
||||||
|
from master to avoid polluting with extra commits):
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git merge other_branch
|
||||||
|
|
||||||
|
Pushing modifications, procedures for pull requests
|
||||||
|
---------------------------------------------------
|
||||||
|
|
||||||
|
Cherry picking
|
||||||
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
It is possible to cherry pick commits in a git branch. Use "git
|
||||||
|
cherry-pick COMMIT_ID" to import the given commit to the current branch.
|
||||||
|
The patch is applied and directly available for a push.
|
||||||
|
|
||||||
|
Procedure for a pull request
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
This section describes the procedure of how to create your own developer
|
||||||
|
branch from the ARES master repository. Go to the master branch (which
|
||||||
|
should reflect BLSS master branch):
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git checkout blss/master
|
||||||
|
|
||||||
|
Create a branch (e.g. 'your_branch') with:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git checkout -b your_branch
|
||||||
|
|
||||||
|
Import commits from your working branch (e.g. "my_branch"), either with git merge:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git merge my_branch
|
||||||
|
|
||||||
|
or with cherry-picking:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git cherry-pick this_good_commit
|
||||||
|
git cherry-pick this_other_commit
|
||||||
|
|
||||||
|
where this_good_commit and this_other_commit refer to the actual commits
|
||||||
|
that you want to pick from the repository.
|
||||||
|
|
||||||
|
Push the branch:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git push origin your_branch
|
||||||
|
|
||||||
|
and create the pull request.
|
||||||
|
|
||||||
|
Please avoid as much as possible contaminating the pull request with the
|
||||||
|
specificities of your own workspace (e.g. gitmodules updates, etc.).
|
||||||
|
|
||||||
|
Using tags
|
||||||
|
----------
|
||||||
|
|
||||||
|
To add a tag locally and push it:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git tag <tagname>
|
||||||
|
git push --tags
|
||||||
|
|
||||||
|
To delete a local tag:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git tag --delete <tagname>
|
||||||
|
|
||||||
|
To delete a remote tag:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git push --delete <remote> <tagname>
|
||||||
|
|
||||||
|
or
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git push <remote> :<tagname>
|
||||||
|
|
||||||
|
Reference [1]_.
|
||||||
|
|
||||||
|
.. _archivingrestoring_a_branch:
|
||||||
|
|
||||||
|
Archiving/restoring a branch
|
||||||
|
----------------------------
|
||||||
|
|
||||||
|
The proper way to archive a branch is to use tags. If you delete the
|
||||||
|
branch after you have tagged it then you've effectively kept the branch
|
||||||
|
around but it won't clutter your branch list. If you need to go back to
|
||||||
|
the branch just check out the tag. It will effectively restore the
|
||||||
|
branch from the tag.
|
||||||
|
|
||||||
|
To archive and delete the branch:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git tag archive/<branchname> <branchname>
|
||||||
|
git branch -D <branchname>
|
||||||
|
|
||||||
|
To restore the branch some time later:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git checkout -b <branchname> archive/<branchname>
|
||||||
|
|
||||||
|
The history of the branch will be preserved exactly as it was when you
|
||||||
|
tagged it. Reference [2]_.
|
||||||
|
|
||||||
|
.. [1]
|
||||||
|
https://stackoverflow.com/questions/5480258/how-to-delete-a-remote-tag
|
||||||
|
|
||||||
|
.. [2]
|
||||||
|
https://stackoverflow.com/questions/1307114/how-can-i-archive-git-branches
|
8
docs/source/developer/life_cycles_of_objects.rst
Normal file
8
docs/source/developer/life_cycles_of_objects.rst
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
Code architecture
|
||||||
|
=================
|
||||||
|
|
||||||
|
Slides of the tutorial
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
See `this file <https://www.aquila-consortium.org/wiki/index.php/File:ARES_code.pdf>`__.
|
||||||
|
Some of these slides are starting to get outdated. Check the doc pages in case of doubt.
|
159
docs/source/index.rst
Normal file
159
docs/source/index.rst
Normal file
@ -0,0 +1,159 @@
|
|||||||
|
|a| is the main component of the Bayesian Large Scale Structure inference
|
||||||
|
pipeline. The present version of the ARES framework is 2.1. Please consult
|
||||||
|
:ref:`CHANGES overview` for an overview of the different improvements over the
|
||||||
|
different versions.
|
||||||
|
|
||||||
|
|a| is written in C++14 and has been parallelized with OpenMP and MPI. It currently compiles with major compilers (gcc, intel, clang).
|
||||||
|
|
||||||
|
Table of contents
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:maxdepth: 1
|
||||||
|
:caption: Theory
|
||||||
|
|
||||||
|
theory/ARES
|
||||||
|
theory/BORG
|
||||||
|
theory/ARES&BORG_FFT_normalization
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:maxdepth: 1
|
||||||
|
:caption: User documentation
|
||||||
|
|
||||||
|
changes
|
||||||
|
user/building
|
||||||
|
user/inputs
|
||||||
|
user/outputs
|
||||||
|
user/running
|
||||||
|
user/postprocessing
|
||||||
|
user/extras
|
||||||
|
user/clusters
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:maxdepth: 1
|
||||||
|
:caption: Developer documentation
|
||||||
|
|
||||||
|
developer/development_with_git
|
||||||
|
developer/code_architecture
|
||||||
|
developer/life_cycles_of_objects
|
||||||
|
developer/ares_modules
|
||||||
|
developer/code_tutorials
|
||||||
|
developer/contributing_to_this_documentation
|
||||||
|
developer/copyright_and_authorship
|
||||||
|
|
||||||
|
Citing
|
||||||
|
------
|
||||||
|
|
||||||
|
If you are using |a| for your project, please cite the following articles for ARES2 and ARES3:
|
||||||
|
|
||||||
|
* Jasche, Kitaura, Wandelt, 2010, MNRAS, 406, 1 (arxiv 0911.2493)
|
||||||
|
* Jasche & Lavaux, 2015, MNRAS, 447, 2 (arxiv 1402.1763)
|
||||||
|
* Lavaux & Jasche, 2016, MNRAS, 455, 3 (arxiv 1509.05040)
|
||||||
|
* Jasche & Lavaux, 2019, A&A, 625, A64 (arxiv 1806.11117)
|
||||||
|
|
||||||
|
However, bear in mind that depending on the features that you are using you may want to cite other papers as well.
|
||||||
|
Here is a non-exhaustive list of those articles:
|
||||||
|
|
||||||
|
* Model development:
|
||||||
|
|
||||||
|
* HADES epoch:
|
||||||
|
|
||||||
|
* HMC, exponential transform, linear bias: Jasche, Kitaura, Wandelt, 2010, MNRAS, 406, 1 (arXiv 0911.2493)
|
||||||
|
* HMC, exponential transform, power law bias:
|
||||||
|
|
||||||
|
* Jasche, Leclercq, Wandelt, 2015
|
||||||
|
* Jasche, Wandelt, 2012, MNRAS, 425, 1042 (arXiv 1106.2757)
|
||||||
|
|
||||||
|
* Foreground/Robustification:
|
||||||
|
|
||||||
|
* Jasche, Lavaux, 2017, A&A (arXiv:1706.08971)
|
||||||
|
* Porqueres, Kodi Ramanah, Jasche, Lavaux, 2019, A&A (arXiv: 1812.05113)
|
||||||
|
|
||||||
|
* Cosmic expansion model:
|
||||||
|
|
||||||
|
* Kodi Ramanah, Lavaux, Jasche, Wandelt, 2019, A&A (arXiv: 1808.07496)
|
||||||
|
|
||||||
|
* Photometric redshifts
|
||||||
|
|
||||||
|
* HADES with Photo-Z: Jasche & Wandelt, 2012, MNRAS, 425, 1042 (arXiv: 1106.2757)
|
||||||
|
|
||||||
|
* Galaxy shear:
|
||||||
|
|
||||||
|
* Porqueres, Heavens, Mortlock & Lavaux, 2021, MNRAS, 502, 3035 (arXiv 2011.07722)
|
||||||
|
* Porqueres, Heavens, Mortlock & Lavaux, 2022, MNRAS, 509, 3194 (arXiv 2108.04825)
|
||||||
|
|
||||||
|
* Cosmic velocity field:
|
||||||
|
|
||||||
|
* Prideaux-Ghee, Leclercq, Lavaux, Heavens, Jasche, 2022, MNRAS (arXiv: 2204.00023)
|
||||||
|
* Boruah, Lavaux, Hudson, 2022, MNRAS (arXiv 2111.15535)
|
||||||
|
|
||||||
|
* BORG-PM
|
||||||
|
|
||||||
|
* Jasche & Lavaux, 2019, A&A, 625, A64 (arXiv 1806.11117)
|
||||||
|
|
||||||
|
* EFT bias model and likelihood
|
||||||
|
|
||||||
|
* Schmidt, Elsner, Jasche, Nguyen, Lavaux, JCAP 01, 042 (2019) (arXiv:1808.02002)
|
||||||
|
* Schmidt, Cabass, Jasche, Lavaux, JCAP 11, 008 (2020) (arXiv:2004.06707)
|
||||||
|
|
||||||
|
* Data applications
|
||||||
|
|
||||||
|
* SDSS Main Galaxy sample:
|
||||||
|
* SDSS3 LRG sample:
|
||||||
|
|
||||||
|
* Lavaux, Jasche & Leclercq, 2019, arXiv:1909.06396
|
||||||
|
|
||||||
|
* 2M++ sample:
|
||||||
|
|
||||||
|
* Lavaux & Jasche, 2016, MNRAS, 455, 3 (arXiv 1509.05040)
|
||||||
|
* Jasche & Lavaux, 2019, A&A, 625, A64 (arXiv 1806.11117)
|
||||||
|
|
||||||
|
|
||||||
|
*HADES* and *BORG* papers have a different listing.
|
||||||
|
|
||||||
|
For a full listing of publications from the Aquila consortium, please check the
|
||||||
|
`Aquila website <https://aquila-consortium.org/publications/>`_.
|
||||||
|
|
||||||
|
Acknowledgements
|
||||||
|
----------------
|
||||||
|
|
||||||
|
|
||||||
|
This work has been funded by the following grants and institutions over the
|
||||||
|
years:
|
||||||
|
|
||||||
|
* The DFG cluster of excellence "Origin and Structure of the Universe"
|
||||||
|
(http://www.universe-cluster.de).
|
||||||
|
* Institut Lagrange de Paris (grant ANR-10-LABX-63, http://ilp.upmc.fr) within
|
||||||
|
the context of the Idex SUPER subsidized by the French government through
|
||||||
|
the Agence Nationale de la Recherche (ANR-11-IDEX-0004-02).
|
||||||
|
* BIG4 (ANR-16-CE23-0002) (https://big4.iap.fr)
|
||||||
|
* The "Programme National de Cosmologie et Galaxies" (PNCG, CNRS/INSU)
|
||||||
|
* Through the grant code ORIGIN, it has received support from
|
||||||
|
the "Domaine d'Interet Majeur (DIM) Astrophysique et Conditions d'Apparitions
|
||||||
|
de la Vie (ACAV)" from Ile-de-France region.
|
||||||
|
* The Starting Grant (ERC-2015-STG 678652) "GrInflaGal" of the European Research Council.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
.. Indices and tables
|
||||||
|
.. ==================
|
||||||
|
..
|
||||||
|
.. * :ref:`genindex`
|
||||||
|
.. * :ref:`modindex`
|
||||||
|
.. * :ref:`search`
|
||||||
|
|
||||||
|
.. Order of headings used throughout the documentation:
|
||||||
|
|
||||||
|
######### part
|
||||||
|
********* chapter
|
||||||
|
========= sections
|
||||||
|
--------- subsections
|
||||||
|
~~~~~~~~~ subsubsections
|
||||||
|
^^^^^^^^^
|
||||||
|
'''''''''
|
||||||
|
|
||||||
|
.. toctree-filt::
|
||||||
|
:maxdepth: 1
|
||||||
|
:caption: Python reference documentation
|
||||||
|
|
||||||
|
:aquila:pythonref.rst
|
20
docs/source/pythonref.rst
Normal file
20
docs/source/pythonref.rst
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
.. automodule:: aquila_borg
|
||||||
|
:members:
|
||||||
|
|
||||||
|
.. automodule:: aquila_borg.cosmo
|
||||||
|
:members:
|
||||||
|
|
||||||
|
.. automodule:: aquila_borg.likelihood
|
||||||
|
:members:
|
||||||
|
|
||||||
|
.. automodule:: aquila_borg.samplers
|
||||||
|
:members:
|
||||||
|
|
||||||
|
.. automodule:: aquila_borg.forward
|
||||||
|
:members:
|
||||||
|
|
||||||
|
.. automodule:: aquila_borg.forward.models
|
||||||
|
:members:
|
||||||
|
|
||||||
|
.. automodule:: aquila_borg.forward.velocity
|
||||||
|
:members:
|
28
docs/source/theory/ARES&BORG_FFT_normalization.rst
Normal file
28
docs/source/theory/ARES&BORG_FFT_normalization.rst
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
FFT normalization in ARES/BORG
|
||||||
|
==============================
|
||||||
|
|
||||||
|
This page summarizes the convention used for normalizing Fourier
|
||||||
|
transforms, and the rationale behind it.
|
||||||
|
|
||||||
|
The discrete Fourier transform is defined, for a cubic box of mesh size
|
||||||
|
:math:`N` as:
|
||||||
|
|
||||||
|
.. math:: x_{\vec{i}} = \mathcal{F}_{\vec{i},\vec{a}} x_{\vec{a}} = \sum_{\vec{a}} \exp\left(i \frac{2\pi}{N} \vec{i}\cdot\vec{a}\right) x_{\vec{a}}
|
||||||
|
|
||||||
|
In cosmology we are mostly interested in the continuous infinite Fourier
|
||||||
|
transform\
|
||||||
|
|
||||||
|
.. math:: \delta(\vec{x}) = \iiint \frac{\text{d}\vec{k}}{(2\pi)^3} \exp(i \vec{x}.\vec{k}) \hat{\delta}(\vec{k})\;.
|
||||||
|
|
||||||
|
It can be shown that the continuous transform, under reasonable
|
||||||
|
conditions, can be approximated by, and matched in normalization to, the following
|
||||||
|
expression in the discrete case:
|
||||||
|
|
||||||
|
.. math:: \delta(\vec{x}) = \frac{1}{L^3} \sum_{\vec{k}} \exp\left(i\frac{2\pi}{L} \vec{x}\cdot\vec{k} \right) \hat{\delta}\left(\vec{k}\frac{2\pi}{L}\right)

This
|
||||||
|
leads us to define the following operator for the discrete Fourier
|
||||||
|
transform:
|
||||||
|
|
||||||
|
.. math:: F = \frac{1}{L^3} \mathcal{F}

which admits the following
|
||||||
|
inverse:
|
||||||
|
|
||||||
|
.. math:: F^{-1} = L^3 \mathcal{F}^{-1} = \left(\frac{L}{N}\right)^3 \mathcal{F}^\dagger
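As a consistency check, using :math:`\mathcal{F}^\dagger \mathcal{F} = N^3\,\mathrm{Id}` (i.e. :math:`\mathcal{F}^{-1} = N^{-3} \mathcal{F}^\dagger`):

.. math:: F^{-1} F = L^3 \mathcal{F}^{-1} \, \frac{1}{L^3} \mathcal{F} = \mathrm{Id}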
|
70
docs/source/theory/ARES.rst
Normal file
70
docs/source/theory/ARES.rst
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
.. _introduction_to_bayesian_large_scale_structure_inference:
|
||||||
|
|
||||||
|
Introduction to ARES
|
||||||
|
====================
|
||||||
|
|
||||||
|
The Algorithm for REconstruction and Sampling (ARES) is a full Bayesian
|
||||||
|
large scale structure inference method targeted at precision recovery of
|
||||||
|
cosmological power-spectra from three dimensional galaxy redshift
|
||||||
|
surveys. Specifically it performs joint inferences of three dimensional
|
||||||
|
density fields, cosmological power spectra as well as luminosity
|
||||||
|
dependent galaxy biases and corresponding noise levels for different
|
||||||
|
galaxy populations in the survey.
|
||||||
|
|
||||||
|
In order to provide full Bayesian uncertainty quantification the
|
||||||
|
algorithm explores the joint posterior distribution of all these
|
||||||
|
quantities via an efficient implementation of high dimensional Markov
|
||||||
|
Chain Monte Carlo methods in a block sampling scheme. In particular the
|
||||||
|
sampling consists of generating, from a Wiener posterior distribution,
|
||||||
|
random realizations of three dimensional density fields constrained by
|
||||||
|
data in the form of galaxy number counts. Following each generation, we
|
||||||
|
produce conditioned random realizations of the power-spectrum, galaxy
|
||||||
|
biases and noise levels through several sampling steps. Iterating these
|
||||||
|
sampling steps correctly yields random realizations from the joint
|
||||||
|
posterior distribution. In this fashion the ARES algorithm accounts for
|
||||||
|
all joint and correlated uncertainties between all inferred quantities
|
||||||
|
and allows for accurate inferences from galaxy surveys with non-trivial
|
||||||
|
survey geometries. Classes of galaxies with different biases are treated
|
||||||
|
as separate sub samples, allowing even for combined analyses of more
|
||||||
|
than one galaxy survey.
|
||||||
|
|
||||||
|
For further information please consult our publications that are listed
|
||||||
|
`here <https://www.aquila-consortium.org/publications/>`__.
|
||||||
|
|
||||||
|
.. _implementation_the_ares3_code:
|
||||||
|
|
||||||
|
Implementation: the ARES3 code
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
The ARES3 package comes with a basic flavour within the binary program
|
||||||
|
"ares3". "ares3" is an implementation of the algorithm outlined in the
|
||||||
|
paper "Matrix free Large scale Bayesian inference" (Jasche & Lavaux
|
||||||
|
2014).
|
||||||
|
|
||||||
|
ARES3 serves as a basis for a number of extensions and modules. The
|
||||||
|
minimal extension is the foreground sampler mechanism, which allows one to
|
||||||
|
fit some model of foreground contamination in large scale structure
|
||||||
|
data. The second main module is the *HADES* sampler, which
|
||||||
|
incorporates the HMC base definition and implementation alongside some
|
||||||
|
likelihood models. The third module is the :ref:`BORG <introduction_to_borg>` sampler. It
|
||||||
|
is a much more advanced likelihood analysis which incorporates
|
||||||
|
non-linear dynamics of the large scale structure.
|
||||||
|
|
||||||
|
.. _ares_model:
|
||||||
|
|
||||||
|
ARES model
|
||||||
|
----------
|
||||||
|
|
||||||
|
The model implemented in ARES is the simplest 'linear' model. The
|
||||||
|
density field is supposed to be a pure Gaussian random field, which
|
||||||
|
is linearly biased and selected, with a Gaussian error model. For a single
|
||||||
|
catalog, the forward model corresponds to:
|
||||||
|
|
||||||
|
:math:`N^\mathrm{g}_p = \bar{N} R_p (1 + b \delta_p) + n_p` with
|
||||||
|
:math:`\langle n_p n_{p'} \rangle = R_p \bar{N} \delta^K_{p, p'}`
|
||||||
|
|
||||||
|
:math:`\delta^K` is the Kronecker symbol, :math:`R_p` is the linear
|
||||||
|
response of the survey, i.e. the 3d completeness, :math:`b` the linear
|
||||||
|
bias and :math:`\bar{N}` the mean number of galaxies per grid element.
|
||||||
|
Effectively :math:`\bar{N}` will absorb the details of the normalization
|
||||||
|
of :math:`R_p`.
|
24
docs/source/theory/BORG.rst
Normal file
24
docs/source/theory/BORG.rst
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
.. _introduction_to_borg:
|
||||||
|
|
||||||
|
Introduction to BORG
|
||||||
|
====================
|
||||||
|
|
||||||
|
The BORG3 (Bayesian Origin Reconstruction from Galaxies) model is a
|
||||||
|
submodule of the ARES3 framework. It shares the same infrastructure,
|
||||||
|
I/O system and general mechanisms. BORG3 also relies on the HADES3 package,
|
||||||
|
which implements an efficient Hamiltonian Markov Chain sampler of the
|
||||||
|
density field at fixed power spectrum and fixed selection effects.
|
||||||
|
|
||||||
|
More specifically, BORG3 implements the forward and adjoint gradient
|
||||||
|
model for different dynamical models: Lagrangian perturbation theory,
|
||||||
|
Second order Lagrangian perturbation theory, Linearly Evolving Potential
|
||||||
|
and full Particle Mesh. On top of that redshift space distortions are
|
||||||
|
supported by adding a translation to intermediate particle
|
||||||
|
representations.
|
||||||
|
|
||||||
|
In addition, BORG3 provides different likelihood models to relate the
|
||||||
|
matter density field to the galaxy density field: Gaussian white noise,
|
||||||
|
Poisson noise (with non-linear truncated power-law bias model), Negative
|
||||||
|
binomial likelihood.
|
||||||
|
|
||||||
|
Finally BORG3 fully supports MPI with scaling at least up to 1024 cores.
|
509
docs/source/user/building.rst
Normal file
509
docs/source/user/building.rst
Normal file
@ -0,0 +1,509 @@
|
|||||||
|
.. _building:
|
||||||
|
|
||||||
|
Building
|
||||||
|
########
|
||||||
|
|
||||||
|
Prerequisites
|
||||||
|
=============
|
||||||
|
|
||||||
|
* cmake ≥ 3.13
|
||||||
|
* automake
|
||||||
|
* libtool
|
||||||
|
* pkg-config
|
||||||
|
* gcc ≥ 7 , or intel compiler (≥ 2018), or Clang (≥ 7)
|
||||||
|
* wget (to download dependencies; the flag ``--use-predownload`` can be
|
||||||
|
used to bypass this dependency)
|
||||||
|
|
||||||
|
Optional requirements are:
|
||||||
|
|
||||||
|
* An `OpenMP <http://www.openmp.org>`_-enabled compiler (with OpenMP >= 2.0)
|
||||||
|
|
||||||
|
|a| does not require any preinstalled external libraries; it will download
|
||||||
|
and compile all necessary dependencies by default.
|
||||||
|
|
||||||
|
|
||||||
|
Python scripts have been tested with the following:
|
||||||
|
|
||||||
|
* Python == 3.5
|
||||||
|
* healpy == 1.10.3 (Guilhem has also a special version of healpy on Github `here <https://github.com/glavaux/healpy>`__)
|
||||||
|
* HDF5Py == 2.7.0
|
||||||
|
* Numexpr == 2.6.2
|
||||||
|
* Numba == 0.33.0 - 0.35.0
|
||||||
|
|
||||||
|
In addition the vtktools binding in ares_tools has been used with
|
||||||
|
Paraview ≥ 5.2. It should be safe to use it to upload data into Paraview
|
||||||
|
from numpy arrays.
|
||||||
|
|
||||||
|
.. _downloading_and_setting_up_for_building:
|
||||||
|
|
||||||
|
Downloading and setting up for building
|
||||||
|
=======================================
|
||||||
|
|
||||||
|
The first step to obtain and build ares is to clone the git repository
|
||||||
|
from bitbucket. On some supercomputing systems, it is impossible to access
|
||||||
|
the internet directly. The first clone should then be made on your
|
||||||
|
laptop/workstation and then replicated on the distant machine. Please
|
||||||
|
check the next section for more details. If the computer has access to
|
||||||
|
the internet this is easy:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git clone --recursive git@bitbucket.org:bayesian_lss_team/ares.git
|
||||||
|
|
||||||
|
Note that if you forget the "--recursive" option, either start from
|
||||||
|
scratch or do a
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git submodule init; git submodule update
|
||||||
|
|
||||||
|
Then you may want to choose a branch that interests you. At the time of
|
||||||
|
this writing (April 13th, 2021), there are 4 "main" branches:
|
||||||
|
|
||||||
|
* main (the bleeding edge variant of ARES)
|
||||||
|
* release/1.0
|
||||||
|
* release/2.0alpha
|
||||||
|
* release/2.1
|
||||||
|
|
||||||
|
The :code:`release/*` branches are stable, which means the existing code cannot
|
||||||
|
change significantly, notably to alter API or features. Bug fixes can still go
|
||||||
|
in there, and exceptionally some late merging of features. The general advice
|
||||||
|
when starting is to branch against the latest release, unless you particularly
|
||||||
|
need a feature of :code:`main`. There are of course lots of other sub-branches
|
||||||
|
for the different features and other development branches of each member of the
|
||||||
|
collaboration.
|
||||||
|
|
||||||
|
You may change branch
|
||||||
|
by running ``git checkout THE_BRANCH_NAME_THAT_YOU_WANT``. Once you are
|
||||||
|
on the branch that you want you may run the ``get-aquila-modules.sh``
|
||||||
|
script. The first step consists in running
|
||||||
|
``bash get-aquila-modules.sh --clone``, this will clone all the
|
||||||
|
classical Aquila private modules in the "extra/" subdirectory. The
|
||||||
|
second step is to ensure that all branches are setup correctly by
|
||||||
|
running ``bash get-aquila-modules.sh --branch-set``.
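That is:

.. code:: bash

bash get-aquila-modules.sh --clone
bash get-aquila-modules.sh --branch-set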
|
||||||
|
|
||||||
|
Now that the modules have been cloned and setup we may now move to
|
||||||
|
building.
|
||||||
|
|
||||||
|
As a word of caution, do not touch the gitmodules files. Whenever you
|
||||||
|
need to make changes, create a new branch in either the main repository
|
||||||
|
or the modules and work in that branch.
|
||||||
|
|
||||||
|
Sync the submodules:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
cd ares
|
||||||
|
git submodule sync
|
||||||
|
git submodule update --init --recursive
|
||||||
|
|
||||||
|
.. _supercomputer_without_outgoing_access_to_internet:
|
||||||
|
|
||||||
|
Supercomputer without outgoing access to internet
|
||||||
|
=================================================
|
||||||
|
|
||||||
|
If the supercomputer does not accept to let you create connection to
|
||||||
|
internet (e.g. TGCC in France), things are a bit more complicated. The
|
||||||
|
first clone of ares and its modules should be done on your
|
||||||
|
laptop/workstation. Make it a clean variant for example:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
git clone --recursive git@bitbucket.org:bayesian_lss_team/ares.git ares_clean
|
||||||
|
|
||||||
|
Then proceed again with
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
bash get-aquila-modules.sh --clone
|
||||||
|
bash get-aquila-modules.sh --branch-set
|
||||||
|
bash build.sh --download-deps
|
||||||
|
|
||||||
|
Now replicate that tree to the computer:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
cd ..
|
||||||
|
rsync -av ares_clean THE_COMPUTER:
|
||||||
|
|
||||||
|
And now you can proceed as usual for building.
|
||||||
|
|
||||||
|
**However** for updating later the GIT tree later, we have two special
|
||||||
|
commands available in get-aquila-modules.sh. On your laptop/workstation,
|
||||||
|
run the following from the ares top source directory:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
bash get-aquila-modules.sh --send-pack THE_COMPUTER ares_clean origin
|
||||||
|
|
||||||
|
This will send the content of the current git tree (including the
|
||||||
|
registered modules in .aquila-modules) from the remote ``origin`` to
|
||||||
|
remote directory ``ares_clean`` on the computer ``THE_COMPUTER``.
|
||||||
|
However the checked out branch will not be remotely merged! A second
|
||||||
|
operation is required. Now login on the distant computer and run
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
bash get-aquila-modules.sh --local-merge origin
|
||||||
|
|
||||||
|
This will merge all the corresponding branches from origin to the
|
||||||
|
checked out branches. If everything is ok you should not get any error
|
||||||
|
messages. Errors can happen if you modified the branches in an
|
||||||
|
incompatible way. In that case you have to fix the git merge in the
|
||||||
|
usual way (edit files, add them, commit).
|
||||||
|
|
||||||
|
.. _the_build.sh_script:
|
||||||
|
|
||||||
|
The build.sh script
|
||||||
|
===================
|
||||||
|
|
||||||
|
To help with the building process, there is a script called build.sh in
|
||||||
|
the top directory. It will ensure cmake is called correctly with all the
|
||||||
|
adequate parameters. At the same time it cleans the build
|
||||||
|
directory if needed.
|
||||||
|
|
||||||
|
The most basic scenario for building is the following:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
bash build.sh
|
||||||
|
bash build.sh --download-deps
|
||||||
|
cd build
|
||||||
|
make
|
||||||
|
|
||||||
|
|
||||||
|
Please pay attention warnings and error messages. The most important are color
|
||||||
|
color-marked. Notably, some problems may occur if two versions of the same compiler
|
||||||
|
are used for C and C++.
|
||||||
|
|
||||||
|
The full usage is the following (obtained with ``bash build.sh -h``):
|
||||||
|
|
||||||
|
.. code:: text
|
||||||
|
|
||||||
|
Ensure the current directory is ARES
|
||||||
|
This is the build helper. The arguments are the following:
|
||||||
|
|
||||||
|
--cmake CMAKE_BINARY instead of searching for cmake in the path,
|
||||||
|
use the indicated binary
|
||||||
|
|
||||||
|
--without-openmp build without openmp support (default with)
|
||||||
|
--with-mpi build with MPI support (default without)
|
||||||
|
--c-compiler COMPILER specify the C compiler to use (default to cc)
|
||||||
|
--cxx-compiler COMPILER specify the CXX compiler to use (default to c++)
|
||||||
|
--julia JULIA_BINARY specify the full path of julia interpreter
|
||||||
|
--build-dir DIRECTORY specify the build directory (default to "build/" )
|
||||||
|
--debug build for full debugging
|
||||||
|
--no-debug-log remove all the debug output to increase speed on parallel
|
||||||
|
filesystem.
|
||||||
|
--perf add timing instructions and report in the log files
|
||||||
|
|
||||||
|
--extra-flags FLAGS extra flags to pass to cmake
|
||||||
|
--download-deps Predownload dependencies
|
||||||
|
--use-predownload Use the predownloaded dependencies. They must be in
|
||||||
|
downloads/
|
||||||
|
--no-predownload Do not use predownloaded dependencies in downloads/
|
||||||
|
--purge Force purging the build directory without asking
|
||||||
|
questions.
|
||||||
|
--native Try to activate all optimizations supported by the
|
||||||
|
running CPU.
|
||||||
|
--python[=PATH] Enable the building of the python extension. If PATH
|
||||||
|
is provided it must point to the executable of your
|
||||||
|
choice for (e.g `/usr/bin/python3.9`)
|
||||||
|
--with-julia Build with Julia support (default false)
|
||||||
|
--hades-python Enable hades-python (implies --python)
|
||||||
|
--skip-building-tests Do not build all the tests
|
||||||
|
|
||||||
|
Advanced usage:
|
||||||
|
|
||||||
|
--eclipse Generate for eclipse use
|
||||||
|
--ninja Use ninja builder
|
||||||
|
--update-tags Update the TAGS file
|
||||||
|
--use-system-boost[=PATH] Use the boost install available from the system. This
|
||||||
|
reduces your footprint but also increases the
|
||||||
|
possibilities of miscompilation and symbol errors.
|
||||||
|
--use-system-fftw[=PATH] Same but for FFTW3. We require the prefix path.
|
||||||
|
--use-system-gsl Same but for GSL
|
||||||
|
--use-system-eigen=PATH Same but for EIGEN. Here we require the prefix path of
|
||||||
|
the installation.
|
||||||
|
--use-system-hdf5[=PATH] Same but for HDF5. Require an HDF5 with C++ support.
|
||||||
|
The path indicate the prefix path of the installation of HDF5
|
||||||
|
(e.g. /usr/local or /usr). By default it will use
|
||||||
|
environment variables to guess it (HDF5_ROOT)
|
||||||
|
|
||||||
|
After the configuration, you can further tweak the configuration using ccmake
|
||||||
|
(if available on your system).
|
||||||
|
|
||||||
|
Note that on some superclusters it is not possible to download files
|
||||||
|
from the internet. You can only push data using SSH, but not run any wget,
|
||||||
|
curl or git pull. To account for that limitation, there are two options:
|
||||||
|
"download-deps" and "use-predownload". You should run "bash build.sh
|
||||||
|
--download-deps" on, e.g., your laptop or workstation and upload the
|
||||||
|
created "downloads" directory into the ARES source tree on the
|
||||||
|
supercomputer without touching anything inside that directory. Once you
|
||||||
|
did that you can build on the supercomputer login node, by adding
|
||||||
|
"--use-predownload" flag to build.sh in addition to others that you
|
||||||
|
need. If you want to compile with full MPI support, you have to give
|
||||||
|
'--with-mpi' as an argument to build.sh.
|
||||||
|
|
||||||
|
If you have built ARES before grabbing all the extra modules, it is fine
|
||||||
|
you can still recover your previous build. For that just go to your
|
||||||
|
build directory and run ``${CMAKE} .`` with ${CMAKE} being the cmake
|
||||||
|
executable that you have used originally. If you did not specify
|
||||||
|
anything just use 'cmake'.
|
||||||
|
|
||||||
|
A typical successful completion of the configuration ends like this:

.. code:: text

   Configuration done.
   Move to /home/lavaux/PROJECTS/ares/build and type 'make' now.
   Please check the configuration of your MPI C compiler. You may need
   to set an environment variable to use the proper compiler.
   Some example (for SH/BASH shells):
     OpenMPI:
       OMPI_CC=cc
       OMPI_CXX=c++
       export OMPI_CC OMPI_CXX

It tells you that you should move to the build directory (by default a
subdirectory called "build/" in the root of the ARES source code).
There is a potential pitfall when using some MPI C compilers. They may have
been installed by the system administrator to work by default with
another compiler (for example the Intel C Compiler) even though they work
completely fine with another one (like GCC). In that case you have
to force the MPI C compiler to use the one that you chose through the
indicated environment variable, otherwise you risk inconsistently
generated code and errors when building the final binary.

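For instance, to force OpenMPI's wrappers to use GCC (a sketch; adapt the
compiler names to your system):

.. code:: bash

   export OMPI_CC=gcc
   export OMPI_CXX=g++
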
.. code:: bash

   cd build ; make

.. note::

   * Use make parallelism if possible using the '-j' option. The number
     indicates the number of CPU cores to use in parallel to compile all the
     source code. For example, use ``make all -j4`` to compile using 4 parallel
     tasks. We have not yet caught all the detailed dependencies and a failure
     may occasionally happen. Just execute 'make' again to ensure that
     everything is in order (it should be).
   * Use ``make VERBOSE=1`` to see exactly what the compilation is doing.

Upon success of the compilation you will find executables in the ``src/`` subdirectory. Notably::

   ./src/ares3

.. _git_procedures:

Git procedures
==============

.. _general_checkup_management:

General checkup / management
----------------------------

.. code:: text

   bash get-aquila-modules.sh --status
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   This script can be run only by Aquila members.
   if your bitbucket login is not accredited the next operations will fail.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   Checking GIT status...
   Root tree       (branch master) : good. All clear.
   Module ares_fg  (branch master) : good. All clear.
   Module borg     (branch master) : good. All clear.
   Module dm_sheet (branch master) : good. All clear.
   Module hades    (branch master) : good. All clear.
   Module hmclet   (branch master) : good. All clear.

.. _git_submodules:

Git submodules
--------------

Contents of file 'BASE/ares/.gitmodules':

.. code:: bash

   [submodule 'external/cosmotool']
       path = external/cosmotool
       url = https://bitbucket.org/glavaux/cosmotool.git

.. _frequently_encountered_problems_fep:

Frequently Encountered Problems (FEP)
=====================================

.. _non_linked_files:

Non-linked files
----------------

Problem
~~~~~~~

* Not being able to compile after transferring to a supercluster
* An error like the following:

.. figure:: /user/building/Terminal_output.png
   :alt: /user/building/Terminal_output.png
   :width: 400px

   Terminal_output.png

* The build complains about not finding cfitsio in external/cfitsio even
  though cfitsio is actually in external/cfitsio.
* Folder external/cfitsio:

.. figure:: /user/building/Terminal_output-2.png
   :alt: /user/building/Terminal_output-2.png
   :width: 400px

   Terminal_output-2.png

Solution
~~~~~~~~

Purge all the .o and .a files in external/cfitsio, and force a rebuild of
libcfitsio by removing the file
{BUILD}/external_build/cfitsio-prefix/src/cfitsio-stamp/cfitsio-build,
then type make.

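A sketch of that cleanup, assuming ``$BUILD`` points to your build directory:

.. code:: bash

   # Remove stale object files and archives left over from the transfer.
   find external/cfitsio -name '*.o' -delete
   find external/cfitsio -name '*.a' -delete
   # Force a rebuild of libcfitsio by removing the build stamp.
   rm $BUILD/external_build/cfitsio-prefix/src/cfitsio-stamp/cfitsio-build
   cd $BUILD && make
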
MPI_CXX not found
-----------------

Problem
~~~~~~~

MPI_C is found but MPI_CXX is not found by CMake. The output of build.sh
contains something like:

.. code:: bash

   -- Found MPI_C: /path/to/libmpi.so (found version "3.1")
   -- Could NOT find MPI_CXX (missing: MPI_CXX_WORKS)
   -- Found MPI_Fortran: /path/to/libmpi_usempif08.so (found version "3.1")

.. _solution_1:

Solution
~~~~~~~~

You probably have two versions of MPI: the one you intend to use (e.g.
your installation of OpenMPI) and one which pollutes the environment
(e.g. from your anaconda). Therefore the compilation of the MPI C++ test
program (``build/CMakeFiles/FindMPI/test_mpi.cpp``) by CMake fails. To
troubleshoot:

* Check the commands that defined your environment variables using

  .. code:: bash

     set | grep -i MPI

* check the paths used in ``CPATH``, ``CPP_FLAGS``, etc. for spurious
  MPI headers (e.g. ``mpi.h``), as in the sketch below
* control the file ``build/CMakeFiles/CMakeError.txt`` if it exists

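For example, a quick scan of ``CPATH`` for spurious MPI headers (a sketch;
directories that do not exist are silently skipped):

.. code:: bash

   echo $CPATH | tr ':' '\n' | while read d; do
      ls "$d/mpi.h" 2>/dev/null
   done
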
.. _building_at_hpc_facilities:

Building at HPC facilities
--------------------------

First, if possible, clone the ARES base directory with git on the target
system:

.. code:: bash

   git clone git@bitbucket.org:bayesian_lss_team/ares.git

Initialize the submodules:

.. code:: bash

   cd ares
   git submodule init
   git submodule update

Obtain the additional Aquila modules:

.. code:: bash

   bash get-aquila-modules.sh --clone

Then, either on your laptop/workstation or on the target system (if it
allows outgoing internet connections), you can run the following
command:

.. code:: bash

   bash build.sh --download-deps

A typical problem is that some of the dependencies have not been
downloaded correctly. You should check that all dependencies are available
in the directory ``downloads/``. If you downloaded on your local computer,
you must upload the downloads directory to the ``ares/downloads``
subdirectory on the target system.

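A quick sanity check from the ARES source tree:

.. code:: bash

   ls downloads/
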
Check which modules are available:

.. code:: bash

   module avail

Choose the compiler or build environment. Also load the CMake module and
Python 3.

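For example (module names are hypothetical and site-dependent):

.. code:: bash

   module load gcc cmake python3
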
**Important note:** The Intel compiler requires basic infrastructure
provided by GCC. The default environment may be very old, in which case a
modern Intel Compiler (19 or 20) would be using old libraries from GCC 4.x.
You have to load the gcc compiler first (gcc > 7.x) and then load the Intel
compiler. You can check the compatibility with "icc -v" and see the
version of gcc that is used by intel.

.. _permissions_quota_etc:

Permissions, quota, etc
-----------------------

Some supercomputing facilities have a peculiar quota system. You have to
belong to a group to get access to the full disk quota (e.g. TGCC in
France). You can switch groups using "newgrp name_of_the_group" and
execute all commands in the spawned shell.

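For example (``my_project_group`` is a placeholder for your actual group name):

.. code:: bash

   newgrp my_project_group
   # Verify the active group of the spawned shell.
   id -gn
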
.. _external_hdf5_not_found:

External HDF5 not found
-----------------------

Problem
~~~~~~~

When running build.sh (particularly with the flag
``--use-system-hdf5``), cmake gives errors such as:

.. code:: text

   CMake Error: The following variables are used in this project, but they are set to NOTFOUND.
   Please set them or make sure they are set and tested correctly in the CMake files:
   HDF5_CXX_INCLUDE_DIR (ADVANCED)

   CMake Error in libLSS/CMakeLists.txt:
     Found relative path while evaluating include directories of "LSS":

       "HDF5_CXX_INCLUDE_DIR-NOTFOUND"

Solution
~~~~~~~~

* HDF5 must be compiled with the flags ``--enable-shared`` and
  ``--enable-cxx``.
* The environment variable ``HDF5_ROOT`` must point to the HDF5 prefix
  directory; cmake uses it from version 3.12 onwards (see also cmake
  policy CMP0074 and `this commit
  2ebe5e9 <https://bitbucket.org/bayesian_lss_team/ares/commits/2ebe5e9c323e30ece0caa124a0b705f3b1241273>`__).

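For example, a sketch pointing the build at a system HDF5 (the prefix path
is hypothetical):

.. code:: bash

   export HDF5_ROOT=/usr/local/hdf5
   bash build.sh --use-system-hdf5
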
.. include:: building/building_May_2020.inc.rst

770  docs/source/user/building/Aquila_tutorial_0.ipynb  Normal file
File diff suppressed because one or more lines are too long

BIN  docs/source/user/building/Terminal_output-2.png  Normal file
Binary file not shown. After Width: | Height: | Size: 288 KiB |

BIN  docs/source/user/building/Terminal_output.png  Normal file
Binary file not shown. After Width: | Height: | Size: 372 KiB |

196  docs/source/user/building/building_May_2020.inc.rst  Normal file
@ -0,0 +1,196 @@

Installing BORG for the Aquila meeting (May 2020)
=================================================

This note provides step-by-step instructions for downloading and
installing the BORG software package. These instructions were written
using a MacBook Air running OS X El Capitan. I encourage readers to
modify this description as may be required to install BORG on a
different OS. Please indicate all necessary modifications and which OS
was used.

Some prerequisites
------------------

The total installation will take approximately **7-8 GByte** of disk
space. Software prerequisites:

- cmake ≥ 3.10
- automake
- libtool
- pkg-config
- gcc ≥ 7, or intel compiler (≥ 2018), or Clang (≥ 7)
- wget (to download dependencies; the flag ``--use-predownload`` can be
  used to bypass this dependency if you have already downloaded the
  required files in the ``downloads`` directory)

Clone the repository from BitBucket
-----------------------------------

To clone the ARES repository, execute the following git command in a
console:

.. code:: bash

   git clone --recursive git@bitbucket.org:bayesian_lss_team/ares.git

After the clone is successful, change directory to ``ares``
and execute:

.. code:: bash

   bash get-aquila-modules.sh --clone

Ensure that the correct branches are set up for the submodules using:

.. code:: bash

   bash get-aquila-modules.sh --branch-set

If you want to check the status of the currently checked out ARES and
its modules, run:

.. code:: bash

   bash get-aquila-modules.sh --status

You should see the following output:

.. code:: text

   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   This script can be run only by Aquila members.
   if your bitbucket login is not accredited the next operations will fail.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   Checking GIT status...

   Root tree       (branch master) : good. All clear.
   Module ares_fg  (branch master) : good. All clear.
   Module borg     (branch master) : good. All clear.
   Module dm_sheet (branch master) : good. All clear.
   Module hades    (branch master) : good. All clear.
   Module hmclet   (branch master) : good. All clear.
   Module python   (branch master) : good. All clear.

Building BORG
-------------

To save time and bandwidth it is advised to pre-download the
dependencies that will be used as part of the building procedure. You
can do that with:

.. code:: bash

   bash build.sh --download-deps

That will download a number of tar.gz files which are put in the
``downloads/`` folder.

Then you can configure the build itself:

.. code:: bash

   bash build.sh --cmake CMAKE_BINARY --c-compiler YOUR_PREFERRED_C_COMPILER --cxx-compiler YOUR_PREFERRED_CXX_COMPILER --use-predownload

Add ``--with-mpi`` to add MPI support. For example (this probably needs to
be adjusted for your computer):

.. code:: bash

   bash build.sh --cmake /usr/local/Cellar/cmake/3.17.1/bin/cmake --c-compiler /usr/local/bin/gcc-10 --cxx-compiler /usr/local/bin/g++-10 --use-predownload

Once the configuration is successful you should see a final output similar
to this:

.. code:: text

   ------------------------------------------------------------------

   Configuration done.
   Move to /Volumes/EXTERN/software/borg_fresh/ares/build and type 'make' now.
   Please check the configuration of your MPI C compiler. You may need
   to set an environment variable to use the proper compiler.

   Some example (for SH/BASH shells):
   - OpenMPI:
      OMPI_CC=/usr/local/bin/gcc-9
      OMPI_CXX=/usr/local/bin/g++-9
      export OMPI_CC OMPI_CXX

   ------------------------------------------------------------------

It tells you to move to the default build directory using ``cd build``,
after which you can type ``make``. To speed up the compilation you can
use more computing power by adding a ``-j`` option. For example

.. code:: bash

   make -j4

will start 4 compilations at once (thus typically keeping 4 cores busy
all the time). Note that the compilation can take some time.

Running a test example
----------------------

The ARES repository comes with some standard examples for LSS analysis.
Here we will use a simple standard unit example for BORG. From your ARES
base directory change to the examples folder:

.. code:: bash

   cd examples

We will copy a few files to a temporary directory for executing the run. We
will assume here that ``$SOME_DIRECTORY`` is a directory that you have created
for the purpose of this tutorial. Please replace any occurrence of it by the
path of your choice in the scripts below. We will also assume that ``$ARES``
represents the source directory path of the ares tree.

.. code:: bash

   mkdir $SOME_DIRECTORY
   cp 2mpp-chain.ini $SOME_DIRECTORY
   cp completeness_12_5.fits.gz completeness_11_5.fits.gz 2MPP.txt $SOME_DIRECTORY
   cd $SOME_DIRECTORY

In the above, we have copied the ini file describing the run, then the data
file (survey mask) and the 2M++ data file for BORG. To start a BORG run just
execute the following code in the console:

.. code:: bash

   $ARES/build/src/hades3 INIT 2mpp-chain.ini

BORG will now execute a simple MCMC. You can interrupt the calculation at
any time. To resume the run you can just type:

.. code:: bash

   $ARES/build/src/hades3 RESUME 2mpp-chain.ini

You need at least on the order of 1000 samples to pass the initial
warm-up phase of the sampler. As the execution of the code will consume
about 2GB of storage, we suggest executing BORG in a directory
with sufficient free hard disk storage.

You can also follow the Aquila tutorial
---------------------------------------

You can find a tutorial on running and analysing a BORG run in the ARES
base directory:
``$ARES/docs/source/user/building/Aquila_tutorial_0.ipynb``. It is a jupyter
notebook, so please have `jupyter <https://jupyter.org>`_ running. We
provide access to the content of this notebook directly through this `link to the notebook <building/Aquila_tutorial_0.ipynb>`_.
It illustrates how to read and plot some of the data produced by BORG.

Switching to another branch
---------------------------

Follow these steps to switch your ares clone to another branch (starting
from the ``ares/`` directory):

.. code:: bash

   git checkout user/fancy_branch
   git pull
   # (the above step should only be necessary if you are not on a fresh clone and have not pulled recently)
   bash get-aquila-modules.sh --branch-set
   bash get-aquila-modules.sh --status
   # (verify that it responds with "all clear" on all repos)
   bash get-aquila-modules.sh --pull
   # ready to build (make clean is optional):
   cd build ; make clean ; make

9  docs/source/user/clusters.rst  Normal file
@ -0,0 +1,9 @@
Clusters
########

.. _clusters:

.. include:: clusters/Horizon.inc.rst
.. include:: clusters/Occigen.inc.rst
.. include:: clusters/Imperial_RCS.inc.rst
.. include:: clusters/SNIC.inc.rst

321  docs/source/user/clusters/Horizon.inc.rst  Normal file
@ -0,0 +1,321 @@
.. _horizon:

Horizon
=======

Compiling and using ARES/BORG on Horizon
----------------------------------------

Modules
~~~~~~~

.. code:: bash

   module purge
   module load gcc/7.4.0
   module load openmpi/3.0.3-ifort-18.0
   module load fftw/3.3.8-gnu
   module load hdf5/1.10.5-gcc5
   module load cmake
   module load boost/1.68.0-gcc6
   module load gsl/2.5
   module load julia/1.1.0

Building
~~~~~~~~

.. code:: bash

   bash build.sh --use-predownload --use-system-hdf5 --use-system-gsl --build-dir /data34/lavaux/BUILD_ARES --c-compiler gcc --cxx-compiler g++

Running
~~~~~~~

Jupyter on Horizon
------------------

Jupyter is not yet installed by default on the horizon cluster, but it
offers a nice remote interface for people:

- with slow and/or unreliable connections,
- who want to manage a notebook that can be annotated directly inline
  with Markdown, and then later converted to html or uploaded to the
  wiki with the figures included,
- who want to use ipyparallel more efficiently.

It is not for:

- people who do not like notebooks for one reason or another.

Installation
~~~~~~~~~~~~

We use Python 3.5 here. Load the following modules:

.. code:: bash

   module load intel/16.0-python-3.5.2 gcc/5.3.0

Then we are going to install jupyter locally:

.. code:: bash

   pip3.5 install --user jupyter-client==5.0.1 jupyter-contrib-core==0.3.1 jupyter-contrib-nbextensions==0.2.8 jupyter-core==4.3.0 jupyter-highlight-selected-word==0.0.11 jupyter-latex-envs==1.3.8.4 jupyter-nbextensions-configurator==0.2.5

At the moment (22 June 2017), I am using the above versions, but later
versions may well work without problems.

Automatic port forwarding and launch of Jupyter instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Jupyter can be cumbersome to start reliably, automatically and in a
consistent fashion. Guilhem Lavaux has written two scripts (`here <https://www.aquila-consortium.org/wiki/index.php/File:Jupyter_horizon.zip>`__) that
can help in that regard. The first script (``jupyter.sh``) has to be
left in the home directory on Horizon; it helps start a new
jupyter job and reports where it is located and how to contact it. The
second script (``.horizon-env.sh``) has to be kept on the local
station (i.e. the laptop of the user or their workstation). It triggers
the opening of ssh tunnels, starts jobs and forwards ports. It should be
loaded from ``.bashrc`` with a command like ``source ${HOME}/.horizon-env.sh``.
After such steps are taken, several things are possible. First, to start
a jupyter on horizon you may run ``juphoriz``. It will give the following
output:

.. code:: text

   ~ $ juphoriz
   Forwarding 10000 to b20:8888

Now you use your web browser and connect to
`localhost:10000 <https://localhost:10000>`__. You also know that your jupyter is on
beyond20 (port 8888).

To stop the session do the following:

.. code:: text

   ~ $ stopjup
   Do you confirm that you want to stop the session ? [y/N]
   y
   Jupyter stopped

If you run it a second time you will get:

.. code:: text

   [guilhem@gondor] ~ $ stopjup
   Do you confirm that you want to stop the session ? [y/N]
   y
   No port forwarding indication. Must be down.

which means that the port forwarding information has been cleared out
and the script does not know exactly how to proceed, so it does nothing.
If you still have a job queued on the system, it is your responsibility
to close it off to avoid occupying a horizon node for nothing.

Two other commands are available:

- ``shuthorizon``: it triggers the shutdown of the tunnel to horizon.
  Be careful, as no checks are done at the moment. So if you have
  port forwardings they will be cancelled and you will have to set them
  up manually again.
- ``hssh``: this opens a new ssh multiplexed connection to horizon. It
  will not ask for your password as it uses the multiplexer available
  in ssh. Note that it is not possible to start X11 forwarding through
  this.

IPyParallel
-----------

Now we need to install ipyparallel:

.. code:: bash

   pip3.5 install --user ipyparallel
   $HOME/.local/bin/ipcluster nbextension enable

Use `this pbs template <https://www.aquila-consortium.org/wiki/index.php/File:Pbs.engine.template.txt>`__.

You have to put several files in your $HOME/.ipython/profile_default:

- `IPCluster configuration <https://www.aquila-consortium.org/wiki/index.php/File:IPython_ipcluster_config_py.txt>`__
  as *ipcluster_config.py*. This file indicates how to interact with
  the computer cluster administration. Notably, it includes a link to the
  aforementioned template for PBS. I have removed all the extra
  untouched configuration options. However, in the original file
  installed by ipyparallel you will find all the other possible knobs.
- `IPCluster
  configuration <https://www.aquila-consortium.org/wiki/index.php/File:IPython_ipcontroller_config_py.txt>`__ as
  *ipcontroller_config.py*. This file is used to start up the
  controller aspect which talks to all engines. It is fairly minor as I
  have kept the controller on the login node to talk to engines on
  compute nodes.
- `IPCluster configuration <https://www.aquila-consortium.org/wiki/index.php/File:IPython_ipengine_config_py.txt>`__ as
  *ipengine_config.py*. This file is used to start up the engines on
  compute nodes. The notable option is to indicate to listen to any
  incoming traffic.

The documentation for ipyparallel is available from readthedocs
`here <http://ipyparallel.readthedocs.io/en/6.0.2/>`__.

Once you have put all the files in place you can start a new PBS-backed
kernel:

.. code:: text

   $ ipcluster start -n 16

With the above files, that will start one job of 16 cores. If you had
chosen 32, it would have been 2 MPI tasks of 16 cores each, etc.

To start using ipyparallel, open a new python kernel (either from
ipython, or more conveniently from a jupyter notebook):

.. code:: python3

   import ipyparallel as ipp
   c = ipp.Client()

Doing this will connect your kernel with a running ipyparallel batch
instance. ``c`` will hold a dispatcher object from which you can
instruct engines what to do.

IPyParallel comes with magic commands for IPython
`3 <http://ipyparallel.readthedocs.io/en/6.0.2/magics.html>`__. They are
great for dispatching all your commands; however, you must be aware that
the context is different from your main ipython kernel. Any object has to
be transmitted to the remote engine first. Check that page carefully to
learn how to do that.

MPIRUN allocation
-----------------

These are tips provided by Stephane Rouberol for finely specifying the
core/socket association of a given MPI/OpenMP computation.

.. code:: text

   # default is bind to *socket*
   mpirun -np 40 --report-bindings /bin/true 2>&1 | sed -e 's/.*rank \([[:digit:]]*\) /rank \1 /' -e 's/bound.*://' | sort -n -k2 | sed -e 's/ \([[:digit:]]\) / \1 /'

   rank 0 [B/B/B/B/B/B/B/B/B/B][./././././././././.][./././././././././.][./././././././././.]
   rank 1 [./././././././././.][B/B/B/B/B/B/B/B/B/B][./././././././././.][./././././././././.]
   (...)

.. code:: text

   # we can bind to core
   mpirun -np 40 --bind-to core --report-bindings /bin/true 2>&1 | sed -e 's/.*rank \([[:digit:]]*\) /rank \1 /' -e 's/bound.*://' | sort -n -k2 | sed -e 's/ \([[:digit:]]\) / \1 /'

   rank 0 [B/././././././././.][./././././././././.][./././././././././.][./././././././././.]
   rank 1 [./././././././././.][B/././././././././.][./././././././././.][./././././././././.]
   (...)

.. code:: text

   # we can bind to core + add optimization for nearest-neighbour comms (put neighbouring ranks on the same socket)
   mpirun -np 40 --bind-to core -map-by slot:PE=1 --report-bindings /bin/true 2>&1 | sed -e 's/.*rank \([[:digit:]]*\) /rank \1 /' -e 's/bound.*://' | sort -n -k2 | sed -e 's/ \([[:digit:]]\) / \1 /'

   rank 0 [B/././././././././.][./././././././././.][./././././././././.][./././././././././.]
   rank 1 [./B/./././././././.][./././././././././.][./././././././././.][./././././././././.]

.. code:: text

   # -----------------------------------------------------------
   # case 2: 1 node, nb of ranks < number of cores (hybrid code)
   # -----------------------------------------------------------

   beyond08: ~ > mpirun -np 12 -map-by slot:PE=2 --report-bindings /bin/true 2>&1 | sort -n -k 4
   [beyond08.iap.fr:34077] MCW rank 0 bound to socket 0[core 0[hwt 0]], socket 0[core 1[hwt 0]]: [B/B/./././././././.][./././././././././.][./././././././././.][./././././././././.]
   [beyond08.iap.fr:34077] MCW rank 1 bound to socket 0[core 2[hwt 0]], socket 0[core 3[hwt 0]]: [././B/B/./././././.][./././././././././.][./././././././././.][./././././././././.]
   [beyond08.iap.fr:34077] MCW rank 2 bound to socket 0[core 4[hwt 0]], socket 0[core 5[hwt 0]]: [././././B/B/./././.][./././././././././.][./././././././././.][./././././././././.]

.. code:: text

   beyond08: ~ > mpirun -np 12 -map-by socket:PE=2 --report-bindings /bin/true 2>&1 | sort -n -k 4
   [beyond08.iap.fr:34093] MCW rank 0 bound to socket 0[core 0[hwt 0]], socket 0[core 1[hwt 0]]: [B/B/./././././././.][./././././././././.][./././././././././.][./././././././././.]
   [beyond08.iap.fr:34093] MCW rank 1 bound to socket 1[core 10[hwt 0]], socket 1[core 11[hwt 0]]: [./././././././././.][B/B/./././././././.][./././././././././.][./././././././././.]
   [beyond08.iap.fr:34093] MCW rank 2 bound to socket 2[core 20[hwt 0]], socket 2[core 21[hwt 0]]: [./././././././././.][./././././././././.][B/B/./././././././.][./././././././././.]

.. code:: text

   beyond08: ~ > mpirun -np 12 -map-by socket:PE=2 --rank-by core --report-bindings /bin/true 2>&1 | sort -n -k 4
   [beyond08.iap.fr:34108] MCW rank 0 bound to socket 0[core 0[hwt 0]], socket 0[core 1[hwt 0]]: [B/B/./././././././.][./././././././././.][./././././././././.][./././././././././.]
   [beyond08.iap.fr:34108] MCW rank 1 bound to socket 0[core 2[hwt 0]], socket 0[core 3[hwt 0]]: [././B/B/./././././.][./././././././././.][./././././././././.][./././././././././.]
   [beyond08.iap.fr:34108] MCW rank 2 bound to socket 0[core 4[hwt 0]], socket 0[core 5[hwt 0]]: [././././B/B/./././.][./././././././././.][./././././././././.][./././././././././.]
   [beyond08.iap.fr:34108] MCW rank 3 bound to socket 1[core 10[hwt 0]], socket 1[core 11[hwt 0]]: [./././././././././.][B/B/./././././././.][./././././././././.][./././././././././.]

Fighting the shared node curse
------------------------------

Horizon compute nodes are each made of a single motherboard with 4 CPUs
set up on it. The physical access to the resources is transparently
visible from any of the CPUs. Unfortunately, each memory bank is attached
physically to a preferred CPU. For a typical node with 512 GB of RAM,
each CPU gets 128 GB. If one of the CPUs needs access to physical RAM
space hosted by another CPU, then the latency is significantly higher.
The Linux kernel wants to minimize this kind of problem, so it will try
hard to relocate the processes so that memory access is not
delocalised, kicking out at the same time any computations already in
progress on that CPU. This results in computations residing on some CPU
affecting computations on another CPU.

The situation can be even worse if two computations are sharing the same
CPU (each of which holds N cores, 8 < N < 14). In that case the
computations are fighting for CPU and memory resources. For pure
computation that is generally less of a problem, but this case is not so
frequent on computers designed to handle the analysis of large N-body
simulations.

To summarise: without checking and ensuring that your computations are
sitting wholly on a CPU socket, you may suffer catastrophic performance
degradation (I have experienced at least a factor of 10 a few times).

There are ways of avoiding this problem:

- check the number of cores available on the compute nodes and try your
  best to allocate a single CPU socket. For example, the beyond40cores
  queue is composed of nodes of 10 cores x 4 cpus. You should then ask
  PBS for "-l nodes=1:beyond40cores:ppn=10", which will give you 10
  cores, i.e. a whole CPU socket.
- remember that if you need 256 GB, then you should in practice use 2
  CPU sockets. So allocate 2 N cores (as in the previous case, we
  would need 20 cores, even if in the end only one CPU is doing
  computation).
- use numactl to get informed and enforce the resource allocation. For
  example, typing "numactl -H" on beyond08 gives the following:

.. code:: text

   available: 4 nodes (0-3)
   node 0 cpus: 0 1 2 3 4 5 6 7 8 9
   node 0 size: 131039 MB
   node 0 free: 605 MB
   node 1 cpus: 10 11 12 13 14 15 16 17 18 19
   node 1 size: 131072 MB
   node 1 free: 99 MB
   node 2 cpus: 20 21 22 23 24 25 26 27 28 29
   node 2 size: 131072 MB
   node 2 free: 103 MB
   node 3 cpus: 30 31 32 33 34 35 36 37 38 39
   node 3 size: 131072 MB
   node 3 free: 108 MB
   node distances:
   node   0   1   2   3
     0:  10  21  30  21
     1:  21  10  21  30
     2:  30  21  10  21
     3:  21  30  21  10

It states that the compute node is composed of 4 "nodes" (= CPU sockets
here). The logical CPUs affected to each physical CPU are given by "node X
cpus". The first line indicates that the Linux kernel logical cpus "0 1 2
... 9" are affected to the physical CPU 0. At the same time, node 0 has
the "node 0 size" RAM physically attached. The amount of free RAM on
this node is shown by "node 0 free". Finally, there is a node distance
matrix. It tells the user how far each node is from the others in terms
of communication speed. It can be seen that there may be up to a factor
3 penalty for communication between node 0 and node 2.

Scratch space
-------------

223  docs/source/user/clusters/Imperial_RCS.inc.rst  Normal file
@ -0,0 +1,223 @@

.. _imperial_rcs:

Imperial RCS
============

This page contains notes on how to compile and run |a| (and extensions) on `Imperial Research Computing Services <https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/>`_.

.. _gain_access_to_imperial_rcs:

Gain access to Imperial RCS
---------------------------

See `this page <https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/support/getting-started/>`__.

.. _copy_configuration_files:

Copy configuration files
------------------------

Copy the pre-prepared configuration files into your home directory, by cloning:

.. code:: bash

   cd ~/
   git clone git@bitbucket.org:florent-leclercq/imperialrcs_config.git .bashrc_repo

and typing:

.. code:: bash

   cd .bashrc_repo/
   bash create_symlinks.bash
   source ~/.bashrc

Load compiler and dependencies
------------------------------

Load the following modules (in this order, and **only these**, to avoid
conflicts):

.. code:: bash

   module purge
   module load gcc/8.2.0 git/2.14.3 cmake/3.14.0 intel-suite/2019.4 mpi anaconda3/personal

You can check that no other module is loaded using:

.. code:: bash

   module list

.. _prepare_conda_environment:

Prepare conda environment
-------------------------

If it's your first time loading anaconda, you will need to run (see `this page <https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/support/applications/conda/>`__):

.. code:: bash

   anaconda-setup

In any case, start from a clean conda environment (with only numpy) to
avoid conflicts between compilers. To do so:

.. code:: bash

   conda create -n pyborg numpy
   conda activate pyborg

.. _clone_ares_and_additional_packages:

Clone ARES and additional packages
----------------------------------

Clone the repository and additional packages as usual (see :ref:`ARES Building <Building>`):

.. code:: bash

   mkdir ~/codes
   cd ~/codes
   git clone --recursive git@bitbucket.org:bayesian_lss_team/ares.git
   cd ares
   bash get-aquila-modules.sh --clone

If a particular release or development branch is desired, these
additional lines (for example) must be run:

.. code:: bash

   git checkout develop/2.1
   bash get-aquila-modules.sh --branch-set develop/2.1

Note that ``git branch`` should not be used. Once this is done, one should
check whether the repository has been properly cloned and whether the
submodules are all on the correct branch (and fine). To do so, run:

.. code:: bash

   bash get-aquila-modules.sh --status

The output will describe whether the cloned modules are able to link to
the original repository.

If the status is not all clear (for example, the error could be in
cosmotool), try:

.. code:: bash

   git submodule update

and check the module status again.

.. _compile_ares:

Compile ARES
------------

Run the ARES build script using:

.. code:: bash

   bash build.sh --with-mpi --c-compiler icc --cxx-compiler icpc --python

(for other possible flags, such as the flag to compile BORG python, type
``bash build.sh -h``). Note: for releases <= 2.0, a fortran compiler was
necessary: add ``--f-compiler ifort`` to the line above. One may have to
predownload the dependencies for ares: for this, add the
``--download-deps`` flag on the first use of build.sh, and add
``--use-predownload`` on the second (which will then build ares).

Then compile:

.. code:: bash

   cd build
   make

The 'make' command can be sped up by specifying the number of parallel
jobs, N, used to perform the compilation:

.. code:: bash

   cd build
   make -j N

.. _run_ares_example_with_batch_script:

Run ARES example with batch script
----------------------------------

The following batch script (``job_example.bash``) runs the example using
mixed MPI/OpenMP parallelization (2 nodes, 32 cores/node, used as 16 MPI
processes x 2 OpenMP threads per node). Check `this
page <https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/computing/job-sizing-guidance/>`__
for job sizing on Imperial RCS.

.. code:: bash

   #!/bin/bash

   # request bash as shell for job
   #PBS -S /bin/bash

   # queue, parallel environment and number of processors
   #PBS -l select=2:ncpus=32:mem=64gb:mpiprocs=16:ompthreads=2
   #PBS -l walltime=24:00:00

   # joins error and standard outputs
   #PBS -j oe

   # keep error and standard outputs on the execution host
   #PBS -k oe

   # forward environment variables
   #PBS -V

   # define job name
   #PBS -N ARES_EXAMPLE

   # main commands here
   module load gcc/8.2.0 intel-suite/2019.4 mpi
   cd ~/codes/ares/examples/

   mpiexec ~/codes/ares/build/src/ares3 INIT 2mpp_ares.ini

   exit

As per `Imperial
guidance <https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/computing/high-throughput-computing/configuring-mpi-jobs/>`__,
do not provide any arguments to ``mpiexec`` other than the name of the
program to run.

Submit the job via ``qsub job_example.bash``. The outputs will appear in
``~/codes/ares/examples``.

.. _select_resources_for_more_advanced_runs:

Select resources for more advanced runs
---------------------------------------

The key line in the submission script is:

.. code:: bash

   #PBS -l select=N:ncpus=Y:mem=Z:mpiprocs=P:ompthreads=W

It selects N nodes of Y cores each (i.e. N x Y cores will be allocated to
your job). On each node there will be P MPI ranks and each will be
configured to run W threads. You must have P x W <= Y (P x W = Y in all
practical situations). Using W=2 usually makes sense, since most nodes
have hyperthreading (2 logical cores per physical core).

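As a worked instance of the rule above (the numbers are hypothetical):
N=4 nodes of Y=32 cores each, with P=16 MPI ranks of W=2 threads per node,
so that P x W = Y:

.. code:: bash

   #PBS -l select=4:ncpus=32:mem=64gb:mpiprocs=16:ompthreads=2
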

89  docs/source/user/clusters/Occigen.inc.rst  Normal file
@ -0,0 +1,89 @@
.. _occigen:

Occigen
=======

Occigen is a CINES-managed supercomputer in France. You need a time
allocation to use it; check https://www.edari.fr.

Module setup
------------

Compile with Intel
~~~~~~~~~~~~~~~~~~

.. code:: bash

   module purge
   module load gcc/8.3.0
   module load intel/19.4
   # WARNING: openmpi 2.0.4 has a bug with multithreading, it causes hangs
   module load openmpi-intel-mt/2.0.2
   module load intelpython3/2019.3
   export OMPI_CC=$(which icc)
   export OMPI_CXX=$(which icpc)

Then run:

.. code:: bash

   bash build.sh --use-predownload --no-debug-log --perf --native --c-compiler icc --cxx-compiler icpc --f-compiler ifort --with-mpi --build-dir $SCRATCHDIR/ares-build-icc --cmake $HOME/.local/bin/cmake

Compile with gcc
~~~~~~~~~~~~~~~~

.. code:: bash

   module purge
   module load gcc/8.3.0
   # WARNING: openmpi 2.0.4 has a bug with multithreading, it causes hangs
   module load openmpi/gnu-mt/2.0.2
   module load intelpython3/2019.3
   export OMPI_CC=$(which gcc)
   export OMPI_CXX=$(which g++)

Prerequisite
~~~~~~~~~~~~

Download cmake >= 3.10:

.. code:: bash

   wget https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5.tar.gz

Be sure the above modules are loaded, and then compile:

.. code:: bash

   cd cmake-3.15.5
   ./configure --prefix=$HOME/.local
   nice make
   make install

On your laptop run:

.. code:: bash

   bash build.sh --download-deps
   scp -r downloads occigen:${ARES_ROOT_ON_OCCIGEN}

Build
-----

.. _with_intel:

With intel
~~~~~~~~~~

.. code:: bash

   bash build.sh --use-predownload --no-debug-log --perf --native --c-compiler icc --cxx-compiler icpc --f-compiler ifort --with-mpi --build-dir $SCRATCHDIR/ares-build-icc --cmake $HOME/.local/bin/cmake

.. _with_gcc:

With gcc
~~~~~~~~

.. code:: bash

   bash build.sh --use-predownload --no-debug-log --perf --native --c-compiler gcc --cxx-compiler g++ --f-compiler gfortran --with-mpi --build-dir $SCRATCHDIR/ares-build-gcc --cmake $HOME/.local/bin/cmake

80  docs/source/user/clusters/SNIC.inc.rst  Normal file
@ -0,0 +1,80 @@
.. _snic:

SNIC
====

These instructions are for building on Tetralith; variations for other
systems may occur.

Building at SNIC
----------------

Overview
~~~~~~~~

#. Ask for time
#. Load modules
#. Git clone the repo and get submodules
#. Use build.sh to build
#. Compile the code
#. Cancel remaining time

Detailed Instructions
~~~~~~~~~~~~~~~~~~~~~

1) ::

      interactive -N1 --exclusive -t 2:00:00

2) ::

      module load git
      module load buildenv-gcc/2018a-eb
      module load CMake/3.15.2

3) See instructions above

4) ::

      bash build.sh --with-mpi --cmake /software/sse/manual/CMake/3.15.2/bin/cmake --c-compiler /software/sse/manual/gcc/8.3.0/nsc1/bin/gcc --cxx-compiler /software/sse/manual/gcc/8.3.0/nsc1/bin/g++ --debug

   Note that these paths are NOT the ones from the buildenv (as loaded
   before). They are "hidden" in the system and not accessible from
   ``module avail``. If you try to compile with the buildenv versions,
   the compilation will fail (due to old versions of the compilers).

5) ::

      cd build
      make -j

6) Find the jobID from the response of ``squeue -u YOUR_USERNAME``, then
   cancel the remaining time::

      scancel JOBID

Running on Tetralith
--------------------

Use the following template:

.. code:: text

   #!/bin/bash
   ####################################
   #   ARIS slurm script template     #
   #                                  #
   #   Submit script: sbatch filename #
   #                                  #
   ####################################
   #SBATCH -J NAME_OF_JOB
   #SBATCH -t HH:MM:SS
   #SBATCH -n NUMBER_OF_NODES
   #SBATCH -c NUMBER_OF_CORES PER NODE (Max is 32)
   #SBATCH --output=log.%j.out # Stdout (%j expands to jobId) (KEEP AS IS)
   #SBATCH --error=error.%j.err # Stderr (%j expands to jobId) (KEEP AS IS)
   #SBATCH --account=PROJECT-ID
   export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK ## you have to explicitly set this
   mpprun ./PATH/TO/HADES3 INIT_OR_RESUME /PATH/TO/CONFIG/FILE.INI

9  docs/source/user/extras.rst  Normal file
@ -0,0 +1,9 @@
Extra modules
#############

.. _extras:

.. include:: extras/dm_sheet.inc.rst
.. include:: extras/hmclet.inc.rst
.. include:: extras/virbius.inc.rst
.. include:: extras/python.inc.rst

52  docs/source/user/extras/dm_sheet.inc.rst  Normal file
@ -0,0 +1,52 @@
dm_sheet
========

This is a module for ARES/HADES/BORG.
It adds the **dm_sheet** algorithms to compute cosmological fields from
the dark matter phase-space sheet (in particular, density and velocity
fields from the tetrahedra formalism).

``borg_forward`` supports the use of dm_sheet when it is available.

Setup
-----

To use this module, clone `the repository <https://bitbucket.org/bayesian_lss_team/dm_sheet/>`_ into $ARES_SOURCE/extra/ (where $ARES_SOURCE
represents the root source directory of ARES on your computer).

For example, you can do:

.. code:: bash

   cd $ARES_SOURCE/extra
   git clone git@bitbucket.org:/bayesian_lss_team/dm_sheet.git dm_sheet

and :ref:`rebuild <building>`.

Use
---

To use dm_sheet in ``borg_forward``, use the flag ``--dmsheet``. New
fields are then added to the :ref:`output files <outputs>`.

Contributors
------------

The main authors of this module are:

- Florent Leclercq
- Guilhem Lavaux

To add more features, please contact these people, or submit pull
requests.

Additional contributions from:

- James Prideaux-Ghee

References
----------

- T. Abel, O. Hahn, R. Kaehler (2012), Tracing the Dark Matter Sheet in Phase Space, arXiv:1111.3944
- O. Hahn, R. Angulo, T. Abel (2015), The Properties of Cosmic Velocity Fields, arXiv:1404.2280
- F. Leclercq, J. Jasche, G. Lavaux, B. Wandelt, W. Percival (2017), The phase-space structure of nearby dark matter as constrained by the SDSS, arXiv:1601.00093

109  docs/source/user/extras/hmclet.inc.rst  Normal file
@ -0,0 +1,109 @@
hmclet
======

Guilhem has developed a much smaller variant of the Hamiltonian Markov
Chain algorithm to jointly sample a limited set of parameters (fewer
than ~100).

This is **HMCLET**: a small extra HMC framework for |a| that allows sampling a bunch of model parameters
together. It provides a self-calibration step to estimate the masses for
the HMC.

Setup
-----

The code is available in the "hmclet" module. To use it, clone the
repository into extra/hmclet in the ARES source tree. You can for example
do:

.. code:: bash

   cd $ARES_SOURCE/extra
   git clone https://bitbucket.org/bayesian_lss_team/hmclet.git hmclet

Once it is checked out, you can move to the build directory and run
``cmake .``, then ``make``, and you will have the new module compiled.

You can run ``libLSS/tests/test_hmclet`` to check that no error is
triggered and verify the content of "test_sample.h5". It must contain a
chain with 2 parameters, the first of which oscillates around 1 with a
variance of 10 while the other oscillates around 4 with a variance of 2.

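One quick way to inspect that file is the ``h5ls`` utility shipped with
HDF5 (a sketch; the exact dataset layout may differ):

.. code:: bash

   h5ls -r test_sample.h5
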
Use
---

The Little HMC (HMClet, like Applet) framework consists of two classes
in the namespace ``LibLSS::HMCLet``:

- JointPosterior, which acts as a parent to your class
  describing the log-posterior,
- SimpleSampler, which uses an instance of JointPosterior to
  generate samples using the HMC algorithm.

There is a demonstration (and test case) available in
libLSS/tests/test_hmclet.cpp; please have a look at it.

To use SimpleSampler you have to make a new class deriving from
JointPosterior and implement three functions:

- ``getNumberOfParameters()``, which returns an integer corresponding to
  the number of parameters supported by your posterior
- ``evaluate(parameters)``, which returns the opposite of the
  log-posterior (i.e. like chi2/2)
- ``adjointGradient(parameters, adjoint_gradient)``, which fills the
  adjoint gradient vector corresponding to the given parameters.

An example is as follows:

.. code:: cpp

   class MyPosterior: virtual public JointPosterior {
   public:
     /* Bla bla for constructor and destructor */

     virtual size_t getNumberOfParameters() const {
       return 1;
     }

     virtual double evaluate(VectorType const& params) {
       return 0.5 * square(params[0]-1)/10.;
     }

     virtual void adjointGradient(VectorType const& params, VectorType& params_gradient) {
       params_gradient[0] = (params[0]-1)/10.;
     }
   };

The above posterior represents a Gaussian distribution centered on one,
with a variance of 10. It depends on a single parameter.

The sampling would occur like this:

.. code:: cpp

   auto posterior = std::make_shared<MyPosterior>();
   SimpleSampler sampler(posterior);

   /* Calibrate the mass matrix.
    * comm: MPI communication
    * rgen: Random number generator
    * steps: number of steps to attempt for calibration
    * init_params: initial parameters to start calibration
    * init_step: typical step size to start with
    */
   sampler.calibrate(comm, rgen, steps, init_params, init_step);

   /* Generate a sample with HMC
    * comm: MPI communication
    * rgen: Random number generator
    * params: current parameter state
    */
   sampler.newSample(comm, rgen, init_params);

Contributors
------------

- Guilhem Lavaux
- Jens Jasche

You can submit pull requests to the BLSS team admin.
643
docs/source/user/extras/python-jupyter/PM-tCOLA.ipynb
Normal file
File diff suppressed because one or more lines are too long

591
docs/source/user/extras/python.inc.rst
Normal file
@ -0,0 +1,591 @@
Python
======

This page presents the features of the ARES/BORG Python module.

Installation
------------

``bash get-aquila-modules.sh --clone`` automatically retrieves the
module.

Use the ``--python`` flag in ``build.sh`` (see :ref:`building <building>`). The
python package installation is automatic if you run ``make install``. At the end
of the make phase, a python module will be installed in the user site-package
directory and made available to the python VM. If you also need to run with a
python-defined likelihood (see :ref:`how to write a likelihood in python
<building_python_likelihood_script>`) with hades, then you also need to append
``--hades-python`` while executing ``build.sh``. This requirement will probably
go away later.

.. note::

   If you compile with MPI support, the Python binding interface
   will look for the MPI4PY package. If it is not found, it will just
   proceed as usual. However, if it is found, MPI4PY must have been
   compiled with the *same* MPI framework as ARES/BORG. Not doing so will
   very likely result in a segmentation fault when importing borg. A
   successful import will look like the following:

   .. code:: python3

      >>> import borg
      Initializing console.
      [INFO   ] libLSS version v2.0.0alpha-47-g7d560cc built-in modules ares_fg;borg;dm_sheet;hades;hmclet;python
      [INFO S ] Registered forward models:
      [INFO S ]   - 2LPT_CIC
      [INFO S ]   - 2LPT_CIC_OPENMP
      [INFO S ]   - 2LPT_DOUBLE
      [INFO S ]   - 2LPT_NGP
      [INFO S ]   - Downgrade
      [INFO S ]   - EnforceMass
      [INFO S ]   - HADES_LOG
      [INFO S ]   - HADES_PT
      [INFO S ]   - Haar
      [INFO S ]   - LPT_CIC
      [INFO S ]   - LPT_CIC_OPENMP
      [INFO S ]   - LPT_DOUBLE
      [INFO S ]   - LPT_NGP
      [INFO S ]   - PATCH_MODEL
      [INFO S ]   - PM_CIC
      [INFO S ]   - PM_CIC_OPENMP
      [INFO S ]   - PRIMORDIAL
      [INFO S ]   - PRIMORDIAL_FNL
      [INFO S ]   - Softplus
      [INFO S ]   - TRANSFER_EHU
      [INFO S ]   - Transfer
      [INFO S ]   - Upgrade
      [INFO S ]   - bias::BrokenPowerLaw
      [INFO S ]   - bias::DoubleBrokenPowerLaw
      [INFO S ]   - bias::EFT
      [INFO S ]   - bias::EFT_Thresh
      [INFO S ]   - bias::Linear
      [INFO S ]   - bias::ManyPower_1^1
      [INFO S ]   - bias::ManyPower_1^2
      [INFO S ]   - bias::ManyPower_1^4
      [INFO S ]   - bias::ManyPower_2^2
      [INFO S ]   - bias::Noop
      [INFO S ]   - bias::PowerLaw
      [INFO   ] Found MPI4PY.
      [INFO   ] CPU features: MMX [!AVX] [!AVX2] SSE SSE2 [!SSE3] [!SSE4.1] [!SSE4.2]
      >>>

As you can see, there is a line "Found MPI4PY".

Usage
-----

First step:

.. code:: python

   import borg

   # This retrieves the console management object
   console = borg.console()
   # This prints at the STD level
   console.print_std("Hello!")
   # Reduce verbosity
   console.setVerboseLevel(2)


.. _building_your_first_chain:

Building your first chain
-------------------------

The BORG python pipeline closely follows the BORGForwardModel v2 API.
This means that the input is assumed to be Gaussian random numbers with
unit variance in Fourier space. Fortunately the generation of such
numbers is easy:

.. code:: python3

   import numpy as np

   # Define a physical box (optional for this step, but it will be useful later)
   box = borg.forward.BoxModel()
   box.L = (200,200,200)
   box.N = (64,64,64)

   # Generate gaussian random numbers, Fourier transform them, and rescale to ensure unit variance
   ic = np.fft.rfftn(np.random.randn(*box.N))/box.N[0]**(1.5)

In the above code snippet we have also defined a BORG box, which is at
the moment limited to 3d. ``box.L`` is the physical size (in Mpc/h) of
the box in each direction, while ``box.N`` is the grid size. In the
above you see that the Fourier transformed density has been rescaled by
:math:`1/\sqrt{N^3}`. This follows from simple linear algebra
properties and the requirement of unit variance in the Fourier
representation.
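
As a quick sanity check (a sketch, not part of the pipeline), one can
verify numerically that the modes of ``ic`` generated above indeed have
unit variance overall:

.. code:: python3

   # Var[Re] + Var[Im] of the complex modes should be close to 1
   # (up to Hermitian-symmetry effects on the self-conjugate planes).
   print(np.var(ic.real) + np.var(ic.imag))  # ~ 1
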
Now we need to create a new chain object:

.. code:: python3

   chain = borg.forward.ChainForwardModel(box)
   chain.addModel(borg.forward.models.HermiticEnforcer(box))

We have immediately added an element that enforces the elements of
the input Fourier density field to be self-complex-conjugated. This is
not strictly required here as ``ic`` was generated by ``np.fft.rfftn``.

Our first real element of the chain is the injection of primordial
gravity fluctuations:

.. code:: python3

   chain.addModel(borg.forward.models.Primordial(box, 0.1))

This multiplies, in Fourier space, the input density with a function
:math:`A(k) \propto -k^{n_S/2-2}`. The exact constant of proportionality
depends on :math:`\sigma_8` (or :math:`A_S`), the volume and the Hubble
constant. Note the ``0.1``, which indicates the scale factor at which the
potential is seeded in the chain. The next elements depend on that
number.

The next element is to add a physical transfer function to produce
density fluctuations out of this gravitational potential:

.. code:: python3

   chain.addModel(borg.forward.models.EisensteinHu(box))

This is a simple Eisenstein & Hu power spectrum, which does not change
the scale factor of the universe.

Now we need to add a real gravity solver. One simple solver is provided
by "BorgLpt" (BORG 1-Lagrangian Perturbation Theory, also known as the
Zel'dovich approximation).

.. code:: python3

   lpt = borg.forward.models.BorgLpt(box=box, box_out=box, ai=0.1, af=1.0, supersampling=4)
   chain.addModel(lpt)

(**Question from Andrija**: What does the supersampling param control?
The ai and af look intuitive enough, for initial scale factor and final
one essentially controlling the time, but supersampling I don't
understand. Also doing help(borg.forward.models.BorgLpt) didn't help me
much in understanding)

In the above case we keep the object ``lpt`` in the current scope to be
able to access more internal state later.

We can now set up the cosmology:

.. code:: python3

   cosmo_par = borg.cosmo.CosmologicalParameters()
   cosmo_par.default()
   print(repr(cosmo_par))
   chain.setCosmoParams(cosmo_par)

We have used some sane defaults for the cosmology in the above. The
values of the parameters are printed using the print statement. All the
elements of the chain are updated with the last statement. They
try to do this "lazily", i.e. if the cosmology has not changed, nothing
will happen (as updating the internal cached state may be very costly).

The model is run with ``chain.forwardModel_v2(ic)``, which goes through
the entire chain. The final density field is not yet produced. To do
this we need to request it explicitly:

.. code:: python3

   rho = np.empty(chain.getOutputBoxModel().N)
   chain.getDensityFinal(rho)

``rho`` now holds the density contrast of the simulation. In IPython, one
can check a slice using:

.. code:: python3

   from matplotlib import pyplot as plt
   plt.imshow(rho[:,:,chain.getOutputBoxModel().N[2]//2])
   plt.show()

Computing the adjoint gradient
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The evaluation of the adjoint gradient follows the same pattern as
the forward evaluation. Instead of the pair ``forwardModel_v2`` and
``getDensityFinal``, one must use ``adjointModel_v2`` and
``getAdjointModel``. However, keep in mind that the shapes of the arrays
are reversed: ``adjointModel_v2`` requires an array shaped according to
the output of the forward model. Thus we have:

.. code:: python3

   dlogL_drho = np.empty(chain.getOutputBoxModel().N)
   # Here fill up dlogL_drho from the gradient of the likelihood
   chain.adjointModel_v2(dlogL_drho)
   ic = np.empty(chain.getBoxModel().N)
   chain.getAdjointModel(ic)

Note also that we have requested the initial conditions in real
representation (and not Fourier). A Fourier representation may be
requested by providing an adequately sized complex array.
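
For instance, to get the adjoint in Fourier representation instead, one
would allocate a complex array; the half-spectrum shape below is an
assumption based on the ``rfftn`` layout used earlier:

.. code:: python3

   N0, N1, N2 = chain.getBoxModel().N
   # Half-spectrum (rfftn-like) layout is assumed here.
   ag_fourier = np.empty((N0, N1, N2//2 + 1), dtype=np.complex128)
   chain.getAdjointModel(ag_fourier)
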
Computing the velocity field
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

BORG comes pre-bundled with velocity field estimators (along with their
adjoint gradients, of course). A very simple estimator is provided by the
CIC density estimator. It requires a particle-based simulator to
estimate the velocity field from. Such particle-based simulators are for
example BorgLpt, Borg2Lpt or BorgPM. If the types are not compatible, an
exception will be thrown.

The usage is simple, here is an example:

.. code:: python3

   vmodel = borg.forward.velocity.CICModel(box, lpt)
   out_v = np.empty((3,)+box.N)
   vmodel.getVelocityField(out_v)

The first statement creates the velocity field estimator, with the
requested box to be produced and the particle-based forward model
``lpt`` (same variable as in the :ref:`section "Building your first chain" <building_your_first_chain>`). The second statement
allocates the required memory. The last statement triggers the
computation. The above statements shall be run after executing
``forwardModel_v2`` on the ``chain`` object.

One can then show a slice (here of the x-component), and check the
compatibility with the density field:

.. code:: python3

   plt.imshow(out_v[0,:,:,chain.getOutputBoxModel().N[2]//2])
   plt.show()

Computing some bias models directly
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

PyBORG has a submodule called "bias" which provides a direct route to
some of the bundled bias models (in C++ those are the generic bias
models). Not all models are linked in, though. The usage is relatively
straightforward. We will use EFTBiasDefault as an example:

.. code:: python3

   import numpy as np
   import borg

   boxm = borg.forward.BoxModel()
   model = borg.forward.models.HadesLinear(boxm, 0.1, 1.0)

   bias_model = borg.bias.EFTBiasDefault(0.1)

   density = np.random.normal(size=boxm.N)
   biased_density = np.zeros(boxm.N)

   params = np.ones(7)

   bias_model.compute(model, 1.0, params, density, biased_density)

The example starts by loading the ``borg`` module. Then we just
construct a forward model element for the example using ``HadesLinear``.
In your code that should be a reasonable element that you used to
produce the matter density field. The bias model may try to interact
directly with that element, so it is good practice to provide
meaningful elements. Then we construct a bias model object,
``EFTBiasDefault``. This one has a mandatory argument to specify the
``Lambda`` parameter in that specific model, which we set to
:math:`0.1h \mathrm{Mpc}^{-1}` here. The next steps are just
initialization of the fields used for ``bias_model.compute``. As can be
directly inferred from the call, the following arguments are required:

- a borg forward model (``model``)
- the value of nmean (though it could be ignored depending on the
  specific bias model)
- a 1d numpy array of float64 for the parameters of the model
- the 3d density contrast (``density``)
- the output 3d biased density (``biased_density``)

Running with MPI
----------------

Using MPI requires some care that is not completely handled
automatically.

One may initialize python with MPI like this:

.. code:: python3

   import numpy as np
   import borg
   from mpi4py import MPI

   comm = MPI.COMM_WORLD
   rank = comm.Get_rank()
   size = comm.Get_size()

In rank and size you will now have the rank of the current process in
the MPI communicator, and size will hold the total size. Then a typical
initialization chain of forward models may be constructed as indicated
:ref:`there <building_your_first_chain>`. Assuming that chain is such an
object, one may query the expected slabs with ``getMPISlice()``
(for the input) and ``getOutputMPISlice()`` (for the output):

.. code:: python3

   startN0,localN0,in_N1,in_N2 = chain.getMPISlice()
   out_startN0,out_localN0,out_N1,out_N2 = chain.getOutputMPISlice()

These may be used like this:

.. code:: python3

   x = np.zeros((localN0,in_N1,in_N2))
   if localN0 > 0:
       x[:,:,:] = ref_data[startN0:(startN0+localN0),:,:]

with ``ref_data`` some array that covers the entire box. As you can see,
the ``x`` array requires only the part between startN0 and
startN0+localN0 of that array. In practice that array (``ref_data``) may
not have to exist in memory.

Then ``x`` may be directly provided to ``chain.forwardModel_v2`` as a
first argument. The output density field follows the same rule as the
input density field.
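
Putting the pieces together, a minimal MPI-aware forward run could look
like the following sketch (assuming ``chain`` was built as in the
:ref:`section "Building your first chain" <building_your_first_chain>`
and each rank fills its own slab):

.. code:: python3

   # Input slab owned by this rank
   startN0, localN0, in_N1, in_N2 = chain.getMPISlice()
   x = np.zeros((localN0, in_N1, in_N2))
   # ... fill x with this rank's part of the initial conditions ...

   chain.forwardModel_v2(x)

   # The output slab follows the same slicing convention
   out_startN0, out_localN0, out_N1, out_N2 = chain.getOutputMPISlice()
   rho_local = np.zeros((out_localN0, out_N1, out_N2))
   chain.getDensityFinal(rho_local)
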
Writing a new forward model
---------------------------

The interface of the forward model in python closely follows the one in
C++. The basic skeleton is given by the following lines of code:

.. code:: python3

   import jax

   class MyModel(borg.forward.BaseForwardModel):
       # Constructor
       def __init__(self, box):
           super().__init__(box, box)

       # IO "preferences"
       def getPreferredInput(self):
           return borg.forward.PREFERRED_REAL

       def getPreferredOutput(self):
           return borg.forward.PREFERRED_REAL

       # Forward part
       def forwardModel_v2(self, input_array):
           self.save = jax.numpy.array(input_array)

       def getDensityFinal(self, output_array):
           output_array[:] = self.save**2

       # Adjoint part
       def adjointModel_v2(self, input_ag):
           self.ag = input_ag

       def getAdjointModel(self, output_ag):
           output_ag[:] = 2 * self.ag * self.save

There are four main groups of functions that need to be implemented:

- the constructor. It is crucial that the constructor of the parent is
  explicitly called. Otherwise the interface will not work. The parent
  constructor takes two arguments: the input box (of type
  ``borg.forward.BoxModel``) and the output box (same type).
- the functions providing the "preferred IO" for the forward and adjoint
  functions. In practice the preference is enforced for python. This
  means that the value indicated here will change the kind of arrays
  that are provided to the forward and adjoint parts. At the moment two
  types of IO are possible:

  - PREFERRED_REAL: the model wants a 3d real space representation as
    an argument
  - PREFERRED_FOURIER: the model wants a 3d fourier space
    representation as an argument

- then the forward evaluation part itself has to be implemented in two
  pieces: ``forwardModel_v2`` and ``getDensityFinal`` (it is optional
  depending on what is put after that model). It is expected that
  ``forwardModel_v2`` executes the main part of the computation but it
  is not fully required.
- finally the computation of the adjoint gradient follows the same
  pattern as the forward computation. The difference is that the types
  and shapes of arrays are reversed: ``input_ag`` has a shape/type
  corresponding to the **output** and ``output_ag`` to the **input**.

Finally, as shown above, the input/output arrays use a numpy
interface. They can thus be used in JAX/Tensorflow/whatever. In the
example code above the input array is saved in a jax array and evaluated
later. This is legit, though bear in mind that this means there will be
memory that will not be freed while you retain that reference.
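
As a usage sketch (under the same assumptions as the previous sections,
with ``box`` a ``borg.forward.BoxModel``), such a model can be inserted
into a chain like any built-in element. Since it declares PREFERRED_REAL
IO, it is fed a real-space 3d array here:

.. code:: python3

   chain = borg.forward.ChainForwardModel(box)
   chain.addModel(MyModel(box))

   # The model asked for PREFERRED_REAL, so a real-space field is provided.
   field = np.random.randn(*box.N)
   chain.forwardModel_v2(field)

   rho = np.empty(chain.getOutputBoxModel().N)
   chain.getDensityFinal(rho)   # rho now holds the squared field
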
.. _building_python_likelihood_script:

Build a python likelihood script
--------------------------------

Ini file
~~~~~~~~

.. code:: text

   [python]
   likelihood_path=test_likelihood.py
   bias_sampler_type=slice

The hades_python initializers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A typical python likelihood requires three initialization functions. They
must be registered using the helper decorators
borg.registerGravityBuilder (for the forward model),
borg.registerLikelihoodBuilder (for the bias+likelihood part), and
borg.registerSamplerBuilder (for extra sampling strategies).

An example of their use is the following piece of code:

.. code:: python3

   import borg

   @borg.registerGravityBuilder
   def build_gravity_model(state, box):
       global model
       chain = borg.forward.ChainForwardModel(box)
       chain.addModel(borg.forward.models.HermiticEnforcer(box))
       chain.addModel(borg.forward.models.Primordial(box, 1.0))
       chain.addModel(borg.forward.models.EisensteinHu(box))
       model = chain
       return chain


   @borg.registerLikelihoodBuilder
   def build_likelihood(state, info):
       boxm = model.getBoxModel()
       return MyLikelihood(model, boxm.N, boxm.L)


   @borg.registerSamplerBuilder
   def build_sampler(state, info):
       return []

The build_gravity_model function returns a BORGForwardModel object, and
takes a MarkovState and a BoxModel as parameters. The build_likelihood
function must return a Likelihood3d object (check
help(borg.likelihood.Likelihood3d)). Finally build_sampler must return a
list of sampler objects.

The forward model elements can be either C++ or Python objects and
both work transparently. Likelihoods may also be written in pure python,
though MPI is still untested at this time (August 2020).

Writing a likelihood
~~~~~~~~~~~~~~~~~~~~

In the previous section we have seen how to build the objects required
by hades_python to analyze data. We have not yet covered how to write a
likelihood in python. A lot of likelihoods and biases are already
available from the C++ side, for example
``borg.likelihood.GaussianPassthrough``,
``borg.likelihood.GaussianLinear`` or
``borg.likelihood.PoissonPowerLaw``. To create new ones easily in
python, one has to write a class inheriting from
``borg.likelihood.BaseLikelihood`` and implement a number of functions.
An example of a simple gaussian likelihood is shown below:

.. code:: python3

   import numpy as np
   import borg

   cons = borg.console()

   myprint = lambda x: cons.print_std(x) if type(x) == str else cons.print_std(
       repr(x))

   # Noise amplitude assumed by this example likelihood
   sigma_noise = 0.1


   class MyLikelihood(borg.likelihood.BaseLikelihood):
       def __init__(self, fwd, N, L):
           myprint(f" Init {N}, {L}")
           super().__init__(fwd, N, L)

       def initializeLikelihood(self, state):
           myprint("Init likelihood")
           self.data = state['galaxy_data_0']
           state.newArray3d("my_density_field", True, self.data.shape[0],
                            self.data.shape[1], self.data.shape[2])

       def updateMetaParameters(self, state):
           cpar = state['cosmology']
           myprint(f"Cosmology is {cpar}")
           self.getForwardModel().setCosmoParams(cpar)

       def generateMockData(self, s_hat, state):
           fwd = self.getForwardModel()
           output = np.zeros(fwd.getOutputBoxModel().N)
           fwd.forwardModel_v2(s_hat)
           fwd.getDensityFinal(output)

           state['galaxy_data_0'][:] = output + np.random.normal(
               size=output.shape) * sigma_noise
           state['my_density_field'][:] = output
           like = ((state['galaxy_data_0'][:] - output)**2).sum() / sigma_noise**2
           myprint(
               f"Initial log_likelihood: {like}, var(s_hat) = {np.var(s_hat)}")

       def logLikelihoodComplex(self, s_hat, gradientIsNext):
           fwd = self.getForwardModel()

           output = np.zeros(fwd.getBoxModel().N)
           fwd.forwardModel_v2(s_hat)
           fwd.getDensityFinal(output)
           L = 0.5 * ((output - self.data)**2).sum() / sigma_noise**2
           myprint(f"var(s_hat): {np.var(s_hat)}, Call to logLike: {L}")
           return L

       def gradientLikelihoodComplex(self, s_hat):
           fwd = self.getForwardModel()
           output = np.zeros(fwd.getOutputBoxModel().N)
           fwd.forwardModel_v2(s_hat)
           fwd.getDensityFinal(output)
           mygradient = (output - self.data) / sigma_noise**2
           fwd.adjointModel_v2(mygradient)
           mygrad_hat = np.zeros(s_hat.shape, dtype=np.complex128)
           fwd.getAdjointModel(mygrad_hat)
           return mygrad_hat

The function ``myprint`` is a helper to create nice output that streams
correctly with the rest of the C++ code. It is not mandatory, but it is
strongly recommended to use the borg.console() object as it will
seamlessly integrate with other BORG tools. Note that ``sigma_noise``,
the noise amplitude of this toy Gaussian model, is assumed to be defined
as shown at the top of the snippet.

We will now look at each function one after the other:

- ``__init__`` is the constructor. It is crucial that the base
  constructor is called in the constructor of the new class: it will
  not be done implicitly by the python virtual machine. The base
  constructor takes a ``BORGForwardModel`` object, and the grid
  specifications ``N`` and ``L`` as tuples.
- ``initializeLikelihood`` is called at the initialization of the chain
  and before restoration. If you want to store additional fields in the
  mcmc, you should allocate them at that moment in the state object. In
  the above example, a new 3d array is allocated to store the density
  field after the forward model evaluation.

Note that the forward model has to be evaluated in both the log likelihood
and its gradient. Though it is in principle required to implement
logLikelihood and gradientLikelihood (the real counterparts of the
complex functions above), in practice they are not used for the run.

More python jupyter tutorials
-----------------------------

.. toctree::
   :maxdepth: 1

   extras/python-jupyter/PM-tCOLA

* A notebook to showcase tCOLA and its convergence by looking at :math:`P(k)` is here__.

__ extras/python-jupyter/PM-tCOLA.ipynb
4
docs/source/user/extras/virbius.inc.rst
Normal file
@ -0,0 +1,4 @@

virbius
=======

*To be written...*

10
docs/source/user/inputs.rst
Normal file
@ -0,0 +1,10 @@
Inputs
######

.. include:: inputs/Configuration_file_v1.inc.rst
.. include:: inputs/Configuration_file_v2.inc.rst
.. include:: inputs/Configuration_file_v2.1.inc.rst
.. include:: inputs/Create_config-file.inc.rst
.. include:: inputs/Text_catalog_format.inc.rst
.. include:: inputs/HDF5_catalog_format.inc.rst
.. include:: inputs/Radial_selection.inc.rst

249
docs/source/user/inputs/Configuration_file_v1.inc.rst
Normal file
@ -0,0 +1,249 @@
.. _configuration_file:

ARES_Configuration_file_v1
==========================

The configuration file for ARES uses the INI file syntax. It is
separated into sections, among which three are main sections.

Main sections
-------------

Section [system]
~~~~~~~~~~~~~~~~

- console_output: Holds the prefix filename for all log output files.
- VERBOSE_LEVEL: Set the verbosity level for the console. Files get all
  outputs.
- N0: Number of grid elements along the X axis.
- N1: Same for Y axis.
- N2: Same for Z axis.
- L0: Comoving length of the X axis
- L1: Same for Y axis
- L2: Same for Z axis
- corner0: Center of the voxel at the corner of the box in -X
  direction, this should be the smallest X value.
- corner1: Same for Y
- corner2: Same for Z
- NUM_MODES: number of bins to represent the power spectrum
- N_MC: Maximum number of markov chain samples to produce in a single
  run (**Note:** Used only for *v1*)
- borg_supersampling: Supersampling level of the grid for intermediate
  calculations. The number of particles is
  N0*N1*N2*borg_supersampling**3
- hades_likelihood: Likelihood to use in a HADES run. Can be either one
  of those values:

  - BORG_POISSON: Use poisson likelihood
  - BORG_POISSON_POWER:
  - BORG_VOODOO:
  - BORG_VOODOO_MAGIC:
  - BORG_LINEAR: ARES likelihood model. Noise is Gaussian with
    variance equal to :math:`S \bar{N}`. Uses power law bias.
  - BORG_SH:
  - BORG_NB: Negative binomial. Broken power law bias.
  - Generic framework:

    - GAUSSIAN_BROKEN_POWERLAW_BIAS
    - GAUSSIAN_MO_WHITE_BIAS: Gaussian noise model, variance is
      fitted. Double power law bias
    - GAUSSIAN_POWERLAW_BIAS
    - GAUSSIAN_2ND_ORDER_BIAS
    - GENERIC_POISSON_BROKEN_POWERLAW_BIAS
    - GENERIC_GAUSSIAN_LINEAR_BIAS
    - GENERIC_GAUSSIAN_MANY_POWER_1^1
    - GENERIC_GAUSSIAN_MANY_POWER_1^2
    - GENERIC_GAUSSIAN_MANY_POWER_1^4
    - GENERIC_POISSON_MANY_POWER_1^1
    - GENERIC_POISSON_MANY_POWER_1^2
    - GENERIC_POISSON_MANY_POWER_1^4

- hades_forward_model: Forward model to use

  - LPT: Lagrangian perturbation theory, ModifiedNGP/Quad final
    projection
  - 2LPT: Second order Lagrangian perturbation theory,
    ModifiedNGP/Quad final projection
  - PM: Particle mesh, ModifiedNGP/Quad final projection
  - LPT_CIC: Same as LPT, but uses CIC for the final projection
  - 2LPT_CIC: Same as 2LPT, but uses CIC for the final projection
  - PM_CIC: Same as PM, but uses CIC for the final projection
  - HADES_LOG: Use Exponential transform (HADES model) for the forward
    model. Preserved mean density is enforced.

- borg_do_rsd: Do redshift space distortion if set to "true".

- projection_model: Specifies which projection to use for data. No
  constraints are enforced on the likelihood, but of course they should be matched
  to the value adopted here. The value is inspected in ``src/common/projection.hpp``.
  There are two available at the moment: ``number_ngp`` and ``luminosity_cic``.
  The ``number_ngp`` is just Nearest-Grid-Point number counting.
  The ``luminosity_cic`` uses the value in ``Mgal`` to weight the object
  before doing the CIC projection.

  - number_ngp: just counts the number of galaxies/objects within a voxel
  - luminosity_cic: weights galaxies by their luminosity and does a CIC projection.

- test_mode: Runs ARES/BORG/HADES in test mode. Data is not used, mock
  data is generated on the fly.
- seed_cpower: Set to true to seed the power spectrum with the correct
  one according to the cosmology section. Otherwise it is set to a
  small fraction of it.
- hades_max_epsilon: Stepsize for the HMC. It is unitless. A good
  starting point is around 0.01.
- hades_max_timesteps: Maximum number of timesteps for a single HMC
  sample.
- hades_mixing: Number of samples to compute before writing to disk.
- savePeriodicity: This reduces the number of times the restart files
  are dumped to the hard drives. This is useful for reducing I/Os, as
  restart files are heavy. You can set this to a number that is a
  multiple of the number of mcmc steps. For example, 20 tells ares to
  dump restart files every 20 mcmc steps.
- mask_precision: Precision to which you want to compute the mask. By
  default it is "0.01", which is not related to the actual precision
  (unfortunately not yet). It allows scaling the internal number of
  evaluations of the selection function. So 0.001 will call it 100 times
  more. The advice is not to decrease below 0.01.
- furious_seeding: if set to true the core sampler will reseed itself
  from a system entropy source at each step of the MCMC. That means the
  MCMC becomes unpredictable and the seed number is discarded.
- simulation: if set to true, switches to N-body simulation analysis.
  Additional cuts are possible depending on masses, spins, etc., of
  halos.

Likelihoods that use the generic bias framework (currently
GAUSSIAN_MO_WHITE_BIAS) also support the following tags:

- bias_XX_sampler_generic_blocked: if set to true, the XX parameter of
  the bias will not be sampled. XX varies depending on the
  likelihood.
- block_sigma8_sampler: true by default; to sample sigma8 in the
  initial conditions, set this to false

Section [run]
~~~~~~~~~~~~~

- NCAT: Number of catalogs. This affects the number of "catalog"
  sections.

- SIMULATION: Specify if the input is from simulation. Default is
  false.

Section [cosmology]
~~~~~~~~~~~~~~~~~~~

- omega_r: Radiation density
- omega_k: Curvature
- omega_m: Total matter density
- omega_b: Baryonic matter density
- omega_q: Quintessence density
- w: Quintessence equation of state
- wprime: Derivative of the equation of state
- n_s: Slope of the power spectrum of scalar fluctuations
- sigma8: Normalisation of the power spectrum at 8 Mpc/h
- h100: Hubble constant in units of 100 km/s/Mpc

Section [julia]
~~~~~~~~~~~~~~~

- likelihood_path: Path to the julia file describing the likelihood
  (i.e. the main entry point for BORG in the likelihood)
- likelihood_module: Name of the julia module holding the likelihood
- bias_sampler_type: slice or hmclet, which sampling strategy to use to
  sample the "bias" parameters
- ic_in_julia: true or false, whether the initial condition of the
  Markov Chain is set in julia
- hmclet_diagonalMass: whether to use a diagonal or a dense mass matrix
  estimated on the fly
- hmclet_burnin: number of steps allowed in "BURN IN" mode. This
  depends on the complexity of the likelihood. A few hundred seems
  reasonable.
- hmclet_burnin_memory: size of the memory in "BURN IN" mode. Something
  like 50 is advocated to be sure it is fairly local but not too noisy.
- hmclet_maxEpsilon: maximum epsilon for the HMC integrator (take order
  0.01)
- hmclet_maxNtime: maximum number of timesteps for the HMC integrator
  (take a few tens, like 20-50)
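
A minimal sketch of such a section, with purely hypothetical values,
could look like:

.. code:: text

   [julia]
   likelihood_path=my_likelihood.jl
   likelihood_module=MyLikelihood
   bias_sampler_type=hmclet
   hmclet_burnin=300
   hmclet_burnin_memory=50
   hmclet_maxEpsilon=0.01
   hmclet_maxNtime=50
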
Catalog sections
----------------

Basic fields
~~~~~~~~~~~~

- datafile: Text filename holding the data
- maskdata: Healpix FITS file with the mask
- radial_selection: Type of selection function, can be either
  "schechter", "file" or "piecewise".
- refbias: true if this catalog is a reference for bias. Bias will not
  be sampled for it
- bias: Default bias value, also used for mock generation
- nmean: Initial mean galaxy density value, also used for mock
  generation

Halo selection
~~~~~~~~~~~~~~

- halo_selection: Specifies how to select the halos from the halo catalog. Can be ``mass, radius, spin or mixed``. The ``mixed`` option represents the combined cuts and can be applied by specifying, e.g., "halo_selection = mass radius"
- halo_low_mass_cut: this is log10 of mass in the same unit as the
  masses of the input text file
- halo_high_mass_cut: same as for halo_low_mass_cut, this is log10 of
  mass
- halo_small_radius_cut
- halo_large_radius_cut
- halo_small_spin_cut
- halo_high_spin_cut

Schechter selection function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- schechter_mstar: Mstar for the Schechter function
- schechter_alpha: Power law slope of the Schechter function
- schechter_sampling_rate: How many distance points to precompute from
  the Schechter function (e.g. 1000)
- schechter_dmax: Maximum distance to precompute the Schechter selection
  function
- galaxy_bright_apparent_magnitude_cut: Apparent magnitude where data
  and selection must be truncated, bright end.
- galaxy_faint_apparent_magnitude_cut: Same for faint end.
- galaxy_bright_absolute_magnitude_cut: Absolute magnitude cut in data
  and selection function, bright end, useful to select different galaxy
  populations
- galaxy_faint_absolute_magnitude_cut: Similar but faint end
- zmin: Minimum redshift for galaxy sample, galaxies will be truncated
- zmax: Maximum redshift for galaxy sample, galaxies will be truncated

'File' selection function
~~~~~~~~~~~~~~~~~~~~~~~~~

- radial_file: Text file to load the selection from

The file has the following format. Each line starting with a '#' is a
comment line, and discarded. The first line is a set of three numbers:
'rmin dr N'. Each line that follows must be a number between 0 and 1
giving the selection function at a distance r = rmin + dr \* i, where
'i' is the line number (zero based). Finally 'N' is the number of points
in the text file.
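
For illustration, a hypothetical selection file with rmin=0, dr=10 and
N=4 points would look like this:

.. code:: text

   # selection function: rmin dr N
   0 10 4
   1.0
   0.8
   0.5
   0.2
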
Two possibilities are offered for adjusting the catalog and the
selection together:

- either you choose not to do anything, and take the whole sample and
  the provided selection. Then you need to specify:

  - file_dmin: Minimal distance for selection function and data
  - file_dmax: same but maximal distance
  - no_cut_catalog: set to false; if you do not set this you will get
    an error message.

- or you want ares to preprocess the catalog, and then you need:

  - zmin
  - zmax
  - galaxy_faint_apparent_magnitude_cut: Same for faint end.
  - galaxy_bright_absolute_magnitude_cut: Absolute magnitude cut in
    data and selection function, bright end, useful to select
    different galaxy populations
  - galaxy_faint_absolute_magnitude_cut: Similar but faint end
  - no_cut_catalog: (not necessary, as it defaults to true)

378
docs/source/user/inputs/Configuration_file_v2.1.inc.rst
Normal file
@ -0,0 +1,378 @@
ARES_Configuration_file_v2.1
============================

The configuration file for ARES uses the INI file syntax. It is
separated into sections, among which three are main sections.

Main sections
-------------

Section [system]
~~~~~~~~~~~~~~~~

- console_output: Holds the prefix filename for all log output files.
- VERBOSE_LEVEL: Set the verbosity level for the console. Files get all
  outputs.
- N0: Number of grid elements along the X axis.
- N1: Same for Y axis.
- N2: Same for Z axis.
- **Optionally:**

  - Ndata0, Ndata1, Ndata2 specify the same thing as N0, N1, N2 but
    for the projection grid of the galaxy positions. This grid must be
    different in the case the degrader bias pass is used (see bias
    model section)

- L0: Comoving length of the X axis
- L1: Same for Y axis
- L2: Same for Z axis
- corner0: Center of the voxel at the corner of the box in -X
  direction, this should be the smallest X value.
- corner1: Same for Y
- corner2: Same for Z
- NUM_MODES: number of bins to represent the power spectrum

- projection_model: Specifies which projection to use for data. No
  constraints are enforced on the likelihood, but of course they should be matched
  to the value adopted here. The value is inspected in ``src/common/projection.hpp``.
  There are two available at the moment: ``number_ngp`` and ``luminosity_cic``.
  The ``number_ngp`` is just Nearest-Grid-Point number counting.
  The ``luminosity_cic`` uses the value in ``Mgal`` to weight the object
  before doing the CIC projection.

  - number_ngp: just counts the number of galaxies/objects within a voxel
  - luminosity_cic: weights galaxies by their luminosity and does a CIC projection.

- test_mode: Runs ARES/BORG/HADES in test mode. Data is not used, mock
  data is generated on the fly.
- seed_cpower: Set to true to seed the power spectrum with the correct
  one according to the cosmology section. Otherwise it is set to a
  small fraction of it.
- savePeriodicity: This reduces the number of times the restart files
  are dumped to the hard drives. This is useful for reducing I/Os, as
  restart files are heavy. You can set this to a number that is a
  multiple of the number of mcmc steps. For example, 20 tells ares to
  dump restart files every 20 mcmc steps.
- mask_precision: Precision to which you want to compute the mask. By
  default it is "0.01", which is not related to the actual precision
  (unfortunately not yet). It allows scaling the internal number of
  evaluations of the selection function. So 0.001 will call it 100 times
  more. The advice is not to decrease below 0.01.
- furious_seeding: if set to true the core sampler will reseed itself
  from a system entropy source at each step of the MCMC. That means the
  MCMC becomes unpredictable and the seed number is discarded.

Section [block_loop]
~~~~~~~~~~~~~~~~~~~~

- hades_sampler_blocked: Prevents the density field from being sampled

Likelihoods that use the generic bias framework (currently
GAUSSIAN_MO_WHITE_BIAS) also support the following tags:

- bias_XX_sampler_generic_blocked: if set to true, the XX parameter of
  the bias will not be sampled. XX varies depending on the
  likelihood. **WARNING: the code has not yet been updated to look for
  these variables in [block_loop], they should still be located in
  [system] at the moment.**
- sigma8_sampler_blocked: true by default; to sample sigma8 in the
  initial conditions, set this to false

Section [mcmc]
~~~~~~~~~~~~~~

- number_to_generate: Maximum number of markov chain samples to produce
  in a single run
- init_random_scaling: This is more specific to HADES. It starts the
  MCMC run with a random initial condition, scaled with this number
  (default 0.1) compared to the reference initial power spectrum.
- random_ic: true if the ic must be reshuffled before starting the MCMC
  sampling, false to keep them at the value generated by the mock
  data generator
- scramble_bias: true (default), reset the bias values to some other
  values before starting the chain, after generating the mock.

Section [gravity]
~~~~~~~~~~~~~~~~~

- model: Forward model to use

  - LPT: Lagrangian perturbation theory, ModifiedNGP/Quad final
    projection
  - 2LPT: Second order Lagrangian perturbation theory,
    ModifiedNGP/Quad final projection
  - PM: Particle mesh, ModifiedNGP/Quad final projection
  - LPT_CIC: Same as LPT, but uses CIC for the final projection
  - 2LPT_CIC: Same as 2LPT, but uses CIC for the final projection
  - PM_CIC: Same as PM, but uses CIC for the final projection
  - tCOLA: Same as PM_CIC but uses a TCOLA gravity machine. To enable,
    specify model=PM_CIC, as above, AND set tCOLA=true.
  - HADES_LOG: Use Exponential transform (HADES model) for the forward
    model. Preserved mean density is enforced.

- supersampling: Controls the number of particles (supersampling level
  of the particle grid with respect to the grid). The number of
  particles is N0*N1*N2*supersampling**3
- forcesampling
- a_initial
- a_final
- pm_start_z:
- pm_nsteps:
- part_factor:
- lightcone:
- do_rsd: Do redshift space distortion if set to "true".

Forward model elements can as well be chained and have different grid sizes. *"model"* can now be CHAIN, which then needs a specific list of model layers in *"models"*.

Here is an example:

.. code:: text

   [gravity]
   model=CHAIN
   models=PRIMORDIAL,TRANSFER_EHU,LPT_CIC
   [gravity_chain_0]
   a_final=0.001
   [gravity_chain_1]
   [gravity_chain_2]
   supersampling=2
   lightcone=false
   do_rsd=false
   a_initial=0.001
   a_final=1.
   part_factor=2.0
   mul_out=1

Each element of the chain gets its own configuration section, which is
the same as previously when it was a global descriptor (see above). Note
that if you use the chain mechanism, you have to be explicit about the
production of the initial conditions power spectrum. As you can see above,
we indicate "PRIMORDIAL,TRANSFER_EHU" to start with a primordial
scale-free gravitational potential, onto which we apply an Eisenstein-Hu
transfer function to form density fluctuations, which are then passed
down to LPT_CIC. Also keep in mind that the scale factors must be
compatible and no checks are run by the code at the moment. ``mul_out``
specifies how much the output grid has to be supersampled for the CIC
(i.e. the CIC grid is produced at mul_out times the initial grid size).

Model 'Primordial'
^^^^^^^^^^^^^^^^^^

Apply a primordial scale-free power spectrum on the input. The output is
scaled linearly to a_final.

Model 'Transfer'
^^^^^^^^^^^^^^^^

* **CIC correction**: use_invert_cic=true: Transfer function is inverse CIC, smoother=0.99 (in units of the grid)
* **Sharp K filter**: use_sharpk=true: Transfer function is a sharp k filter, k_max=0.1 (in h/Mpc)

Model 'Softplus'
^^^^^^^^^^^^^^^^

Apply a softplus transform, hardness=1.0, a parameter making the
transition more or less hard.

Model 'Downgrade'
^^^^^^^^^^^^^^^^^

(No option)

Section [hades]
~~~~~~~~~~~~~~~

- max_epsilon: Stepsize for the HMC. It is unitless. A good starting
  point is around 0.01.
- max_timesteps: Maximum number of timesteps for a single HMC sample.
- mixing: Number of samples to compute before writing to disk.
- algorithm:

  - HMC: classical HMC algorithm
  - QN-HMC: Quasi-Newton HMC algorithm
  - FROZEN-PHASE: Fixed phases. They are not sampled at all but provide
    some pipelines to allow the other samplers to work.

- phases: if ``algorithm`` is FROZEN-PHASE, you can specify an HDF5
  filename here. This file must contain a "phase" array which
  conforms to the setup of the ini.
- noPhasesProvided: if phases is omitted, this one has to be set to
  true, otherwise an error is thrown.
- phasesDataKey: this indicates which field to use in the ``phases``
  HDF5 file.
- likelihood: Likelihood to use in a HADES run. Can be either one of
  those values:

  - LINEAR: Gaussian likelihood
  - BORG_POISSON: Use poisson likelihood
  - Generic framework:

    - GAUSSIAN_BROKEN_POWERLAW_BIAS
    - GAUSSIAN_MO_WHITE_BIAS: Gaussian noise model, variance is
      fitted. Double power law bias
    - GAUSSIAN_POWERLAW_BIAS: Power law bias model with a Gaussian
      noise model, variance is fitted.
    - GAUSSIAN_2ND_ORDER_BIAS
    - GENERIC_POISSON_BROKEN_POWERLAW_BIAS: Broken power law bias
      model (also called Neyrinck's model), with Poisson noise model
    - GENERIC_GAUSSIAN_LINEAR_BIAS: Linear bias model, Gaussian noise
      model
    - GENERIC_GAUSSIAN_MANY_POWER_1^1
    - GENERIC_GAUSSIAN_MANY_POWER_1^2
    - GENERIC_GAUSSIAN_MANY_POWER_1^4
    - GENERIC_POISSON_MANY_POWER_1^1
    - GENERIC_POISSON_MANY_POWER_1^2
    - GENERIC_POISSON_MANY_POWER_1^4
    - GENERIC_POISSON_POWERLAW_BIAS: simple power law bias model with
      a Poisson noise model
    - GENERIC_POISSON_POWERLAW_BIAS_DEGRADE4: power law bias model
      preceded by a degrade pass (N -> N/4 in each direction)
    - GENERIC_POISSON_BROKEN_POWERLAW_BIAS_DEGRADE4: broken power law
      bias model preceded by a degrade pass (N -> N/4 in each
      direction)

- scheme: SI_2A, SI_2B, SI_2C, SI_3A, SI_4B, SI_4C, SI_4D, SI_6A
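
For instance, a hypothetical ``[hades]`` section running with frozen
phases read from an HDF5 file could look like the following sketch (all
values are illustrative only):

.. code:: text

   [hades]
   algorithm=FROZEN-PHASE
   phases=my_phases.h5
   phasesDataKey=phase
   max_epsilon=0.01
   max_timesteps=50
   likelihood=GENERIC_POISSON_POWERLAW_BIAS
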
Section [run]
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
- NCAT: Number of catalogs. This affects the number of "catalog"
|
||||||
|
sections.
|
||||||
|
|
||||||
|
- SIMULATION: Specify if the input is from simulation. Default is
|
||||||
|
false.
|
||||||
|
|
||||||
|
|
||||||
|
Section [likelihood]
|
||||||
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
- MainPower_prior_width: Variance of the manypower parameters (except
|
||||||
|
mean which is always uniform positive)
|
||||||
|
- EFT_Lambda: Lambda truncation parameter of the EFT bias model
|
||||||
|
- Options related to robust likelihood. Each patch of a robust likelihood can be sliced in the redshift direction.
|
||||||
|
There are two options controlling the slicing: the maximum distance "rmax" and the number of slices "slices"
|
||||||
|
|
||||||
|
* rmax: Maximum distance accessible during the inference. In practice it is at least the farthest distance of a voxel in the box.
|
||||||
|
Unit is the one of the box, most generally :math:`h^{-1}` Mpc.
|
||||||
|
* slices: Number of slices to build in the redshift direction. Each patch will have a depth ~rmax/slices.
|
||||||
|
|
||||||
|
Section [cosmology]
|
||||||
|
~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
- omega_r: Radiation density
|
||||||
|
- omega_k: Curvature
|
||||||
|
- omega_m: Total matter density
|
||||||
|
- omega_b: Baryonic matter density
|
||||||
|
- omega_q: Quintescence density
|
||||||
|
- w: Quintescence equation of state
|
||||||
|
- wprime: Derivative of the equation of state
|
||||||
|
- n_s: Slope of the power spectrum of scalar fluctuations
|
||||||
|
- sigma8: Normalisation of powerspectrum at 8 Mpc/h
|
||||||
|
- h100: Hubble constant in unit of 100 km/s/Mpc
|
||||||
|
- fnl: primordial non-Gaussianity
|
||||||
|
|
||||||
|
Section [julia]
|
||||||
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
- likelihood_path: path of the julia code
|
||||||
|
- likelihood_module: julia module where the likelihood is implemented
|
||||||
|
- bias_sampler_type: type of sampler for the bias parameters (hmclet,
|
||||||
|
slice)
|
||||||
|
- ic_in_julia: whether initial conditions of the MCMC are coded in
|
||||||
|
julia or choose some random numbers
|
||||||
|
- hmclet_diagonalMass: where to use a diagonal mass matrix or a full
|
||||||
|
dense
|
||||||
|
- mass_burnin: number of MCMC steps in burnin mode
|
||||||
|
- mass_burnin_memory: number of MCMC steps to store when in burnin mode
|
||||||
|
- hmclet_maxEpsilon: maximum epsilon for the leapfrog integrator
|
||||||
|
(~0.002-0.01 depending on likelihood complexity)
|
||||||
|
- hmclet_maxNtime: maximum number of steps for the leapfrog integrator
|
||||||
|
(~50-100)
|
||||||
|
- hmclet_massScale: amount of momentum reshuffling (0.0 = full, 1.0 =
|
||||||
|
none bad for MCMC)
|
||||||
|
- hmclet_correlationLimiter: reduce the correlations in the covariance
|
||||||
|
matrix by some number. Typically the smaller the number the less
|
||||||
|
reduction with :math:`\simeq 1` reducing the correlation by 2.
|
||||||
|
|
||||||
|
Catalog sections
|
||||||
|
----------------
|
||||||
|
|
||||||
|
Basic fields
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
- datafile: Text filename holding the data
|
||||||
|
- maskdata: Healpix FITS file with the mask
|
||||||
|
- radial_selection: Type of selection function, can be either
|
||||||
|
"schechter", "file" or "piecewise".
|
||||||
|
- refbias: true if this catalog is a reference for bias. Bias will not
|
||||||
|
be sampled for it
|
||||||
|
- bias: Default bias value, also used for mock generation
|
||||||
|
- nmean: Initial mean galaxy density value, also used for mock
|
||||||
|
generation
|
||||||
|
|
||||||
|
Halo selection
|
||||||
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
- halo_selection: Specifying how to select the halos from the halo catalog. Can be ``mass, radius, spin or mixed``. The ``mixed`` represents the combined cuts and can be applied by specifying, eg "halo_selection = mass radius"
|
||||||
|
- halo_low_mass_cut: this is log10 of mass in the same unit as the
|
||||||
|
masses of the input text file
|
||||||
|
- halo_high_mass_cut: same as for halo_low_mass_cut, this is log10 of
|
||||||
|
mass
|
||||||
|
- halo_small_radius_cut
|
||||||
|
- halo_large_radius_cut
|
||||||
|
- halo_small_spin_cut
|
||||||
|
- halo_high_spin_cut
|
||||||
|
|
||||||
|
Schechter selection function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- schechter_mstar: Mstar for the Schechter function
- schechter_alpha: Power law slope of the Schechter function
- schechter_sampling_rate: How many distance points to precompute from
  the Schechter function (e.g. 1000)
- schechter_dmax: Maximum distance out to which to precompute the
  Schechter selection function
- galaxy_bright_apparent_magnitude_cut: Apparent magnitude where data
  and selection must be truncated, bright end.
- galaxy_faint_apparent_magnitude_cut: Same for the faint end.
- galaxy_bright_absolute_magnitude_cut: Absolute magnitude cut in data
  and selection function, bright end, useful to select different
  galaxy populations.
- galaxy_faint_absolute_magnitude_cut: Similar but for the faint end.
- zmin: Minimum redshift for the galaxy sample; galaxies outside will
  be truncated.
- zmax: Maximum redshift for the galaxy sample; galaxies outside will
  be truncated.

'File' selection function
~~~~~~~~~~~~~~~~~~~~~~~~~

- radial_file: Text file to load the selection from

The file has the following format. Each line starting with a '#' is a
comment line and is discarded. The first line is a set of three
numbers: 'rmin dr N'. Each line that follows must be a number between
0 and 1 giving the selection function at a distance r = rmin + dr \* i,
where 'i' is the line number (zero based). Finally, 'N' is the number
of points in the text file.

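For instance, a small (purely illustrative) selection file with
rmin=0, dr=100 and N=4 would read:

.. code:: text

   # selection function, r in Mpc/h
   0 100 4
   1.0
   0.8
   0.5
   0.2
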
Two possibilities are offered for adjusting the catalog and the
selection together:

- either you choose not to do anything, and take the whole sample and
  the provided selection. Then you need to specify:

  - file_dmin: Minimal distance for selection function and data
  - file_dmax: Same but the maximal distance
  - no_cut_catalog: set to false; if you do not set this you will get
    an error message.

- or you want ares to preprocess the catalog, and then you need:

  - zmin
  - zmax
  - galaxy_faint_apparent_magnitude_cut: Apparent magnitude cut, faint
    end.
  - galaxy_bright_absolute_magnitude_cut: Absolute magnitude cut in
    data and selection function, bright end, useful to select
    different galaxy populations
  - galaxy_faint_absolute_magnitude_cut: Similar but for the faint end
  - no_cut_catalog: (not necessary, as it defaults to true)

393
docs/source/user/inputs/Configuration_file_v2.inc.rst
Normal file
@@ -0,0 +1,393 @@

ARES_Configuration_file_v2
==========================

The configuration file for ARES uses the INI file syntax. It is
separated into sections, three of which are the main sections.

Main sections
-------------

Section [system]
~~~~~~~~~~~~~~~~

- console_output: Holds the prefix filename for all log output files.
- VERBOSE_LEVEL: Sets the verbosity level for the console. Files get
  all outputs. Check inside ``libLSS/tools/log_traits.hpp`` for
  details.

  - **Values**:

    - VERBOSE_LEVEL=1 : up to STD level
    - VERBOSE_LEVEL=2 : INFO level
    - VERBOSE_LEVEL=3 : VERBOSE level
    - VERBOSE_LEVEL=4 : DEBUG level

- N0: Number of grid elements along the X axis.
- N1: Same for the Y axis.
- N2: Same for the Z axis.
- **Optionally:**

  - Ndata0, Ndata1, Ndata2 specify the same thing as N0, N1, N2 but
    for the projection grid of the galaxy positions. This grid must be
    different in case the degrader bias pass is used (see the bias
    model section).

- L0: Comoving length of the X axis
- L1: Same for the Y axis
- L2: Same for the Z axis
- corner0: Center of the voxel at the corner of the box in the -X
  direction; this should be the smallest X value.
- corner1: Same for Y
- corner2: Same for Z
- NUM_MODES: Number of bins to represent the power spectrum
- projection_model: Specifies which projection to use for data. No
  constraints are enforced on the likelihood, but of course they
  should be matched to the value adopted here. The value is inspected
  in ``src/common/projection.hpp``. There are two available at the
  moment: ``number_ngp`` and ``luminosity_cic``.

  - number_ngp: just counts the number of galaxies/objects within a
    voxel (Nearest-Grid-Point number counting).
  - luminosity_cic: weights galaxies by their luminosity (the value in
    ``Mgal``) and does a CIC projection.

- test_mode: Runs ARES/BORG/HADES in test mode. Data is not used; mock
  data is generated on the fly.
- seed_cpower: Set to true to seed the power spectrum with the correct
  one according to the cosmology section. Otherwise it is set to a
  small fraction of it.
- savePeriodicity: This reduces the number of times the restart files
  are dumped to the hard drives. This is useful for reducing I/Os, as
  restart files are heavy. You can set this to a number that is a
  multiple of the number of mcmc steps. For example, 20 tells ares to
  dump restart files every 20 mcmc steps.
- mask_precision: Precision to which you want to compute the mask. By
  default it is "0.01", which is not related to the actual precision
  (unfortunately not yet). It allows scaling the internal number of
  evaluations of the selection function. So 0.001 will call it 100
  times more. The advice is not to decrease below 0.01.
- furious_seeding: If set to true, the core sampler will reseed itself
  from a system entropy source at each step of the MCMC. That means
  the MCMC becomes unpredictable and the seed number is discarded.

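As a sketch, the grid and box part of a ``[system]`` section could
look as follows; all values are illustrative placeholders, not
recommended settings:

.. code:: text

   [system]
   console_output=log_ares
   VERBOSE_LEVEL=2
   N0=256
   N1=256
   N2=256
   L0=600.0
   L1=600.0
   L2=600.0
   corner0=-300.0
   corner1=-300.0
   corner2=-300.0
   NUM_MODES=100
   projection_model=number_ngp
   test_mode=true
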
Section [block_loop]
~~~~~~~~~~~~~~~~~~~~

- hades_sampler_blocked: Prevents the density field from being
  sampled.

Likelihoods that use the generic bias framework (currently
GAUSSIAN_MO_WHITE_BIAS) also support the following tags:

- bias_XX_sampler_generic_blocked: If set to true, the XX parameter of
  the bias will not be sampled. XX varies depending on the likelihood.
  **WARNING: the code has not yet been updated to look for these
  variables in [block_loop]; they should still be located in [system]
  at the moment.**

- **Note:** Whenever a bias model uses :math:`b_0` to hold the
  normalization, inside its header you should set/see
  ``NmeanIsBias=True``. Take a look inside ``libLSS/physics/bias/*``
  (for example ``linear.hpp``).

- sigma8_sampler_blocked: true by default; to sample sigma8 in the
  initial conditions, set this to false.

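A hedged illustration of blocking samplers; ``bias_0`` is a
hypothetical instance of the XX pattern, and per the warning above
such tags may still need to live in ``[system]``:

.. code:: text

   [block_loop]
   hades_sampler_blocked=false
   sigma8_sampler_blocked=true
   # hypothetical instance of the bias_XX pattern
   bias_0_sampler_generic_blocked=true
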
Section [mcmc]
~~~~~~~~~~~~~~

- number_to_generate: Maximum number of Markov chain samples to
  produce in a single run
- init_random_scaling: This is more specific to HADES. It starts the
  MCMC run with a random initial condition, scaled with this number
  (default 0.1) compared to the reference initial power spectrum.
- random_ic: true if the initial conditions must be reshuffled before
  starting the MCMC sampling, false to keep them at the value
  generated by the mock data generator

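For instance (values purely illustrative):

.. code:: text

   [mcmc]
   number_to_generate=10000
   init_random_scaling=0.1
   random_ic=false
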
Section [gravity]
~~~~~~~~~~~~~~~~~

- model: Forward model to use

  - LPT: Lagrangian perturbation theory, ModifiedNGP/Quad final
    projection
  - 2LPT: Second order Lagrangian perturbation theory,
    ModifiedNGP/Quad final projection
  - PM: Particle mesh, ModifiedNGP/Quad final projection
  - LPT_CIC: Same as LPT, but uses CIC for the final projection
  - 2LPT_CIC: Same as 2LPT, but uses CIC for the final projection
  - PM_CIC: Same as PM, but uses CIC for the final projection
  - tCOLA: Same as PM_CIC but uses a TCOLA gravity machine. To enable,
    specify model=PM_CIC, as above, AND set tCOLA=true.
  - HADES_LOG: Use the exponential transform (HADES model) for the
    forward model. A preserved mean density is enforced.

- supersampling: Controls the supersampling level of the particle grid
  with respect to the base grid, and thus the number of particles:
  :math:`N_0 \cdot N_1 \cdot N_2 \cdot \mathrm{supersampling}^3`

- forcesampling: This is the oversampling for computing the
  gravitational field (and thus the force in the PM). A current rule
  of thumb is to have forcesampling at least twice supersampling, and
  supersampling at least two. For tCOLA, the requirements are less
  stringent.

  - **To be checked:** Setup with forcesampling=supersampling.

- a_initial: The scale factor value reflects the time. This parameter
  controls the value of a_initial (:math:`a_i`), which should satisfy
  :math:`10^{-3} \leq a_i \leq 1.0`, with :math:`a_i=10^{-3}`
  corresponding to the time of the CMB.
- a_final: Same as the a_initial parameter, but :math:`a_f > a_i`
- pm_start_z: This is relevant only for the PM forward model and
  represents the starting redshift for the PM simulation.
- pm_nsteps: Relevant only for the PM model, see
  ``extra/borg/libLSS/physics/forwards/borg_multi_pm.cpp``. There are
  two scalings in the code, controlled with ``LOG_SCALE_STEP``. If
  ``LOG_SCALE_STEP`` is set to ``False``, then steps are split
  linearly in :math:`a`. The linear scaling seems to give better
  results in tests of :math:`P(k)`.

- part_factor: An option relevant for MPI runs. This is the
  overallocation of particles on each node to allow for moving them in
  and out of the node. It is required because the density projection
  needs to have only the relevant particles on the node. If one of
  them is outside the slab, it will cause a failure.

  - **Note**: ``part_factor`` is independent of ``forcesampling`` and
    ``supersampling``. It will likely be larger for smaller boxes (in
    physical length) and for smaller meshes (in grid size): in the
    first case because particles travel larger distances with respect
    to the size of the box, and in the second because there is more
    shot noise.

- lightcone: See equation 2 of the `SDSS3-BOSS inference paper
  <https://arxiv.org/pdf/1909.06396.pdf>`_. This option is more
  relevant for larger boxes.

- do_rsd: Do redshift space distortions if set to ``True``.

  - **Note:** The DM particles are shifted directly. This will never
    be the case in observations, for which it is the ensemble of gas
    particles around a galaxy that is shifted.

Forward model elements can also be chained and have different grid
sizes. *"model"* can now be CHAIN, which then needs a specific list of
models in *"models"*.

Here is an example:

.. code:: text

   [gravity]
   model=CHAIN
   models=PRIMORDIAL,TRANSFER_EHU,LPT_CIC

   [gravity_chain_0]
   a_final=0.001

   [gravity_chain_1]

   [gravity_chain_2]
   supersampling=2
   lightcone=false
   do_rsd=false
   a_initial=0.001
   a_final=1.
   part_factor=2.0
   mul_out=1

Each element of the chain gets its own configuration section, which is
the same as previously when it was a global descriptor (see above).
Note that if you use the chain mechanism, you have to be explicit
about the production of the initial conditions power spectrum. As you
can see above, we indicate "PRIMORDIAL,TRANSFER_EHU" to start with a
primordial scale-free gravitational potential, onto which we apply an
Eisenstein-Hu transfer function to form density fluctuations, which
are then passed down to LPT_CIC. Also keep in mind that the scale
factors must be compatible; no checks are run by the code at the
moment. ``mul_out`` specifies how much the output grid has to be
supersampled for the CIC (i.e. the CIC grid is produced at mul_out
times the initial grid size).

Model 'Primordial'
^^^^^^^^^^^^^^^^^^

Applies a primordial scale-free power spectrum to the input. The
output is scaled linearly to a_final.

Model 'Transfer'
^^^^^^^^^^^^^^^^

* **CIC correction**: use_invert_cic=true: the transfer function is an
  inverse CIC filter, with smoother=0.99 (in units of the grid)
* **Sharp k filter**: use_sharpk=true: the transfer function is a
  sharp k filter, with k_max=0.1 (in h/Mpc)

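As a sketch: since each chain element gets its own configuration
section, a TRANSFER element sitting at position 1 of a chain would
take its options in the matching section (section name and values
illustrative):

.. code:: text

   [gravity_chain_1]
   use_sharpk=true
   k_max=0.1
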
Model 'Softplus'
^^^^^^^^^^^^^^^^

Applies a softplus transform; hardness=1.0 is a parameter making the
transition more or less hard.

Model 'Downgrade'
^^^^^^^^^^^^^^^^^

(No option)

Section [hades]
~~~~~~~~~~~~~~~

- max_epsilon: Stepsize for the HMC. It is unitless. A good starting
  point is around 0.01.
- max_timesteps: Maximum number of timesteps for a single HMC sample.
- mixing: Number of samples to compute before writing to disk.
- algorithm:

  - HMC: classical HMC algorithm
  - QN-HMC: Quasi-Newton HMC algorithm
  - FROZEN-PHASE: Fixed phases. They are not sampled at all but
    provide some pipelines to allow the other samplers to work.

- phases: If ``algorithm`` is FROZEN-PHASE, you can specify an HDF5
  filename here. This file must contain a "phase" array which conforms
  to the setup of the ini.
- noPhasesProvided: If phases is omitted, this one has to be set to
  true, otherwise an error is thrown.
- phasesDataKey: This indicates which field to use in the ``phases``
  HDF5 file.
- likelihood: Likelihood to use in a HADES run. Can be one of these
  values:

  - LINEAR: Gaussian likelihood
  - BORG_POISSON: Use the Poisson likelihood
  - Generic framework:

    - GAUSSIAN_BROKEN_POWERLAW_BIAS
    - GAUSSIAN_MO_WHITE_BIAS: Gaussian noise model, variance is
      fitted. Double power law bias
    - GAUSSIAN_POWERLAW_BIAS: Power law bias model with a Gaussian
      noise model, variance is fitted.
    - GAUSSIAN_2ND_ORDER_BIAS
    - GENERIC_POISSON_BROKEN_POWERLAW_BIAS: Broken power law bias
      model (also called Neyrinck's model), with a Poisson noise model
    - GENERIC_GAUSSIAN_LINEAR_BIAS: Linear bias model, Gaussian noise
      model
    - GENERIC_GAUSSIAN_MANY_POWER_1^1
    - GENERIC_GAUSSIAN_MANY_POWER_1^2
    - GENERIC_GAUSSIAN_MANY_POWER_1^4
    - GENERIC_POISSON_MANY_POWER_1^1
    - GENERIC_POISSON_MANY_POWER_1^2
    - GENERIC_POISSON_MANY_POWER_1^4
    - GENERIC_POISSON_POWERLAW_BIAS: simple power law bias model with
      a Poisson noise model
    - GENERIC_POISSON_POWERLAW_BIAS_DEGRADE4: power law bias model
      preceded by a degrade pass (N -> N/4 in each direction)
    - GENERIC_POISSON_BROKEN_POWERLAW_BIAS_DEGRADE4: broken power law
      bias model preceded by a degrade pass (N -> N/4 in each
      direction)

- scheme: SI_2A, SI_2B, SI_2C, SI_3A, SI_4B, SI_4C, SI_4D, SI_6A

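A compact, purely illustrative ``[hades]`` block:

.. code:: text

   [hades]
   max_epsilon=0.01
   max_timesteps=50
   mixing=1
   algorithm=HMC
   likelihood=BORG_POISSON
   scheme=SI_2A
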
Section [run]
~~~~~~~~~~~~~

- NCAT: Number of catalogs. This affects the number of "catalog"
  sections.

  - **Note:** If ``NCAT>1``, then the catalogs are assumed to be
    independently taken (no double counting of galaxies etc.), and
    hence when one evaluates the log-likelihood, they are just summed
    together.

- SIMULATION: Specify if the input is from a simulation. Default is
  false.

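For example (illustrative):

.. code:: text

   [run]
   NCAT=2
   SIMULATION=false
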
Section [cosmology]
~~~~~~~~~~~~~~~~~~~

- omega_r: Radiation density
- omega_k: Curvature
- omega_m: Total matter density
- omega_b: Baryonic matter density
- omega_q: Quintessence density
- w: Quintessence equation of state
- wprime: Derivative of the quintessence equation of state
- n_s: Slope of the power spectrum of scalar fluctuations
- sigma8: Normalisation of the power spectrum at 8 Mpc/h
- h100: Hubble constant in units of 100 km/s/Mpc
- fnl: Primordial non-Gaussianity

Section [likelihood]
~~~~~~~~~~~~~~~~~~~~

Options related to the robust likelihood. Each patch of a robust
likelihood can be sliced in the redshift direction. There are two
options controlling the slicing: the maximum distance "rmax" and the
number of slices "slices".

- rmax: Maximum distance accessible during the inference. In practice
  it is at least the farthest distance of a voxel in the box. The unit
  is that of the box, most generally :math:`h^{-1}` Mpc.
- slices: Number of slices to build in the redshift direction. Each
  patch will have a depth of ~rmax/slices.

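With the following (illustrative) values, each patch would be about
100 :math:`h^{-1}` Mpc deep (rmax/slices):

.. code:: text

   [likelihood]
   rmax=1000
   slices=10
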
Section [julia]
~~~~~~~~~~~~~~~

- likelihood_path: path of the Julia code
- likelihood_module: Julia module where the likelihood is implemented
- bias_sampler_type: type of sampler for the bias parameters (hmclet,
  slice)
- ic_in_julia: whether the initial conditions of the MCMC are coded in
  Julia or drawn as random numbers
- hmclet_diagonalMass: whether to use a diagonal mass matrix or a full
  dense one
- mass_burnin: number of MCMC steps in burn-in mode
- mass_burnin_memory: number of MCMC steps to store when in burn-in
  mode
- hmclet_maxEpsilon: maximum epsilon for the leapfrog integrator
  (~0.002-0.01 depending on likelihood complexity)
- hmclet_maxNtime: maximum number of steps for the leapfrog integrator
  (~50-100)
- hmclet_massScale: amount of momentum reshuffling (0.0 = full, 1.0 =
  none, which is bad for MCMC)
- hmclet_correlationLimiter: reduces the correlations in the covariance
  matrix by some amount. Typically, the smaller the number the less the
  reduction, with :math:`\simeq 1` reducing the correlation by 2.

Catalog sections
----------------

Basic fields
~~~~~~~~~~~~

- datafile: Text filename holding the data
- maskdata: Healpix FITS file with the mask
- radial_selection: Type of selection function, can be either
  "schechter", "file" or "piecewise".
- refbias: true if this catalog is a reference for bias. Bias will not
  be sampled for it.
- bias: Default bias value, also used for mock generation
- nmean: Initial mean galaxy density value, also used for mock
  generation

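A sketch of one catalog section; the section name ``[catalog_0]`` is
assumed here to follow the NCAT numbering, and the file names and
values are hypothetical:

.. code:: text

   [catalog_0]
   datafile=my_galaxies.txt
   maskdata=my_mask.fits
   radial_selection=schechter
   refbias=false
   bias=1.2
   nmean=0.1
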
Halo selection
~~~~~~~~~~~~~~

- halo_selection: Specifies how to select the halos from the halo
  catalog. Can be ``mass``, ``radius``, ``spin`` or ``mixed``. The
  ``mixed`` value represents combined cuts and can be applied by
  specifying, e.g., "halo_selection = mass radius" (see the sketch
  after this list).
- halo_low_mass_cut: this is log10 of the mass in the same unit as the
  masses of the input text file
- halo_high_mass_cut: same as halo_low_mass_cut, this is log10 of the
  mass
- halo_small_radius_cut
- halo_large_radius_cut
- halo_small_spin_cut
- halo_high_spin_cut

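For example, a combined mass and spin cut might be configured as below
(values hypothetical; masses are log10 in the input units):

.. code:: text

   halo_selection=mass spin
   halo_low_mass_cut=11.0
   halo_high_mass_cut=14.0
   halo_small_spin_cut=0.01
   halo_high_spin_cut=0.1
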
Schechter selection function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- schechter_mstar: Mstar for the Schechter function
- schechter_alpha: Power law slope of the Schechter function
- schechter_sampling_rate: How many distance points to precompute from
  the Schechter function (e.g. 1000)
- schechter_dmax: Maximum distance out to which to precompute the
  Schechter selection function
- galaxy_bright_apparent_magnitude_cut: Apparent magnitude where data
  and selection must be truncated, bright end.
- galaxy_faint_apparent_magnitude_cut: Same for the faint end.
- galaxy_bright_absolute_magnitude_cut: Absolute magnitude cut in data
  and selection function, bright end, useful to select different
  galaxy populations.
- galaxy_faint_absolute_magnitude_cut: Similar but for the faint end.
- zmin: Minimum redshift for the galaxy sample; galaxies outside will
  be truncated.
- zmax: Maximum redshift for the galaxy sample; galaxies outside will
  be truncated.

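An illustrative set of Schechter-related fields for a catalog section
(all numbers hypothetical):

.. code:: text

   radial_selection=schechter
   schechter_mstar=-23.3
   schechter_alpha=-0.9
   schechter_sampling_rate=1000
   schechter_dmax=300
   zmin=0.001
   zmax=0.1
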
'File' selection function
~~~~~~~~~~~~~~~~~~~~~~~~~

- radial_file: Text file to load the selection from

The file has the following format. Each line starting with a '#' is a
comment line and is discarded. The first line is a set of three
numbers: 'rmin dr N'. Each line that follows must be a number between
0 and 1 giving the selection function at a distance r = rmin + dr \* i,
where 'i' is the line number (zero based). Finally, 'N' is the number
of points in the text file.

Two possibilities are offered for adjusting the catalog and the
selection together:

- either you choose not to do anything, and take the whole sample and
  the provided selection. Then you need to specify:

  - file_dmin: Minimal distance for selection function and data
  - file_dmax: Same but the maximal distance
  - no_cut_catalog: set to false; if you do not set this you will get
    an error message.

- or you want ares to preprocess the catalog, and then you need:

  - zmin
  - zmax
  - galaxy_faint_apparent_magnitude_cut: Apparent magnitude cut, faint
    end.
  - galaxy_bright_absolute_magnitude_cut: Absolute magnitude cut in
    data and selection function, bright end, useful to select
    different galaxy populations
  - galaxy_faint_absolute_magnitude_cut: Similar but for the faint end
  - no_cut_catalog: (not necessary, as it defaults to true)

19
docs/source/user/inputs/Create_config-file.inc.rst
Normal file
@@ -0,0 +1,19 @@

How to create a config file from python
=======================================

This page is about running the ``gen_subcat_conf.py`` script under
``scripts/ini_generator`` in ares. For an explanation of the
config-file itself, see :ref:`here<configuration_file>`.

Config-file for 2M++ and SDSS(MGS)
----------------------------------

The folder containing the scripts and the ini files below is located
in ``$SOURCE/scripts/ini_generator``. The steps to generate the
config-file are the following:

- Manipulate ``header.ini`` for your needs
- (If needed) alter the template files (``template_sdss_main.py``,
  ``template_2mpp_main.py`` and ``template_2mpp_second.py``) for the
  cutting and adjusting of data
- To create the ini file, run this command:

.. code:: bash

   python gen_subcat_conf.py --output NAME_OF_OUTPUT_FILE.ini --configs template_sdss_main.py:template_2mpp_main.py:template_2mpp_second.py --header header.ini
Some files were not shown because too many files have changed in this diff.