diff --git a/external/cosmotool/FindNumPy.cmake b/external/cosmotool/FindNumPy.cmake new file mode 100644 index 0000000..eafed16 --- /dev/null +++ b/external/cosmotool/FindNumPy.cmake @@ -0,0 +1,102 @@ +# - Find the NumPy libraries +# This module finds if NumPy is installed, and sets the following variables +# indicating where it is. +# +# TODO: Update to provide the libraries and paths for linking npymath lib. +# +# NUMPY_FOUND - was NumPy found +# NUMPY_VERSION - the version of NumPy found as a string +# NUMPY_VERSION_MAJOR - the major version number of NumPy +# NUMPY_VERSION_MINOR - the minor version number of NumPy +# NUMPY_VERSION_PATCH - the patch version number of NumPy +# NUMPY_VERSION_DECIMAL - e.g. version 1.6.1 is 10601 +# NUMPY_INCLUDE_DIRS - path to the NumPy include files + +#============================================================================ +# Copyright 2012 Continuum Analytics, Inc. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files +# (the "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +#============================================================================ + +# Finding NumPy involves calling the Python interpreter +if(NumPy_FIND_REQUIRED) + find_package(PythonInterp REQUIRED) +else() + find_package(PythonInterp) +endif() + +if(NOT PYTHONINTERP_FOUND) + set(NUMPY_FOUND FALSE) + return() +endif() + +execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" + "import numpy as n; print(n.__version__); print(n.get_include());" + RESULT_VARIABLE _NUMPY_SEARCH_SUCCESS + OUTPUT_VARIABLE _NUMPY_VALUES_OUTPUT + ERROR_VARIABLE _NUMPY_ERROR_VALUE + OUTPUT_STRIP_TRAILING_WHITESPACE) + +if(NOT _NUMPY_SEARCH_SUCCESS MATCHES 0) + if(NumPy_FIND_REQUIRED) + message(FATAL_ERROR + "NumPy import failure:\n${_NUMPY_ERROR_VALUE}") + endif() + set(NUMPY_FOUND FALSE) + return() +endif() + +# Convert the process output into a list +string(REGEX REPLACE ";" "\\\\;" _NUMPY_VALUES ${_NUMPY_VALUES_OUTPUT}) +string(REGEX REPLACE "\n" ";" _NUMPY_VALUES ${_NUMPY_VALUES}) +# Just in case there is unexpected output from the Python command. +list(GET _NUMPY_VALUES -2 NUMPY_VERSION) +list(GET _NUMPY_VALUES -1 NUMPY_INCLUDE_DIRS) + +string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" _VER_CHECK "${NUMPY_VERSION}") +if("${_VER_CHECK}" STREQUAL "") + # The output from Python was unexpected. Raise an error always + # here, because we found NumPy, but it appears to be corrupted somehow. + message(FATAL_ERROR + "Requested version and include path from NumPy, got instead:\n${_NUMPY_VALUES_OUTPUT}\n") + return() +endif() + +# Make sure all directory separators are '/' +string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIRS ${NUMPY_INCLUDE_DIRS}) + +# Get the major and minor version numbers +string(REGEX REPLACE "\\." 
";" _NUMPY_VERSION_LIST ${NUMPY_VERSION}) +list(GET _NUMPY_VERSION_LIST 0 NUMPY_VERSION_MAJOR) +list(GET _NUMPY_VERSION_LIST 1 NUMPY_VERSION_MINOR) +list(GET _NUMPY_VERSION_LIST 2 NUMPY_VERSION_PATCH) +string(REGEX MATCH "[0-9]*" NUMPY_VERSION_PATCH ${NUMPY_VERSION_PATCH}) +math(EXPR NUMPY_VERSION_DECIMAL + "(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}") + +find_package_message(NUMPY + "Found NumPy: version \"${NUMPY_VERSION}\" ${NUMPY_INCLUDE_DIRS}" + "${NUMPY_INCLUDE_DIRS}${NUMPY_VERSION}") + +set(NUMPY_FOUND TRUE) + diff --git a/external/cosmotool/FindPyLibs.cmake b/external/cosmotool/FindPyLibs.cmake new file mode 100644 index 0000000..f8d5bae --- /dev/null +++ b/external/cosmotool/FindPyLibs.cmake @@ -0,0 +1,32 @@ +execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" + "import distutils.sysconfig as cs; import os; import sys; v=cs.get_config_vars(); print(os.path.join(v['LIBDIR'],v['LDLIBRARY'])); sys.exit(0)" + RESULT_VARIABLE _PYLIB_SEARCH_SUCCESS + OUTPUT_VARIABLE _PYLIB_VALUES_OUTPUT + ERROR_VARIABLE _PYLIB_ERROR_VALUE + OUTPUT_STRIP_TRAILING_WHITESPACE) + +message(${_PYLIB_SEARCH_SUCCESS}) + +execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" + "import distutils.sysconfig as cs; import os; v=cs.get_config_vars(); print(v['INCLUDEPY']);" + RESULT_VARIABLE _PYINC_SEARCH_SUCCESS + OUTPUT_VARIABLE _PYINC_VALUES_OUTPUT + ERROR_VARIABLE _PYINC_ERROR_VALUE + OUTPUT_STRIP_TRAILING_WHITESPACE) + + +if(NOT _PYLIB_SEARCH_SUCCESS MATCHES 0) + message(FATAL_ERROR + "PyLib search failure:\n${_PYLIB_ERROR_VALUE}") + return() +endif() + +if(NOT _PYINC_SEARCH_SUCCESS MATCHES 0) + message(FATAL_ERROR + "PyInc search failure:\n${_PYINC_ERROR_VALUE}") + return() +endif() + + +set(PYTHON_LIBRARY ${_PYLIB_VALUES_OUTPUT} CACHE PATH "Python runtime library path") +set(PYTHON_INCLUDE_PATH ${_PYINC_VALUES_OUTPUT} CACHE PATH "Python runtime include path") diff --git a/external/cosmotool/color_msg.cmake 
b/external/cosmotool/color_msg.cmake new file mode 100644 index 0000000..4fdf518 --- /dev/null +++ b/external/cosmotool/color_msg.cmake @@ -0,0 +1,42 @@ +if(NOT WIN32) + string(ASCII 27 Esc) + set(ColourReset "${Esc}[m") + set(ColourBold "${Esc}[1m") + set(Red "${Esc}[31m") + set(Green "${Esc}[32m") + set(Yellow "${Esc}[33m") + set(Blue "${Esc}[34m") + set(Magenta "${Esc}[35m") + set(Cyan "${Esc}[36m") + set(White "${Esc}[37m") + set(BoldRed "${Esc}[1;31m") + set(BoldGreen "${Esc}[1;32m") + set(BoldYellow "${Esc}[1;33m") + set(BoldBlue "${Esc}[1;34m") + set(BoldMagenta "${Esc}[1;35m") + set(BoldCyan "${Esc}[1;36m") + set(BoldWhite "${Esc}[1;37m") +endif() + +function(cmessage) + list(GET ARGV 0 MessageType) + if(MessageType STREQUAL FATAL_ERROR OR MessageType STREQUAL SEND_ERROR) + list(REMOVE_AT ARGV 0) + message(${MessageType} "${BoldRed}${ARGV}${ColourReset}") + elseif(MessageType STREQUAL CWARNING) + list(REMOVE_AT ARGV 0) + message(STATUS "${BoldYellow}${ARGV}${ColourReset}") + elseif(MessageType STREQUAL WARNING) + list(REMOVE_AT ARGV 0) + message(${MessageType} "${BoldYellow}${ARGV}${ColourReset}") + elseif(MessageType STREQUAL AUTHOR_WARNING) + list(REMOVE_AT ARGV 0) + message(${MessageType} "${BoldCyan}${ARGV}${ColourReset}") + elseif(MessageType STREQUAL STATUS) + list(REMOVE_AT ARGV 0) + message(${MessageType} "${Green}${ARGV}${ColourReset}") + else() + message("${ARGV}") + endif() +endfunction() + diff --git a/external/cosmotool/doc/make.bat b/external/cosmotool/doc/make.bat new file mode 100644 index 0000000..b53d165 --- /dev/null +++ b/external/cosmotool/doc/make.bat @@ -0,0 +1,190 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source +set I18NSPHINXOPTS=%SPHINXOPTS% source +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D 
latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. 
+ goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\CosmoToolbox.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\CosmoToolbox.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. 
The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +:end diff --git a/external/cosmotool/doc/source/conf.py b/external/cosmotool/doc/source/conf.py new file mode 100644 index 0000000..7d5abbe --- /dev/null +++ b/external/cosmotool/doc/source/conf.py @@ -0,0 +1,286 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# CosmoToolbox documentation build configuration file, created by +# sphinx-quickstart on Sun Dec 7 09:45:00 2014. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc','sphinxcontrib.napoleon', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'CosmoToolbox' +copyright = '2014, Guilhem Lavaux' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '1' +# The full version, including alpha/beta/rc tags. +release = '1.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. 
+#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'CosmoToolboxdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). 
+latex_documents = [ + ('index', 'CosmoToolbox.tex', 'CosmoToolbox Documentation', + 'Guilhem Lavaux', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'cosmotoolbox', 'CosmoToolbox Documentation', + ['Guilhem Lavaux'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'CosmoToolbox', 'CosmoToolbox Documentation', + 'Guilhem Lavaux', 'CosmoToolbox', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + + +# -- Options for Epub output --------------------------------------------------- + +# Bibliographic Dublin Core info. 
+epub_title = 'CosmoToolbox' +epub_author = 'Guilhem Lavaux' +epub_publisher = 'Guilhem Lavaux' +epub_copyright = '2014, Guilhem Lavaux' + +# The language of the text. It defaults to the language option +# or en if the language is not set. +#epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +#epub_scheme = '' + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +#epub_identifier = '' + +# A unique identification for the text. +#epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +#epub_cover = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_pre_files = [] + +# HTML files shat should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_post_files = [] + +# A list of files that should not be packed into the epub file. +#epub_exclude_files = [] + +# The depth of the table of contents in toc.ncx. +#epub_tocdepth = 3 + +# Allow duplicate toc entries. +#epub_tocdup = True diff --git a/external/cosmotool/doc/source/cpplibrary.rst b/external/cosmotool/doc/source/cpplibrary.rst new file mode 100644 index 0000000..dd84b54 --- /dev/null +++ b/external/cosmotool/doc/source/cpplibrary.rst @@ -0,0 +1,2 @@ +The CosmoTool C++ library +========================= diff --git a/external/cosmotool/doc/source/index.rst b/external/cosmotool/doc/source/index.rst new file mode 100644 index 0000000..eee378c --- /dev/null +++ b/external/cosmotool/doc/source/index.rst @@ -0,0 +1,26 @@ +.. CosmoToolbox documentation master file, created by + sphinx-quickstart on Sun Dec 7 09:45:00 2014. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to CosmoToolbox's documentation! +======================================== + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + intro + cpplibrary + pythonmodule + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/external/cosmotool/doc/source/intro.rst b/external/cosmotool/doc/source/intro.rst new file mode 100644 index 0000000..fe3eb37 --- /dev/null +++ b/external/cosmotool/doc/source/intro.rst @@ -0,0 +1,6 @@ +Introduction +============ + + + +This is an intro diff --git a/external/cosmotool/doc/source/pythonmodule.rst b/external/cosmotool/doc/source/pythonmodule.rst new file mode 100644 index 0000000..11fa942 --- /dev/null +++ b/external/cosmotool/doc/source/pythonmodule.rst @@ -0,0 +1,53 @@ +The CosmoToolbox python module +============================== + +.. module:: cosmotool + + +Simulation handling +^^^^^^^^^^^^^^^^^^^ + +The simulation data +------------------- + +.. autoclass:: PySimulationBase + :members: + +.. autoclass:: SimulationBare + :members: + +.. autoclass:: Simulation + :members: + +.. autoclass:: PySimulationAdaptor + :members: + +The simulation loaders +---------------------- + +.. autofunction:: loadRamses +.. autofunction:: loadRamsesAll +.. autofunction:: loadGadget +.. autofunction:: loadParallelGadget + +Grafic Input and Output +----------------------- + +.. autofunction:: readGrafic + +Timing +------ + +.. autofunction:: time_block +.. autofunction:: timeit +.. autofunction:: timeit_quiet + + +Cosmology +^^^^^^^^^ + +Power spectrum +-------------- + +.. 
autoclass:: CosmologyPower + :members: diff --git a/external/cosmotool/external/external_build.cmake b/external/cosmotool/external/external_build.cmake new file mode 100644 index 0000000..48ce837 --- /dev/null +++ b/external/cosmotool/external/external_build.cmake @@ -0,0 +1,373 @@ +include(FindOpenMP) + +OPTION(ENABLE_OPENMP "Set to Yes if Healpix and/or you need openMP" OFF) + +SET(FFTW_URL "http://www.fftw.org/fftw-3.3.3.tar.gz" CACHE URL "URL to download FFTW from") +SET(EIGEN_URL "http://bitbucket.org/eigen/eigen/get/3.2.10.tar.gz" CACHE URL "URL to download Eigen from") +SET(GENGETOPT_URL "ftp://ftp.gnu.org/gnu/gengetopt/gengetopt-2.22.5.tar.gz" CACHE STRING "URL to download gengetopt from") +SET(HDF5_URL "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8/hdf5-1.8.18/src/hdf5-1.8.18.tar.bz2" CACHE STRING "URL to download HDF5 from") +SET(NETCDF_URL "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.5.0.tar.gz" CACHE STRING "URL to download NetCDF from") +SET(NETCDFCXX_URL "https://github.com/Unidata/netcdf-cxx4/archive/v4.3.0.tar.gz" CACHE STRING "URL to download NetCDF-C++ from") +SET(BOOST_URL "http://sourceforge.net/projects/boost/files/boost/1.61.0/boost_1_61_0.tar.gz/download" CACHE STRING "URL to download Boost from") +SET(GSL_URL "ftp://ftp.gnu.org/gnu/gsl/gsl-1.15.tar.gz" CACHE STRING "URL to download GSL from ") +mark_as_advanced(FFTW_URL EIGEN_URL HDF5_URL NETCDF_URL BOOST_URL GSL_URL) + + +MACRO(CHECK_CHANGE_STATE VAR) + IF (DEFINED _PREVIOUS_${VAR}) + IF (NOT ${_PREVIOUS_${VAR}} EQUAL ${${VAR}}) + foreach(loopvar ${ARGN}) + UNSET(${loopvar} CACHE) + endforeach() + ENDIF (NOT ${_PREVIOUS_${VAR}} EQUAL ${${VAR}}) + ENDIF (DEFINED _PREVIOUS_${VAR}) + SET(_PREVIOUS_${VAR} ${${VAR}} CACHE INTERNAL "Internal value") +ENDMACRO(CHECK_CHANGE_STATE) + +CHECK_CHANGE_STATE(INTERNAL_BOOST Boost_LIBRARIES Boost_INCLUDE_DIRS) +CHECK_CHANGE_STATE(INTERNAL_EIGEN EIGEN3_INCLUDE_DIRS) +CHECK_CHANGE_STATE(INTERNAL_GSL GSL_LIBRARY GSL_CBLAS_LIBRARY GSL_INCLUDE) 
+CHECK_CHANGE_STATE(INTERNAL_HDF5 + HDF5_INCLUDE_DIR HDF5_LIBRARIES HDF5_CXX_LIBRARIES + HDF5_C_STATIC_LIBRARY HDF5_HL_STATIC_LIBRARY HDF5_CXX_STATIC_LIBRARY) +CHECK_CHANGE_STATE(INTERNAL_DLIB DLIB_INCLUDE_DIR DLIB_LIBRARIES) + + +IF(ENABLE_OPENMP) + IF (NOT OPENMP_FOUND) + MESSAGE(ERROR "No known compiler option for enabling OpenMP") + ENDIF(NOT OPENMP_FOUND) + + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") + SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_C_FLAGS}") +ENDIF(ENABLE_OPENMP) + + +SET(BUILD_PREFIX ${CMAKE_BINARY_DIR}/external_build) +SET(EXT_INSTALL ${CMAKE_BINARY_DIR}/ext_install) +SET(CONFIGURE_LIBS ) +SET(CONFIGURE_CPP_FLAGS "") +SET(CONFIGURE_LDFLAGS "") + +if (ENABLE_SHARP) + SET(DEP_BUILD ${BUILD_PREFIX}/sharp-prefix/src/sharp/auto) + IF(NOT ENABLE_OPENMP) + SET(SHARP_OPENMP --disable-openmp) + ENDIF() + ExternalProject_Add(sharp + URL ${CMAKE_SOURCE_DIR}/external/libsharp-6077806.tar.gz + PREFIX ${BUILD_PREFIX}/sharp-prefix + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND autoconf && ./configure "CC=${CMAKE_C_COMPILER}" "CXX=${CMAKE_CXX_COMPILER}" --prefix=${DEP_BUILD} ${SHARP_OPENMP} + BUILD_COMMAND ${CMAKE_MAKE_PROGRAM} + INSTALL_COMMAND echo "No install" + ) + SET(CUTILS_LIBRARY ${DEP_BUILD}/lib/libc_utils.a) + SET(FFTPACK_LIBRARY ${DEP_BUILD}/lib/libfftpack.a) + SET(SHARP_LIBRARY ${DEP_BUILD}/lib/libsharp.a) + SET(SHARP_LIBRARIES ${SHARP_LIBRARY} ${FFTPACK_LIBRARY} ${CUTILS_LIBRARY}) + SET(SHARP_INCLUDE_PATH ${DEP_BUILD}/include) +endif (ENABLE_SHARP) + + +############### +# Build HDF5 +############### + +if (INTERNAL_HDF5) + SET(HDF5_SOURCE_DIR ${BUILD_PREFIX}/hdf5-prefix/src/hdf5) + SET(HDF5_BIN_DIR ${EXT_INSTALL}) + ExternalProject_Add(hdf5 + PREFIX ${BUILD_PREFIX}/hdf5-prefix + URL ${HDF5_URL} + URL_HASH MD5=29117bf488887f89888f9304c8ebea0b + CMAKE_ARGS + -DCMAKE_INSTALL_PREFIX=${EXT_INSTALL} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + 
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DHDF5_BUILD_CPP_LIB=ON + -DHDF5_BUILD_TOOLS=ON + -DHDF5_BUILD_HL_LIB=ON + -DBUILD_SHARED_LIBS=OFF + ) + SET(cosmotool_DEPS ${cosmotool_DEPS} hdf5) + SET(hdf5_built hdf5) + SET(ENV{HDF5_ROOT} ${HDF5_BIN_DIR}) + SET(HDF5_ROOTDIR ${HDF5_BIN_DIR}) + SET(CONFIGURE_LDFLAGS "${CONFIGURE_LDFLAGS} -L${HDF5_BIN_DIR}/lib") + SET(CONFIGURE_LIBS "${CONFIGURE_LIBS} -ldl") + set(HDF5_C_STATIC_LIBRARY ${HDF5_BIN_DIR}/lib/libhdf5-static.a) + set(HDF5_HL_STATIC_LIBRARY ${HDF5_BIN_DIR}/lib/libhdf5_hl-static.a) + set(HDF5_LIBRARIES ${HDF5_BIN_DIR}/lib/libhdf5-static.a CACHE STRING "HDF5 lib" FORCE) + set(HDF5_HL_LIBRARIES ${HDF5_BIN_DIR}/lib/libhdf5_hl-static.a CACHE STRING "HDF5 HL lib" FORCE) + set(HDF5_CXX_LIBRARIES ${HDF5_BIN_DIR}/lib/libhdf5_cpp-static.a CACHE STRING "HDF5 C++ lib" FORCE) + SET(HDF5_INCLUDE_DIRS ${HDF5_BIN_DIR}/include CACHE STRING "HDF5 include path" FORCE) + mark_as_advanced(HDF5_LIBRARIES HDF5_CXX_LIBRARIES HDF5_INCLUDE_DIRS) + + MESSAGE(STATUS "Internal HDF5 directory: $ENV{HDF5_ROOT}") + MESSAGE(STATUS "Libs: ${HDF5_LIBRARIES}") + SET(HDF5_FOUND TRUE) +else (INTERNAL_HDF5) + mark_as_advanced(CLEAR HDF5_LIBRARIES HDF5_CXX_LIBRARIES HDF5_INCLUDE_DIRS) + if(HDF5_ROOTDIR) + SET(ENV{HDF5_ROOT} ${HDF5_ROOTDIR}) + endif(HDF5_ROOTDIR) + find_package(HDF5 CONFIG QUIET COMPONENTS C CXX HL static) + if (NOT HDF5_FOUND) + cmessage(CWARNING "Could not find HDF5 cmake config. 
Try classical exploration") + find_package(HDF5 COMPONENTS C CXX HL) + cmessage(STATUS "HDF5 lib: ${HDF5_LIBRARIES}") + cmessage(STATUS "HDF5 includes: ${HDF5_INCLUDE_DIRS}") + cmessage(STATUS "HDF5 C lib: ${HDF5_C_LIBRARY}") + cmessage(STATUS "HDF5 HL lib: ${HDF5_HL_LIBRARY}") + cmessage(STATUS "HDF5 BIN: ${HDF5_BIN_DIR}") + foreach(hdf5lib IN LISTS HDF5_LIBRARIES) + if (${hdf5lib} MATCHES "(hdf5)|(HDF5)") + get_filename_component(HDF5_BIN_DIR ${hdf5lib} DIRECTORY) + endif() + endforeach() + cmessage(STATUS "HDF5 libpath: ${HDF5_BIN_DIR}") + else() + cmessage(STATUS "Found HDF5 cmake config.") + cmessage(STATUS "HDF5_C_STATIC_LIBRARY : ${HDF5_C_STATIC_LIBRARY}") + set(HDF5_LIBRARIES ${HDF5_C_STATIC_LIBRARY} CACHE STRING "HDF5 lib" FORCE) + set(HDF5_HL_LIBRARIES ${HDF5_HL_STATIC_LIBRARY} CACHE STRING "HDF5 HL lib" FORCE) + set(HDF5_CXX_LIBRARIES ${HDF5_CXX_STATIC_LIBRARY} CACHE STRING "HDF5 C++ lib" FORCE) + get_filename_component(HDF5_BIN_DIR ${HDF5_C_STATIC_LIBRARY} DIRECTORY) + endif() + SET(CONFIGURE_LDFLAGS "${CONFIGURE_LDFLAGS} -L${HDF5_BIN_DIR}") +endif (INTERNAL_HDF5) + +foreach(include_dir ${HDF5_INCLUDE_DIRS}) + SET(CONFIGURE_CPP_FLAGS "${CONFIGURE_CPP_FLAGS} -I${include_dir}") +endforeach(include_dir) + +############### +# Build NetCDF +############### + + +if (INTERNAL_NETCDF) + SET(NETCDF_SOURCE_DIR ${BUILD_PREFIX}/netcdf-prefix/src/netcdf) + SET(NETCDF_BIN_DIR ${EXT_INSTALL}) + SET(CONFIGURE_CPP_FLAGS "${CONFIGURE_CPP_FLAGS} -I${NETCDF_BIN_DIR}/include") + SET(CONFIGURE_LDFLAGS "${CONFIGURE_LDFLAGS} -L${NETCDF_BIN_DIR}/lib") + SET(EXTRA_NC_FLAGS CPPFLAGS=${CONFIGURE_CPP_FLAGS} LIBS=${CONFIGURE_LIBS} LDFLAGS=${CONFIGURE_LDFLAGS}) + SET(NETCDF_CONFIG_COMMAND ${NETCDF_SOURCE_DIR}/configure + --prefix=${NETCDF_BIN_DIR} --libdir=${NETCDF_BIN_DIR}/lib + --enable-netcdf-4 --with-pic --disable-shared --disable-dap + --disable-cdmremote --disable-rpc --enable-cxx-4 + --disable-examples ${EXTRA_NC_FLAGS} CC=${CMAKE_C_COMPILER} + CXX=${CMAKE_CXX_COMPILER}) + 
list(INSERT CMAKE_PREFIX_PATH 0 ${EXT_INSTALL}) + string(REPLACE ";" "|" CMAKE_PREFIX_PATH_ALT_SEP "${CMAKE_PREFIX_PATH}") + ExternalProject_Add(netcdf + DEPENDS ${hdf5_built} + PREFIX ${BUILD_PREFIX}/netcdf-prefix + URL ${NETCDF_URL} + LIST_SEPARATOR | + CMAKE_ARGS + -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH_ALT_SEP} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DBUILD_SHARED_LIBS=OFF + -DBUILD_TESTING=OFF + -DCMAKE_BUILD_TYPE=Release + -DENABLE_NETCDF4=ON + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DENABLE_DAP=OFF + -DCMAKE_INSTALL_PREFIX=${NETCDF_BIN_DIR} + -DHDF5_C_LIBRARY=${HDF5_C_STATIC_LIBRARY} + -DHDF5_HL_LIBRARY=${HDF5_HL_STATIC_LIBRARY} + -DHDF5_INCLUDE_DIR=${HDF5_INCLUDE_DIRS} + -DCMAKE_INSTALL_LIBDIR=lib + ) + + SET(NETCDFCXX_SOURCE_DIR ${BUILD_PREFIX}/netcdf-c++-prefix/src/netcdf-c++) + ExternalProject_Add(netcdf-c++ + DEPENDS ${hdf5_built} netcdf + PREFIX ${BUILD_PREFIX}/netcdf-c++-prefix + URL ${NETCDFCXX_URL} + CMAKE_ARGS + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DBUILD_SHARED_LIBS=OFF + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DBUILD_TESTING=OFF + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_INSTALL_PREFIX=${NETCDF_BIN_DIR} + -DCMAKE_INSTALL_LIBDIR=lib + ) +# SET(CONFIGURE_CPP_LDFLAGS "${CONFIGURE_LDFLAGS}") +# SET(EXTRA_NC_FLAGS CPPFLAGS=${CONFIGURE_CPP_FLAGS} LDFLAGS=${CONFIGURE_CPP_LDFLAGS}) + SET(cosmotool_DEPS ${cosmotool_DEPS} netcdf netcdf-c++) +# find_library(NETCDF_LIBRARY netcdf NO_DEFAULT_PATH HINTS ${NETCDF_BIN_DIR}/lib ${NETCDF_BIN_DIR}/lib32 ${NETCDF_BIN_DIR}/lib64) +# find_library(NETCDFCPP_LIBRARY netcdf-cxx4 NO_DEFAULT_PATH HINTS ${NETCDF_BIN_DIR}/lib ${NETCDF_BIN_DIR}/lib32 ${NETCDF_BIN_DIR}/lib64) + SET(NETCDF_LIBRARY ${NETCDF_BIN_DIR}/lib/libnetcdf.a CACHE STRING "NetCDF lib" FORCE) + SET(NETCDFCPP_LIBRARY ${NETCDF_BIN_DIR}/lib/libnetcdf-cxx4.a CACHE STRING "NetCDF-C++ lib" FORCE) + SET(NETCDF_INCLUDE_PATH ${NETCDF_BIN_DIR}/include CACHE STRING "NetCDF 
include" FORCE) + SET(NETCDFCPP_INCLUDE_PATH ${NETCDF_INCLUDE_PATH} CACHE STRING "NetCDF C++ include path" FORCE) + +ELSE(INTERNAL_NETCDF) + find_path(NETCDF_INCLUDE_PATH NAMES netcdf.h) + find_path(NETCDFCPP_INCLUDE_PATH NAMES netcdfcpp.h netcdf) + find_library(NETCDF_LIBRARY netcdf) + find_library(NETCDFCPP_LIBRARY NAMES netcdf_c++4 netcdf_c++) + + SET(CONFIGURE_CPP_FLAGS "${CONFIGURE_CPP_FLAGS} -I${NETCDF_INCLUDE_PATH} -I${NETCDFCPP_INCLUDE_PATH}") +endif (INTERNAL_NETCDF) +mark_as_advanced(NETCDF_LIBRARY NETCDFCPP_LIBRARY NETCDF_INCLUDE_PATH NETCDFCPP_INCLUDE_PATH) + +################## +# Build BOOST +################## + +if (INTERNAL_BOOST) + message(STATUS "Building Boost") + SET(BOOST_SOURCE_DIR ${BUILD_PREFIX}/boost-prefix/src/boost) + ExternalProject_Add(boost + URL ${BOOST_URL} + PREFIX ${BUILD_PREFIX}/boost-prefix + CONFIGURE_COMMAND + ${BOOST_SOURCE_DIR}/bootstrap.sh --prefix=${CMAKE_BINARY_DIR}/ext_build/boost + BUILD_IN_SOURCE 1 + BUILD_COMMAND ${BOOST_SOURCE_DIR}/b2 --with-exception + INSTALL_COMMAND echo "No install" + ) + set(Boost_INCLUDE_DIRS ${BOOST_SOURCE_DIR} CACHE STRING "Boost path" FORCE) + set(Boost_LIBRARIES ${BOOST_SOURCE_DIR}/stage/lib/libboost_python.a CACHE STRING "Boost libraries" FORCE) + set(Boost_FOUND YES) + set(Boost_DEP boost) + +ELSE (INTERNAL_BOOST) + find_package(Boost 1.53 QUIET) + set(Boost_DEP) + if (NOT Boost_FOUND) + cmessage(CWARNING "Boost >= 1.53 was not found") + endif() +endif (INTERNAL_BOOST) +mark_as_advanced(Boost_INCLUDE_DIRS Boost_LIBRARIES) + +################## +# Build GSL +################## + +IF(INTERNAL_GSL) + SET(GSL_SOURCE_DIR ${BUILD_PREFIX}/gsl-prefix/src/gsl) + ExternalProject_Add(gsl + URL ${GSL_URL} + PREFIX ${BUILD_PREFIX}/gsl-prefix + CONFIGURE_COMMAND ${GSL_SOURCE_DIR}/configure + --prefix=${EXT_INSTALL} --disable-shared + --with-pic + CPPFLAGS=${CONFIGURE_CPP_FLAGS} CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} + BUILD_IN_SOURCE 1 + BUILD_COMMAND ${CMAKE_MAKE_PROGRAM} + INSTALL_COMMAND 
${CMAKE_MAKE_PROGRAM} install + ) + SET(GSL_INTERNAL_LIBS ${EXT_INSTALL}/lib) + SET(GSL_LIBRARY ${GSL_INTERNAL_LIBS}/libgsl.a CACHE STRING "GSL internal path" FORCE) + SET(GSLCBLAS_LIBRARY ${GSL_INTERNAL_LIBS}/libgslcblas.a CACHE STRING "GSL internal path" FORCE) + set(GSL_INCLUDE_PATH ${CMAKE_BINARY_DIR}/ext_build/gsl/include CACHE STRING "GSL internal path" FORCE) + set(GSL_LIBRARIES ${GSL_LIBRARY} ${GSLCBLAS_LIBRARY}) + SET(cosmotool_DEPS ${cosmotool_DEPS} gsl) +ELSE(INTERNAL_GSL) + find_path(GSL_INCLUDE_PATH NAMES gsl/gsl_blas.h) + find_library(GSL_LIBRARY gsl) + find_library(GSLCBLAS_LIBRARY gslcblas) + + set(GSL_LIBRARIES ${GSL_LIBRARY} ${GSLCBLAS_LIBRARY}) + +ENDIF(INTERNAL_GSL) +mark_as_advanced(GSL_LIBRARY GSLCBLAS_LIBRARY GSL_INCLUDE_PATH) + + +############# +# Build FFTW +############# + +IF(INTERNAL_FFTW) + SET(EXTRA_FFTW_CONF) + IF(HAVE_SSE) + SET(EXTRA_FFTW_CONF ${EXTRA_FFTW_CONF} --enable-sse) + ENDIF(HAVE_SSE) + IF(HAVE_SSE2) + SET(EXTRA_FFTW_CONF ${EXTRA_FFTW_CONF} --enable-sse2) + ENDIF(HAVE_SSE2) + IF(HAVE_AVX) + SET(EXTRA_FFTW_CONF ${EXTRA_FFTW_CONF} --enable-avx) + ENDIF(HAVE_AVX) + + SET(cosmotool_DEPS ${cosmotool_DEPS} fftw) + SET(FFTW_SOURCE ${BUILD_PREFIX}/fftw-prefix/src/fftw) + ExternalProject_Add(fftw + URL ${FFTW_URL} + PREFIX ${BUILD_PREFIX}/fftw-prefix + CONFIGURE_COMMAND + ${FFTW_SOURCE}/configure + --prefix=${EXT_INSTALL} + ${EXTRA_FFTW_CONF} --disable-shared --enable-threads + BUILD_COMMAND ${CMAKE_MAKE_PROGRAM} + INSTALL_COMMAND ${CMAKE_MAKE_PROGRAM} install + ) + SET(FFTW3_LIBRARY_DIRS ${EXT_INSTALL}/lib) + SET(FFTW3_INCLUDE_PATH ${EXT_INSTALL}/include) + SET(FFTW3_THREADS ${EXT_INSTALL}/lib/libfftw3_threads.a) + SET(FFTW3_LIBRARIES ${EXT_INSTALL}/lib/libfftw3.a) + +ELSE (INTERNAL_FFTW) + pkg_check_modules(FFTW3 fftw3>=3.3) + pkg_check_modules(FFTW3F fftw3f>=3.3) + + find_library(FFTW3F_LIBRARY_FULL fftw3f PATHS ${FFTW3F_LIBDIR} NO_DEFAULT_PATH) + find_library(FFTW3_LIBRARY_FULL fftw3 PATHS ${FFTW3_LIBDIR} NO_DEFAULT_PATH) + 
+ENDIF(INTERNAL_FFTW) + + +############## +# Build Eigen +############## +IF (INTERNAL_EIGEN) + ExternalProject_Add(eigen + URL ${EIGEN_URL} + URL_HASH SHA256=04f8a4fa4afedaae721c1a1c756afeea20d3cdef0ce3293982cf1c518f178502 + PREFIX ${BUILD_PREFIX}/eigen-prefix + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${EXT_INSTALL} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + ) + SET(EIGEN3_INCLUDE_DIRS ${EXT_INSTALL}/include/eigen3) + +ELSE (INTERNAL_EIGEN) + if(DEFINED EIGEN_PATH) + set(_eigen_old_pkg_path $ENV{PKG_CONFIG_PATH}) + set(ENV{PKG_CONFIG_PATH} ${EIGEN_PATH}/share/pkgconfig) + endif() + pkg_check_modules(EIGEN3 NO_CMAKE_PATH NO_CMAKE_ENVIRONMENT_PATH REQUIRED eigen3) + if(DEFINED EIGEN_PATH) + set(ENV{PKG_CONFIG_PATH} ${_eigen_old_pkg_path}) + endif() + if (NOT EIGEN3_FOUND) + cmessage(CWARNING "Eigen library not found") + else() + cmessage(STATUS "Found EIGEN3 in ${EIGEN3_INCLUDE_DIRS}") + endif() +ENDIF(INTERNAL_EIGEN) + + + +SET(cosmotool_DEPS ${cosmotool_DEPS} omptl) +SET(OMPTL_BUILD_DIR ${BUILD_PREFIX}/omptl-prefix/src/omptl) +ExternalProject_Add(omptl + PREFIX ${BUILD_PREFIX}/omptl-prefix + URL ${CMAKE_SOURCE_DIR}/external/omptl-20120422.tar.bz2 + CONFIGURE_COMMAND echo "No configure" + BUILD_COMMAND echo "No build" + PATCH_COMMAND patch -p1 -t -N < ${CMAKE_SOURCE_DIR}/external/patch-omptl + INSTALL_COMMAND ${CMAKE_COMMAND} -E copy_directory ${OMPTL_BUILD_DIR} ${EXT_INSTALL}/include/omptl +) +include_directories(${EXT_INSTALL}/include) +##include_directories(${OMPTL_BUILD_DIR}/src/) + diff --git a/external/cosmotool/external/libsharp-6077806.tar.gz b/external/cosmotool/external/libsharp-6077806.tar.gz new file mode 100644 index 0000000..360bef9 Binary files /dev/null and b/external/cosmotool/external/libsharp-6077806.tar.gz differ diff --git a/external/cosmotool/external/omptl-20120422.tar.bz2 b/external/cosmotool/external/omptl-20120422.tar.bz2 new file mode 100644 index 0000000..cfbed07 Binary files /dev/null and 
b/external/cosmotool/external/omptl-20120422.tar.bz2 differ diff --git a/external/cosmotool/external/patch-omptl b/external/cosmotool/external/patch-omptl new file mode 100644 index 0000000..f9b2e6d --- /dev/null +++ b/external/cosmotool/external/patch-omptl @@ -0,0 +1,94 @@ +diff -ur omptl.orig/omptl_algorithm omptl/omptl_algorithm +--- omptl.orig/omptl_algorithm 2017-01-16 14:58:37.996690639 +0100 ++++ omptl/omptl_algorithm 2017-01-16 15:00:26.678641720 +0100 +@@ -20,7 +20,7 @@ + #define OMPTL_ALGORITHM 1 + + #include +-#include ++#include "omptl" + + namespace omptl + { +@@ -553,9 +553,9 @@ + } // namespace omptl + + #ifdef _OPENMP +- #include ++ #include "omptl_algorithm_par.h" + #else +- #include ++ #include "omptl_algorithm_ser.h" + #endif + + #endif /* OMPTL_ALGORITHM */ +diff -ur omptl.orig/omptl_algorithm_par.h omptl/omptl_algorithm_par.h +--- omptl.orig/omptl_algorithm_par.h 2017-01-16 14:58:37.996690639 +0100 ++++ omptl/omptl_algorithm_par.h 2017-01-16 14:59:57.974126410 +0100 +@@ -21,8 +21,8 @@ + #include + #include + +-#include +-#include ++#include "omptl_tools.h" ++#include "omptl_numeric" + + #include + +diff -ur omptl.orig/omptl_numeric omptl/omptl_numeric +--- omptl.orig/omptl_numeric 2017-01-16 14:58:37.996690639 +0100 ++++ omptl/omptl_numeric 2017-01-16 15:00:57.051186974 +0100 +@@ -19,7 +19,7 @@ + #define OMPTL_NUMERIC 1 + + #include +-#include ++#include "omptl" + + namespace omptl + { +@@ -73,11 +73,11 @@ + } // namespace omptl + + #ifdef _OPENMP +- #include ++ #include "omptl_numeric_par.h" + #else +- #include ++ #include "omptl_numeric_ser.h" + #endif + +-#include ++#include "omptl_numeric_extensions.h" + + #endif /* OMPTL_NUMERIC */ +diff -ur omptl.orig/omptl_numeric_extensions.h omptl/omptl_numeric_extensions.h +--- omptl.orig/omptl_numeric_extensions.h 2017-01-16 14:58:37.996690639 +0100 ++++ omptl/omptl_numeric_extensions.h 2017-01-16 14:59:21.549472508 +0100 +@@ -51,9 +51,9 @@ + } // namespace + + #ifdef _OPENMP +- #include ++ #include 
"omptl_numeric_extensions_par.h" + #else +- #include ++ #include "omptl_numeric_extensions_ser.h" + #endif + + namespace omptl +diff -ur omptl.orig/omptl_numeric_par.h omptl/omptl_numeric_par.h +--- omptl.orig/omptl_numeric_par.h 2017-01-16 14:58:37.996690639 +0100 ++++ omptl/omptl_numeric_par.h 2017-01-16 14:59:36.397739066 +0100 +@@ -23,8 +23,8 @@ + #include + #include + +-#include +-#include ++#include "omptl_algorithm" ++#include "omptl_tools.h" + + namespace omptl + { diff --git a/external/cosmotool/python/CMakeLists.txt b/external/cosmotool/python/CMakeLists.txt new file mode 100644 index 0000000..7513f38 --- /dev/null +++ b/external/cosmotool/python/CMakeLists.txt @@ -0,0 +1,111 @@ +set(CMAKE_SHARED_MODULE_PREFIX) + + +set(PYTHON_INCLUDES ${NUMPY_INCLUDE_DIRS} ${PYTHON_INCLUDE_PATH} ${CMAKE_SOURCE_DIR}/python) +include_directories(${CMAKE_SOURCE_DIR}/src ${CMAKE_BINARY_DIR}/src) + +IF(CYTHON) + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/_cosmotool.cpp + COMMAND ${CYTHON} --cplus -o ${CMAKE_CURRENT_BINARY_DIR}/_cosmotool.cpp ${CMAKE_CURRENT_SOURCE_DIR}/_cosmotool.pyx + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/_cosmotool.pyx) + + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/_cosmo_power.cpp + COMMAND ${CYTHON} --cplus -o ${CMAKE_CURRENT_BINARY_DIR}/_cosmo_power.cpp ${CMAKE_CURRENT_SOURCE_DIR}/_cosmo_power.pyx + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/_cosmo_power.pyx) + + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/_fast_interp.cpp + COMMAND ${CYTHON} --cplus -o ${CMAKE_CURRENT_BINARY_DIR}/_fast_interp.cpp ${CMAKE_CURRENT_SOURCE_DIR}/_fast_interp.pyx + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/_fast_interp.pyx) + + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/_cosmo_cic.cpp + COMMAND ${CYTHON} --cplus -o ${CMAKE_CURRENT_BINARY_DIR}/_cosmo_cic.cpp ${CMAKE_CURRENT_SOURCE_DIR}/_cosmo_cic.pyx + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/_cosmo_cic.pyx) + + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/_project.cpp + COMMAND 
${CYTHON} --cplus -o ${CMAKE_CURRENT_BINARY_DIR}/_project.cpp ${CMAKE_CURRENT_SOURCE_DIR}/_project.pyx + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/_project.pyx ${CMAKE_CURRENT_SOURCE_DIR}/project_tool.hpp ) + +ENDIF(CYTHON) + + +add_library(_cosmotool MODULE ${CMAKE_CURRENT_BINARY_DIR}/_cosmotool.cpp) +add_library(_cosmo_power MODULE ${CMAKE_CURRENT_BINARY_DIR}/_cosmo_power.cpp) +add_library(_cosmo_cic MODULE ${CMAKE_CURRENT_BINARY_DIR}/_cosmo_cic.cpp) +add_library(_fast_interp MODULE ${CMAKE_CURRENT_BINARY_DIR}/_fast_interp.cpp) +add_library(_project MODULE ${CMAKE_CURRENT_BINARY_DIR}/_project.cpp) +target_include_directories(_cosmotool PRIVATE ${PYTHON_INCLUDES}) +target_include_directories(_cosmo_power PRIVATE ${PYTHON_INCLUDES}) +target_include_directories(_cosmo_cic PRIVATE ${PYTHON_INCLUDES}) +target_include_directories(_fast_interp PRIVATE ${PYTHON_INCLUDES}) +target_include_directories(_project PRIVATE ${PYTHON_INCLUDES}) + + +SET(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Bsymbolic-functions") +if(APPLE) + set(CMAKE_MODULE_LINKER_FLAGS "-undefined dynamic_lookup") +endif() + +target_link_libraries(_cosmotool ${CosmoTool_local} ${PYTHON_LIBRARIES} ${GSL_LIBRARIES}) +target_link_libraries(_cosmo_power ${CosmoTool_local} ${PYTHON_LIBRARIES} ${GSL_LIBRARIES}) +target_link_libraries(_cosmo_cic ${CosmoTool_local} ${PYTHON_LIBRARIES} ${GSL_LIBRARIES}) +target_link_libraries(_project ${PYTHON_LIBRARIES}) +target_link_libraries(_fast_interp ${CosmoTool_local} ${PYTHON_LIBRARIES}) + +SET(ct_TARGETS _cosmotool _project _cosmo_power _cosmo_cic _fast_interp ) + +if (Boost_FOUND) + message(STATUS "Building bispectrum support (path = ${Boost_INCLUDE_DIRS})") + include_directories(${Boost_INCLUDE_DIRS}) + add_library(_cosmo_bispectrum MODULE _cosmo_bispectrum.cpp) + target_link_libraries(_cosmo_bispectrum ${MATH_LIBRARY}) + if(ENABLE_OPENMP) + set_target_properties(_cosmo_bispectrum PROPERTIES COMPILE_FLAGS "${OpenMP_CXX_FLAGS}" LINK_FLAGS "${OpenMP_CXX_FLAGS}") + 
endif() + if (Boost_DEP) + add_dependencies(_cosmo_bispectrum ${Boost_DEP}) + endif() + SET(ct_TARGETS ${ct_TARGETS} _cosmo_bispectrum) +endif() + +# Discover where to put packages +if (NOT PYTHON_SITE_PACKAGES) + execute_process (COMMAND ${PYTHON_EXECUTABLE} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())" OUTPUT_VARIABLE internal_PYTHON_SITE_PACKAGES OUTPUT_STRIP_TRAILING_WHITESPACE) + SET(SYSTEM_PYTHON_SITE_PACKAGES ${internal_PYTHON_SITE_PACKAGES} CACHE PATH "Path to the target system-wide site-package where to install python modules") + + execute_process (COMMAND ${PYTHON_EXECUTABLE} -c "from site import USER_SITE; print(USER_SITE)" OUTPUT_VARIABLE internal_PYTHON_SITE_PACKAGES OUTPUT_STRIP_TRAILING_WHITESPACE) + SET(USER_PYTHON_SITE_PACKAGES ${internal_PYTHON_SITE_PACKAGES} CACHE PATH "Path to the target user site-package where to install python modules") + + mark_as_advanced(USER_PYTHON_SITE_PACKAGES SYSTEM_PYTHON_SITE_PACKAGES) +endif (NOT PYTHON_SITE_PACKAGES) + +message(STATUS "System python site: ${SYSTEM_PYTHON_SITE_PACKAGES}") +message(STATUS "User python site: ${USER_PYTHON_SITE_PACKAGES}") + +OPTION(INSTALL_PYTHON_LOCAL OFF) + +IF (NOT INSTALL_PYTHON_LOCAL) + SET(PYTHON_SITE_PACKAGES ${SYSTEM_PYTHON_SITE_PACKAGES}) +ELSE (NOT INSTALL_PYTHON_LOCAL) + SET(PYTHON_SITE_PACKAGES ${USER_PYTHON_SITE_PACKAGES}) +ENDIF(NOT INSTALL_PYTHON_LOCAL) +cmessage(STATUS "Python install location: ${PYTHON_SITE_PACKAGES}") + + +if (WIN32 AND NOT CYGWIN) + SET_TARGET_PROPERTIES(_cosmotool PROPERTIES SUFFIX ".pyd") +endif (WIN32 AND NOT CYGWIN) + +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cosmotool/config.py.in ${CMAKE_CURRENT_BINARY_DIR}/cosmotool/config.py @ONLY) + +INSTALL(TARGETS + ${ct_TARGETS} + LIBRARY DESTINATION ${PYTHON_SITE_PACKAGES}/cosmotool +) + +INSTALL(DIRECTORY cosmotool ${CMAKE_CURRENT_BINARY_DIR}/cosmotool DESTINATION ${PYTHON_SITE_PACKAGES} + FILES_MATCHING PATTERN "*.py") diff --git 
a/external/cosmotool/python/_cosmo_bispectrum.cpp b/external/cosmotool/python/_cosmo_bispectrum.cpp new file mode 100644 index 0000000..db6a6e0 --- /dev/null +++ b/external/cosmotool/python/_cosmo_bispectrum.cpp @@ -0,0 +1,278 @@ +#ifdef _OPENMP +#include +#endif +#include +#include +#include +#include +#include +#include "symbol_visible.hpp" +#include "algo.hpp" + +using std::cout; +using std::endl; +using boost::format; +using CosmoTool::square; + + +struct ModeSet +{ + ssize_t N1, N2, N3; + bool half_copy; + + struct TriangleIterator + { + ssize_t i1, i2, i3; + ssize_t N1, N2, N3; + ssize_t first_iteration; + + + TriangleIterator& operator++() { + i3++; + if (i3==(N3/2+1)) { i3 = first_iteration; i2++; } + if (i2==(N2/2+1)) { i2 = -N2/2; i1++; } + return *this; + } + + bool operator!=(const TriangleIterator& t) const { + return i1!=t.i1 || i2!=t.i2 || i3 != t.i3; + } + + bool in_box() const { + ssize_t hN1 = N1/2, hN2 = N2/2, hN3 = N3/2; + + return (i1 >= -hN1) && (i1 <= hN1) && (i2 >= -hN2) && (i2 <= hN2) && (i3 >= -hN3) && (i3 <= hN3); + } + + TriangleIterator operator+(const TriangleIterator& other_t) const { + TriangleIterator t = *this; + + t.i1 = (t.i1+other_t.i1); + t.i2 = (t.i2+other_t.i2); + t.i3 = (t.i3+other_t.i3); + return t; + } + + TriangleIterator& inp_array() { + if (i1 < 0) + i1 += N1; + if (i2 < 0) + i2 += N2; + if (i3 < 0) + i3 += N3; + return *this; + } + + TriangleIterator array() const { + TriangleIterator t = *this; + + t.inp_array(); + return t; + } + + TriangleIterator real() const { + TriangleIterator t = *this; + if (t.i1 >= N1/2) + t.i1 -= N1; + if (t.i2 >= N2/2) + t.i2 -= N2; + if (t.i3 >= N3/2) + t.i3 -= N3; + return t; + } + + double norm() const { + double r1 = i1, r2 = i2, r3 = i3; + return std::sqrt(r1*r1 + r2*r2 + r3*r3); + } + void reverse() { i1=-i1; i2=-i2; i3=-i3; } + + TriangleIterator& operator*() { return *this; } + }; + + ModeSet(size_t N1_, size_t N2_, size_t N3_, bool _half_copy = false) + : N1(N1_), N2(N2_), 
N3(N3_),half_copy(_half_copy) { + } + + TriangleIterator begin() const { + TriangleIterator t; + t.i1 = -N1/2; + t.i2 = -N2/2; + if (half_copy) + t.first_iteration = t.i3 = 0; + else + t.first_iteration = t.i3 = -N3/2; + t.N1 = N1; + t.N2 = N2; + t.N3 = N3; + return t; + } + + TriangleIterator end() const { + TriangleIterator t; + t.first_iteration = (half_copy ? 0 : (-N3/2)); + t.i3 = t.first_iteration; + t.i2 = -N2/2; + t.i1 = N1/2+1; + t.N1 = N1; + t.N2 = N2; + t.N3 = N3; + return t; + } + +}; + +std::ostream& operator<<(std::ostream& o, const ModeSet::TriangleIterator& t) +{ + o << t.i1 << "," << t.i2 << "," << t.i3; + return o; +} + +template +static T no_conj(const T& a) { return a; } + +template +static inline void accum_bispec(const Delta& delta_mirror, SubArrayB b_Nt, SubArrayCnt b_B, + const typename Delta::element& v1, const typename Delta::element& v2, + const ModeSet::TriangleIterator& rm1, + const ModeSet::TriangleIterator& rm2, + const ModeSet::TriangleIterator& rm3, + double delta_k, + size_t Nk) +{ + typedef std::complex CType; + + size_t q1 = std::floor(rm1.norm()/delta_k); + if (q1 >= Nk) + return; + + size_t q2 = std::floor(rm2.norm()/delta_k); + if (q2 >= Nk) + return; + + size_t q3 = std::floor(rm3.norm()/delta_k); + if (q3 >= Nk) + return; + + CType prod = v1*v2; + ModeSet::TriangleIterator m3 = rm3; + // We use hermitic symmetry to get -m3, it is just the mode in m3 but conjugated. 
+ m3.reverse(); + m3.inp_array(); + prod *= delta_mirror[m3.i1][m3.i2][m3.i3]; + + b_Nt[q1][q2][q3] ++; + b_B[q1][q2][q3] += prod; +} + +extern "C" CTOOL_DLL_PUBLIC +void CosmoTool_compute_bispectrum( + double *delta_hat, size_t Nx, size_t Ny, size_t Nz, + size_t *Ntriangles, + double* B, double delta_k, size_t Nk ) +{ + // First remap to multi_array for easy access + size_t kNz = Nz/2+1; +#ifdef _OPENMP + int Ntasks = omp_get_max_threads(); +#else + int Ntasks = 1; +#endif + boost::multi_array_ref, 3> a_delta(reinterpret_cast*>(delta_hat), boost::extents[Nx][Ny][kNz]); + boost::multi_array_ref a_Nt(Ntriangles, boost::extents[Nk][Nk][Nk]); + boost::multi_array_ref, 3> a_B(reinterpret_cast*>(B), boost::extents[Nk][Nk][Nk]); + boost::multi_array, 4> b_B(boost::extents[Ntasks][Nk][Nk][Nk]); + boost::multi_array b_Nt(boost::extents[Ntasks][Nk][Nk][Nk]); + typedef std::complex CType; + boost::multi_array, 3> delta_mirror(boost::extents[Nx][Ny][Nz]); + + // Add hermiticity + for (auto m : ModeSet(Nx, Ny, Nz, true)) { + auto n1 = m; + auto n2 = m.array(); + n1.reverse(); + n1.inp_array(); + delta_mirror[n2.i1][n2.i2][n2.i3] = (a_delta[n2.i1][n2.i2][n2.i3]); + delta_mirror[n1.i1][n1.i2][n1.i3] = std::conj(delta_mirror[n2.i1][n2.i2][n2.i3]); + } + +#ifdef _OPENMP + // First loop over m1 +#pragma omp parallel + { +#pragma omp single + { + for (auto m1 : ModeSet(Nx, Ny, Nz)) { + auto am1 = m1.array(); + CType v1 = delta_mirror[am1.i1][am1.i2][am1.i3]; + int tid = omp_get_thread_num(); + +#pragma omp task + { + auto rm1 = m1.real(); + // Second mode m2 + for (auto m2 : ModeSet(Nx, Ny, Nz)) { + // Now derive m3 + auto am2 = m2.array(); + auto m3 = (m1+m2); + CType v2 = delta_mirror[am2.i1][am2.i2][am2.i3]; + + // Not in Fourier box, stop here + if (!m3.in_box()) + continue; + + accum_bispec(delta_mirror, b_Nt[tid], b_B[tid], v1, v2, m1, m2, m3, delta_k, Nk); + + } + } + + } + } + } + +#pragma omp taskwait + for (int tid = 0; tid < Ntasks; tid++) { + size_t *b_p = 
b_Nt[tid].origin(); + size_t *a_p = a_Nt.data(); + std::complex *b_B_p = b_B[tid].origin(); + std::complex *a_B_p = a_B.origin(); +//#pragma omp simd +#pragma omp parallel for + for (size_t q = 0; q < Nk*Nk*Nk; q++) { + a_p[q] += b_p[q]; + a_B_p[q] += b_B_p[q]; + } + } +#else +#warning Serial version not implemented +#endif +} + + +extern "C" CTOOL_DLL_PUBLIC +void CosmoTool_compute_powerspectrum( + double *delta_hat, size_t Nx, size_t Ny, size_t Nz, + size_t *Ncounts, + double* P, double delta_k, size_t Nk ) +{ + // First remap to multi_array for easy access + size_t kNz = Nz/2+1; + boost::multi_array_ref, 3> a_delta(reinterpret_cast*>(delta_hat), boost::extents[Nx][Ny][kNz]); + boost::multi_array_ref a_Nc(Ncounts, boost::extents[Nk]); + boost::multi_array_ref a_P(reinterpret_cast(P), boost::extents[Nk]); + typedef std::complex CType; + + // First loop over m1 + for (auto m : ModeSet(Nx, Ny, kNz)) { + auto m1 = m.array(); + CType& v1 = a_delta[m1.i1][m1.i2][m1.i3]; + + size_t q1 = std::floor(m.norm()/delta_k); + + if (q1 >= Nk) + continue; + + a_Nc[q1] ++; + a_P[q1] += std::norm(v1); + } +} diff --git a/external/cosmotool/python/_cosmo_cic.pyx b/external/cosmotool/python/_cosmo_cic.pyx new file mode 100644 index 0000000..cdb6a23 --- /dev/null +++ b/external/cosmotool/python/_cosmo_cic.pyx @@ -0,0 +1,76 @@ +from libcpp cimport bool +from libcpp cimport string as cppstring +from libcpp cimport vector as cppvector +import numpy as np +cimport numpy as np +from cpython cimport PyObject, Py_INCREF +cimport cython + +np.import_array() + +cdef extern from "cic.hpp" namespace "CosmoTool": + + ctypedef float CICType + ctypedef float Coordinates[3] + + cdef cppclass CICParticles: + float mass + Coordinates coords + + cdef cppclass CICFilter: + + CICFilter(np.uint32_t resolution, double L) nogil + + void resetMesh() nogil + void putParticles(CICParticles* particles, np.uint32_t N) nogil + void getDensityField(CICType *& field, np.uint32_t& res) nogil + + 
+@cython.boundscheck(False) +@cython.cdivision(True) +@cython.wraparound(False) +def leanCic(float[:,:] particles, float L, int Resolution): + cdef cppvector.vector[CICParticles] *p + cdef CICFilter *cic + cdef np.uint64_t i + cdef CICType *field + cdef np.uint32_t dummyRes + cdef np.ndarray[np.float64_t, ndim=3] out_field + cdef np.ndarray[np.float64_t, ndim=1] out_field0 + cdef np.float64_t[:] out_field_buf + cdef np.uint64_t j + + cic = new CICFilter(Resolution, L) + print("Reset mesh") + cic.resetMesh() + + if particles.shape[1] != 3: + raise ValueError("Particles must be Nx3 array") + + print("Inserting particles") +# p = new cppvector.vector[CICParticles](particles.shape[0]) +# for i in xrange(particles.shape[0]): +# *p[i].mass = 1 +# *p[i].coords[0] = particles[i,0] +# *p[i].coords[1] = particles[i,1] +# *p[i].coords[2] = particles[i,2] + +# cic.putParticles(&p[0], particles.shape[0]) + del p + + print("Done") + field = 0 + dummyRes = 0 + cic.getDensityField(field, dummyRes) + + print("Got to allocate a numpy %dx%dx%d" % (dummyRes, dummyRes,dummyRes)) + + out_field = np.empty((dummyRes, dummyRes, dummyRes), dtype=np.float64) + out_field0 = out_field.reshape(out_field.size) + out_field_buf = out_field + print("Copy") + for j in xrange(out_field_buf.size): + out_field_buf[j] = field[j] + + del cic + return out_field diff --git a/external/cosmotool/python/_cosmo_power.pyx b/external/cosmotool/python/_cosmo_power.pyx new file mode 100644 index 0000000..47e7bab --- /dev/null +++ b/external/cosmotool/python/_cosmo_power.pyx @@ -0,0 +1,153 @@ +from libcpp cimport bool +from libcpp cimport string as cppstring +import numpy as np +cimport numpy as np +from cpython cimport PyObject, Py_INCREF +cimport cython + +np.import_array() + +cdef extern from "cosmopower.hpp" namespace "CosmoTool": + + cdef enum CosmoFunction "CosmoTool::CosmoPower::CosmoFunction": + POWER_EFSTATHIOU "CosmoTool::CosmoPower::POWER_EFSTATHIOU", + HU_WIGGLES "CosmoTool::CosmoPower::HU_WIGGLES", + 
HU_BARYON "CosmoTool::CosmoPower::HU_BARYON", + OLD_POWERSPECTRUM, + POWER_BARDEEN "CosmoTool::CosmoPower::POWER_BARDEEN", + POWER_SUGIYAMA "CosmoTool::CosmoPower::POWER_SUGIYAMA", + POWER_BDM, + POWER_TEST, + HU_WIGGLES_ORIGINAL "CosmoTool::CosmoPower::HU_WIGGLES_ORIGINAL" + + cdef cppclass CosmoPower: + double n + double K0 + double V_LG_CMB + + double CMB_VECTOR[3] + double h + double SIGMA8 + double OMEGA_B + double OMEGA_C + double omega_B + double omega_C + double Theta_27 + double OMEGA_0 + double Omega + double beta + double OmegaEff + double Gamma0 + double normPower + + + CosmoPower() + void setFunction(CosmoFunction) + void updateCosmology() + void updatePhysicalCosmology() + void normalize(double,double) + void setNormalization(double) + double power(double) + +cdef class CosmologyPower: + """CosmologyPower(**cosmo) + + CosmologyPower manages and compute power spectra computation according to different + approximation given in the litterature. + + Keyword arguments: + omega_B_0 (float): relative baryon density + omega_M_0 (float): relative matter density + h (float): Hubble constant relative to 100 km/s/Mpc + ns (float): power law of the large scale inflation spectrum + """ + + cdef CosmoPower power + + def __init__(self,**cosmo): + self.power = CosmoPower() + self.power.OMEGA_B = cosmo['omega_B_0'] + self.power.OMEGA_C = cosmo['omega_M_0']-cosmo['omega_B_0'] + self.power.h = cosmo['h'] + if 'ns' in cosmo: + self.power.n = cosmo['ns'] + + assert self.power.OMEGA_C > 0 + + self.power.updateCosmology() + + def setNormalization(self,A): + self.power.setNormalization(A) + + def normalize(self,s8,k_min=-1,k_max=-1): + """normalize(self, sigma8) + + Compute the normalization of the power spectrum using sigma8. 
+ + Arguments: + sigma8 (float): standard deviation of density field smoothed at 8 Mpc/h + """ + self.power.SIGMA8 = s8 + self.power.normalize(k_min, k_max) + + + def setFunction(self,funcname): + """setFunction(self, funcname) + + Choose an approximation to use for the computation of the power spectrum + + Arguments: + funcname (str): the name of the approximation. It can be either + EFSTATHIOU, HU_WIGGLES, HU_BARYON, BARDEEN or SUGIYAMA. + """ + cdef CosmoFunction f + + f = POWER_EFSTATHIOU + + if funcname=='EFSTATHIOU': + f = POWER_EFSTATHIOU + elif funcname=='HU_WIGGLES': + f = HU_WIGGLES + elif funcname=='HU_BARYON': + f = HU_BARYON + elif funcname=='BARDEEN': + f = POWER_BARDEEN + elif funcname=='SUGIYAMA': + f = POWER_SUGIYAMA + elif funcname=='HU_WIGGLES_ORIGINAL': + f = HU_WIGGLES_ORIGINAL + else: + raise ValueError("Unknown function name " + funcname) + + self.power.setFunction(f) + + cdef double _compute(self, double k): + k *= self.power.h + return self.power.power(k) * self.power.h**3 + + def compute(self, k): + """compute(self, k) + + Compute the power spectrum for mode which length k. + + Arguments: + k (float): Mode for which to evaluate the power spectrum. + It can be a scalar or a numpy array. + The units must be in 'h Mpc^{-1}'. 
+ + Returns: + a scalar or a numpy array depending on the type of the k argument + """ + + cdef np.ndarray out + cdef double kval + cdef tuple i + + if isinstance(k, np.ndarray): + out = np.empty(k.shape, dtype=np.float64) + for i,kval in np.ndenumerate(k): + out[i] = self._compute(kval) + return out + else: + return self._compute(k) + diff --git a/external/cosmotool/python/_cosmotool.pyx b/external/cosmotool/python/_cosmotool.pyx new file mode 100644 index 0000000..61ba1a0 --- /dev/null +++ b/external/cosmotool/python/_cosmotool.pyx @@ -0,0 +1,560 @@ +from libcpp cimport bool +from libcpp cimport string as cppstring +from libcpp.vector cimport vector as cppvector +from cython.parallel cimport prange +import numpy as np +cimport numpy as np +from cpython cimport PyObject, Py_INCREF +cimport cython + +np.import_array() + +cdef extern from "sys/types.h": + ctypedef np.int64_t int64_t + +cdef extern from "loadSimu.hpp" namespace "CosmoTool": + + cdef cppclass SimuData: + np.float_t BoxSize + np.float_t time + np.float_t Hubble + + np.float_t Omega_M + np.float_t Omega_Lambda + np.int64_t TotalNumPart + np.int64_t NumPart + int64_t *Id + float *Pos[3] + float *Vel[3] + int *type + float *Mass + + bool noAuto + + cdef const int NEED_GADGET_ID + cdef const int NEED_POSITION + cdef const int NEED_VELOCITY + cdef const int NEED_TYPE + cdef const int NEED_MASS + +cdef extern from "loadGadget.hpp" namespace "CosmoTool": + + SimuData *loadGadgetMulti(const char *fname, int id, int flags, int gformat) nogil except + + void cxx_writeGadget "CosmoTool::writeGadget" (const char * s, SimuData *data) except + + +cdef extern from "safe_gadget.hpp": + SimuData *loadGadgetMulti_safe(cppstring.string s, int flags, int gformat) nogil + SimuData **alloc_simudata(int num) nogil + void del_simudata(SimuData **d) nogil + +cdef extern from "cppHelper.hpp": + int customCosmotoolHandler() nogil + +cdef extern from "loadRamses.hpp" namespace "CosmoTool": + SimuData *loadRamsesSimu(const char 
*basename, int id, int cpuid, bool dp, int flags) except +customCosmotoolHandler + +class PySimulationBase(object): + """ + This is the base class to representation Simulation in CosmoTool/python. + """ + + def getPositions(self): + """ + getPositions(self) + + Returns: + A list of three arrays holding the positions of the particles. + The i-th element is the i-th coordinate of each particle. + It may be None if the positions were not requested. + """ + raise NotImplementedError("getPositions is not implemented") + + def getVelocities(self): + """ + getVelocities(self) + + Returns: + A list of three arrays holding the velocities of the particles. + The i-th element is the i-th coordinate of each particle. + It may be None if the velocities were not requested. + """ + raise NotImplementedError("getVelocities is not implemented") + + def getIdentifiers(self): + """ + getIdentifiers(self) + + Returns: + Returns an integer array that hold the unique identifiers of + each particle. + It may be None if the identifiers were not requested. + """ + raise NotImplementedError("getIdentifiers is not implemented") + + def getTypes(self): + """ + getTypes(self) + + Returns: + Returns an integer array that hold the type of + each particle. + It may be None if the types were not requested. + """ + raise NotImplementedError("getTypes is not implemented") + + def getOmega_M(self): + """ + getOmega_M(self) + + Returns: + the mean matter density in the simulation, with respect + to the critical density. + """ + raise NotImplementedError("getOmega_M is not implemented") + + def getOmega_Lambda(self): + """ + getOmega_Lambda(self) + + Returns: + the mean dark energy density in the simulation, with respect + to the critical density. + """ + raise NotImplementedError("getOmega_Lambda is not implemented") + + def getTime(self): + """ + getTime(self) + + Returns: + the time the snapshot was taken in the simulation. It can + have various units depending on the file format. 
+ """ + raise NotImplementedError("getTime is not implemented") + + def getHubble(self): + """ + getHubble(self) + + Returns: + the hubble constant in unit of 100 km/s/Mpc + """ + raise NotImplementedError("getHubble is not implemented") + + def getBoxsize(self): + """ + getBoxsize(self) + + Returns: + the size of the simulation box. The length unit is not fixed, + though it is customary to have it in Mpc/h if the loader has + access to the unit normalization. + """ + raise NotImplementedError("getBoxsize is not implemented") + + def getMasses(self): + """ + getMasses(self) + + Returns: + an array with the masses of each particles, in unspecified unit that + depend on the loader. + """ + raise NotImplementedError("getMasses is not implemented") + +cdef class Simulation: + """ + Simulation() + + Class that directly manages internal loaded data obtained from a loader + """ + + cdef list positions + cdef list velocities + cdef object identifiers + cdef object types + cdef object masses + + cdef SimuData *data + + property BoxSize: + def __get__(Simulation self): + return self.data.BoxSize + + property time: + def __get__(Simulation self): + return self.data.time + + property Hubble: + def __get__(Simulation self): + return self.data.Hubble + + property Omega_M: + def __get__(Simulation self): + return self.data.Omega_M + + property Omega_Lambda: + def __get__(Simulation self): + return self.data.Omega_Lambda + + property positions: + def __get__(Simulation self): + return self.positions + + property velocities: + def __get__(Simulation self): + return self.velocities + + property identifiers: + def __get__(Simulation self): + return self.identifiers + + property types: + def __get__(Simulation self): + return self.types + + property masses: + def __get__(Simulation self): + return self.masses + + property numParticles: + def __get__(Simulation self): + return self.data.NumPart + + + property totalNumParticles: + def __get__(Simulation self): + return 
self.data.TotalNumPart + + def __cinit__(Simulation self): + self.data = 0 + + def __dealloc__(Simulation self): + if self.data != 0: + del self.data + + +class PySimulationAdaptor(PySimulationBase): + """ + PySimulationAdaptor(PySimulationBase_) + + This class is an adaptor for an internal type to the loader. It defines + all the methods of PySimulationBase. + + Attributes: + simu: a Simulation_ object + """ + def __init__(self,sim): + self.simu = sim + + def getBoxsize(self): + return self.simu.BoxSize + + def getPositions(self): + return self.simu.positions + + def getTypes(self): + return self.simu.types + + def getVelocities(self): + return self.simu.velocities + + def getIdentifiers(self): + return self.simu.identifiers + + def getTime(self): + return self.simu.time + + def getHubble(self): + return self.simu.Hubble + + def getOmega_M(self): + return self.simu.Omega_M + + def getOmega_Lambda(self): + return self.simu.Omega_Lambda + + def getMasses(self): + return self.simu.masses + +cdef class ArrayWrapper: + cdef void* data_ptr + cdef np.uint64_t size + cdef int type_array + + cdef set_data(self, np.uint64_t size, int type_array, void* data_ptr): + """ Set the data of the array + This cannot be done in the constructor as it must recieve C-level + arguments. + + Args: + size (int): Length of the array. + data_ptr (void*): Pointer to the data + """ + self.data_ptr = data_ptr + self.size = size + self.type_array = type_array + + def __array__(self): + """ Here we use the __array__ method, that is called when numpy +tries to get an array from the object.""" + cdef np.npy_intp shape[1] + + shape[0] = self.size + # Create a 1D array, of length 'size' + ndarray = np.PyArray_SimpleNewFromData(1, shape, self.type_array, self.data_ptr) + return ndarray + + def __dealloc__(self): + """ Frees the array. This is called by Python when all the +references to the object are gone. 
""" + pass + +cdef object wrap_array(void *p, np.uint64_t s, int typ): + cdef np.ndarray ndarray + cdef ArrayWrapper wrapper + + wrapper = ArrayWrapper() + wrapper.set_data(s, typ, p) + ndarray = np.array(wrapper, copy=False) + ndarray.base = wrapper + Py_INCREF(wrapper) + + return ndarray + + +cdef object wrap_float_array(float *p, np.uint64_t s): + return wrap_array(p, s, np.NPY_FLOAT32) + +cdef object wrap_int64_array(int64_t* p, np.uint64_t s): + return wrap_array(p, s, np.NPY_INT64) + +cdef object wrap_int_array(int* p, np.uint64_t s): + return wrap_array(p, s, np.NPY_INT) + +cdef object wrap_simudata(SimuData *data, int flags): + cdef Simulation simu + + simu = Simulation() + simu.data = data + if flags & NEED_POSITION: + simu.positions = [wrap_float_array(data.Pos[i], data.NumPart) for i in xrange(3)] + else: + simu.positions = None + + if flags & NEED_VELOCITY: + simu.velocities = [wrap_float_array(data.Vel[i], data.NumPart) for i in xrange(3)] + else: + simu.velocities = None + + if flags & NEED_GADGET_ID: + simu.identifiers = wrap_int64_array(data.Id, data.NumPart) + else: + simu.identifiers = None + + if flags & NEED_TYPE: + simu.types = wrap_int_array(data.type, data.NumPart) + else: + simu.types = None + + if flags & NEED_MASS: + simu.masses = wrap_float_array(data.Mass, data.NumPart) + else: + simu.masses = None + + return simu + +def loadGadget(str filename, int snapshot_id, int gadgetFormat = 1, bool loadPosition = True, bool loadVelocity = False, bool loadId = False, bool loadType = False, bool loadMass=False): + """loadGadget(filename, snapshot_id, gadgetFormat = 1, loadPosition=True, loadVelocity=False, loadId=False, loadType=False) + + This function loads Gadget-1 snapshot format. + + If snapshot_id is negative then the snapshot is considered not to be part of + a set of snapshots written by different cpu. Otherwise the filename is modified + to reflect the indicated snapshot_id. 
+ + Arguments: + filename (str): input filename + snapshot_id (int): identifier of the gadget file if it is a multi-file snapshot + + Keyword arguments: + loadPosition (bool): whether to load positions + loadVelocity (bool): whether to load velocities + loadId (bool): whether to load unique identifiers + loadType (bool): whether to set types to particles + loadMass (bool): whether to set the mass of particles + + Returns: + an PySimulationAdaptor instance. +""" + + cdef int flags + cdef SimuData *data + cdef Simulation simu + cdef const char *filename_bs + + flags = 0 + if loadPosition: + flags |= NEED_POSITION + if loadVelocity: + flags |= NEED_VELOCITY + if loadId: + flags |= NEED_GADGET_ID + if loadType: + flags |= NEED_TYPE + if loadMass: + flags |= NEED_MASS + + filename_b = bytes(filename, 'utf-8') + filename_bs = filename_b + with nogil: + data = loadGadgetMulti(filename_bs, snapshot_id, flags, gadgetFormat) + if data == 0: + return None + + return PySimulationAdaptor(wrap_simudata(data, flags)) + +def loadParallelGadget(object filename_list, int gadgetFormat = 1, bool loadPosition = True, bool loadVelocity = False, bool loadId = False, bool loadType = False, bool loadMass=False): + """loadParallelGadget(filename list, gadgetFormat=1, loadPosition=True, loadVelocity=False, loadId=False, loadType=False) + + Arguments: + filename (list): a list or tuple of filenames to load in parallel + + Keyword arguments: + loadPosition (bool): indicate to load positions + loadVelocity (bool): indicate to load velocities + loadId (bool): indicate to load id + loadType (bool): indicate to load particle types + + Returns: + It loads a gadget-1 snapshot and return a cosmotool.PySimulationBase_ object. 
+ """ + cdef int flags, i, num_files + cdef list out_arrays + cdef SimuData ** data + cdef SimuData * local_data + cdef Simulation simu + cdef cppvector[cppstring.string] filenames + + flags = 0 + if loadPosition: + flags |= NEED_POSITION + if loadVelocity: + flags |= NEED_VELOCITY + if loadId: + flags |= NEED_GADGET_ID + if loadType: + flags |= NEED_TYPE + if loadMass: + flags |= NEED_MASS + + num_files = len(filename_list) + filenames.resize(num_files) + data = alloc_simudata(num_files) + for i,l in enumerate(filename_list): + filenames[i] = l.encode('utf-8') + + with nogil: + for i in prange(num_files): + local_data = loadGadgetMulti_safe(filenames[i], flags, gadgetFormat) + data[i] = local_data +# data[i] = loadGadgetMulti(filenames[i].c_str(), -1, flags) + + out_arrays = [] + for i in xrange(num_files): + if data[i] == 0: + out_arrays.append(None) + else: + out_arrays.append(PySimulationAdaptor(wrap_simudata(data[i], flags))) + + del_simudata(data) + + return out_arrays + +def writeGadget(str filename, object simulation): + """writeGadget(filename, simulation) + + This function attempts to write the content of the simulation object into + a file named `filename` using a Gadget-1 file format. 
+ + Arguments: + filename (str): output filename + simulation (PySimulationBase): a simulation object + """ + cdef SimuData simdata + cdef np.ndarray[np.float32_t, ndim=1] pos, vel + cdef np.ndarray[np.int64_t, ndim=1] ids + cdef np.int64_t NumPart + cdef int j + + if not isinstance(simulation,PySimulationBase): + raise TypeError("Second argument must be of type SimulationBase") + + NumPart = simulation.positions[0].size + simdata.noAuto = True + + for j in xrange(3): + pos = simulation.getPositions()[j] + vel = simulation.getVelocities()[j] + + if pos.size != NumPart or vel.size != NumPart: + raise ValueError("Invalid number of particles") + + simdata.Pos[j] = pos.data + simdata.Vel[j] = vel.data + + ids = simulation.getIdentifiers() + simdata.Id = ids.data + simdata.BoxSize = simulation.getBoxsize() + simdata.time = simulation.getTime() + simdata.Hubble = simulation.getHubble() + simdata.Omega_M = simulation.getOmega_M() + simdata.Omega_Lambda = simulation.getOmega_Lambda() + simdata.TotalNumPart = NumPart + simdata.NumPart = NumPart + + cxx_writeGadget(filename, &simdata) + +def loadRamses(str basepath, int snapshot_id, int cpu_id, bool doublePrecision = False, bool loadPosition = True, bool loadVelocity = False, bool loadId = False, bool loadMass = False): + """ loadRamses(basepath, snapshot_id, cpu_id, doublePrecision = False, loadPosition = True, loadVelocity = False) + Loads the indicated snapshot based on the cpu id, snapshot id and basepath. It is important to specify the correct precision in doublePrecision otherwise the loading will fail. There is no way of auto-detecting properly the precision of the snapshot file. 
+ + Args: + basepath (str): the base directory of the snapshot + snapshot_id (int): the snapshot id + cpu_id (int): the cpu id of the file to load + + Keyword args: + doublePrecision (bool): By default it is False, thus singlePrecision + loadPosition (bool): Whether to load positions + loadVelocity (bool): Whether to load velocities + loadId (bool): Whether to load identifiers + loadMass (bool): Whether to load mass value + + Returns: + An object derived from PySimulationBase_. +""" + cdef int flags + cdef SimuData *data + cdef Simulation simu + + flags = 0 + if loadPosition: + flags |= NEED_POSITION + if loadVelocity: + flags |= NEED_VELOCITY + if loadId: + flags |= NEED_GADGET_ID + if loadMass: + flags |= NEED_MASS + + encpath = basepath.encode('utf-8') + try: + data = loadRamsesSimu(encpath, snapshot_id, cpu_id, doublePrecision, flags) + if data == 0: + return None + except RuntimeError as e: + raise RuntimeError(str(e) + ' (check the float precision in snapshot)') + + return PySimulationAdaptor(wrap_simudata(data, flags)) + diff --git a/external/cosmotool/python/_fast_interp.pyx b/external/cosmotool/python/_fast_interp.pyx new file mode 100644 index 0000000..b13b396 --- /dev/null +++ b/external/cosmotool/python/_fast_interp.pyx @@ -0,0 +1,43 @@ +from cpython cimport bool +from cython cimport view +from cython.parallel import prange, parallel +from libc.math cimport sin, cos, abs, floor, round, sqrt +import numpy as np +cimport numpy as npx +cimport cython + +__all__=["fast_interp"] + + +@cython.boundscheck(False) +@cython.cdivision(True) +def fast_interp(xmin0, dx0, A0, y0, out0, beyond_val=np.nan): + cdef double rq, q + cdef int iq + cdef long i, Asize, ysize + cdef npx.float64_t xmin, dx + cdef npx.float64_t[:] out + cdef npx.float64_t[:] A, y + cdef npx.float64_t beyond + + beyond=beyond_val + xmin = xmin0 + dx = dx0 + A = A0 + y = y0 + ysize = y.size + out = out0 + Asize = A.size + + if out.size != ysize: + raise ValueError("out and y must have the same 
size") + + with nogil: + for i in prange(ysize): + q = (y[i] - xmin) / dx + iq = int(floor(q)) + rq = (q-iq) + if iq+1 >= Asize or iq < 0: + out[i] = beyond + else: + out[i] = rq * A[iq+1] + (1-rq)*A[iq] diff --git a/external/cosmotool/python/_project.pyx b/external/cosmotool/python/_project.pyx new file mode 100644 index 0000000..9d61cec --- /dev/null +++ b/external/cosmotool/python/_project.pyx @@ -0,0 +1,890 @@ +from cpython cimport bool +from cython cimport view +from cython.parallel import prange, parallel +from libc.math cimport sin, cos, abs, floor, round, sqrt +import numpy as np +cimport numpy as npx +cimport cython +from copy cimport * + +ctypedef npx.float64_t DTYPE_t +DTYPE=np.float64 +FORMAT_DTYPE="d" + +__all__=["project_cic","line_of_sight_projection","spherical_projection","DTYPE","interp3d","interp2d"] + +cdef extern from "project_tool.hpp" namespace "": + + DTYPE_t compute_projection(DTYPE_t *vertex_value, DTYPE_t *u, DTYPE_t *u0, DTYPE_t rho) nogil + +cdef extern from "openmp.hpp" namespace "CosmoTool": + int smp_get_max_threads() nogil + int smp_get_thread_id() nogil + + +@cython.boundscheck(False) +@cython.cdivision(True) +@cython.wraparound(False) +cdef void interp3d_INTERNAL_periodic(DTYPE_t x, DTYPE_t y, + DTYPE_t z, + DTYPE_t[:,:,:] d, DTYPE_t Lbox, DTYPE_t *retval) nogil: + + cdef int Ngrid = d.shape[0] + cdef DTYPE_t inv_delta = Ngrid/Lbox + cdef int ix, iy, iz + cdef DTYPE_t f[2][2][2] + cdef DTYPE_t rx, ry, rz + cdef int jx, jy, jz + + rx = (inv_delta*x) + ry = (inv_delta*y) + rz = (inv_delta*z) + + ix = int(floor(rx)) + iy = int(floor(ry)) + iz = int(floor(rz)) + + + rx -= ix + ry -= iy + rz -= iz + + ix = ix % Ngrid + iy = iy % Ngrid + iz = iz % Ngrid + + jx = (ix+1)%Ngrid + jy = (iy+1)%Ngrid + jz = (iz+1)%Ngrid + + ix = ix%Ngrid + iy = iy%Ngrid + iz = iz%Ngrid + + f[0][0][0] = (1-rx)*(1-ry)*(1-rz) + f[1][0][0] = ( rx)*(1-ry)*(1-rz) + f[0][1][0] = (1-rx)*( ry)*(1-rz) + f[1][1][0] = ( rx)*( ry)*(1-rz) + + f[0][0][1] = (1-rx)*(1-ry)*( 
rz) + f[1][0][1] = ( rx)*(1-ry)*( rz) + f[0][1][1] = (1-rx)*( ry)*( rz) + f[1][1][1] = ( rx)*( ry)*( rz) + + retval[0] = \ + d[ix ,iy ,iz ] * f[0][0][0] + \ + d[jx ,iy ,iz ] * f[1][0][0] + \ + d[ix ,jy ,iz ] * f[0][1][0] + \ + d[jx ,jy ,iz ] * f[1][1][0] + \ + d[ix ,iy ,jz ] * f[0][0][1] + \ + d[jx ,iy ,jz ] * f[1][0][1] + \ + d[ix ,jy ,jz ] * f[0][1][1] + \ + d[jx ,jy ,jz ] * f[1][1][1] + +@cython.boundscheck(False) +@cython.cdivision(True) +@cython.wraparound(False) +cdef void ngp3d_INTERNAL_periodic(DTYPE_t x, DTYPE_t y, + DTYPE_t z, + DTYPE_t[:,:,:] d, DTYPE_t Lbox, DTYPE_t *retval) nogil: + + cdef int Ngrid = d.shape[0] + cdef DTYPE_t inv_delta = Ngrid/Lbox + cdef int ix, iy, iz + cdef DTYPE_t f[2][2][2] + cdef DTYPE_t rx, ry, rz + cdef int jx, jy, jz + + rx = (inv_delta*x) + ry = (inv_delta*y) + rz = (inv_delta*z) + + ix = int(round(rx)) + iy = int(round(ry)) + iz = int(round(rz)) + + + ix = ix%Ngrid + iy = iy%Ngrid + iz = iz%Ngrid + + retval[0] = d[ix ,iy ,iz ] + + +@cython.boundscheck(False) +@cython.cdivision(True) +@cython.wraparound(False) +cdef void ngp3d_INTERNAL(DTYPE_t x, DTYPE_t y, + DTYPE_t z, + DTYPE_t[:,:,:] d, DTYPE_t Lbox, DTYPE_t *retval, DTYPE_t inval) nogil: + + cdef int Ngrid = d.shape[0] + cdef DTYPE_t inv_delta = Ngrid/Lbox + cdef int ix, iy, iz + cdef DTYPE_t f[2][2][2] + cdef DTYPE_t rx, ry, rz + cdef int jx, jy, jz + + rx = (inv_delta*x) + ry = (inv_delta*y) + rz = (inv_delta*z) + + ix = int(round(rx)) + iy = int(round(ry)) + iz = int(round(rz)) + + if ((ix < 0) or (ix+1) >= Ngrid or (iy < 0) or (iy+1) >= Ngrid or (iz < 0) or (iz+1) >= Ngrid): + retval[0] = inval + return + + retval[0] = d[ix ,iy ,iz ] + + +@cython.boundscheck(False) +@cython.cdivision(True) +@cython.wraparound(False) +cdef void interp3d_INTERNAL(DTYPE_t x, DTYPE_t y, + DTYPE_t z, + DTYPE_t[:,:,:] d, DTYPE_t Lbox, DTYPE_t *retval, DTYPE_t inval) nogil: + + cdef int Ngrid = d.shape[0] + cdef DTYPE_t inv_delta = Ngrid/Lbox + cdef int ix, iy, iz + cdef DTYPE_t f[2][2][2] 
+ cdef DTYPE_t rx, ry, rz + + rx = (inv_delta*x) + ry = (inv_delta*y) + rz = (inv_delta*z) + + ix = int(floor(rx)) + iy = int(floor(ry)) + iz = int(floor(rz)) + + rx -= ix + ry -= iy + rz -= iz + + if ((ix < 0) or (ix+1) >= Ngrid or (iy < 0) or (iy+1) >= Ngrid or (iz < 0) or (iz+1) >= Ngrid): + retval[0] = inval + return + # assert ((ix >= 0) and ((ix+1) < Ngrid)) +# assert ((iy >= 0) and ((iy+1) < Ngrid)) +# assert ((iz >= 0) and ((iz+1) < Ngrid)) + + f[0][0][0] = (1-rx)*(1-ry)*(1-rz) + f[1][0][0] = ( rx)*(1-ry)*(1-rz) + f[0][1][0] = (1-rx)*( ry)*(1-rz) + f[1][1][0] = ( rx)*( ry)*(1-rz) + + f[0][0][1] = (1-rx)*(1-ry)*( rz) + f[1][0][1] = ( rx)*(1-ry)*( rz) + f[0][1][1] = (1-rx)*( ry)*( rz) + f[1][1][1] = ( rx)*( ry)*( rz) + + retval[0] = \ + d[ix ,iy ,iz ] * f[0][0][0] + \ + d[ix+1,iy ,iz ] * f[1][0][0] + \ + d[ix ,iy+1,iz ] * f[0][1][0] + \ + d[ix+1,iy+1,iz ] * f[1][1][0] + \ + d[ix ,iy ,iz+1] * f[0][0][1] + \ + d[ix+1,iy ,iz+1] * f[1][0][1] + \ + d[ix ,iy+1,iz+1] * f[0][1][1] + \ + d[ix+1,iy+1,iz+1] * f[1][1][1] + +@cython.boundscheck(False) +def interp3d(x not None, y not None, + z not None, + npx.ndarray[DTYPE_t, ndim=3] d not None, DTYPE_t Lbox, + bool periodic=False, bool centered=True, bool ngp=False, DTYPE_t inval = 0): + """ interp3d(x,y,z,d,Lbox,periodic=False,centered=True,ngp=False) -> interpolated values + + Compute the tri-linear interpolation of the given field (d) at the given position (x,y,z). It assumes that they are box-centered coordinates. So (x,y,z) == (0,0,0) is equivalent to the pixel at (Nx/2,Ny/2,Nz/2) with Nx,Ny,Nz = d.shape. 
If periodic is set, it assumes the box is periodic +""" + cdef npx.ndarray[DTYPE_t] out + cdef DTYPE_t[:] out_slice + cdef DTYPE_t[:] ax, ay, az + cdef DTYPE_t[:,:,:] in_slice + cdef DTYPE_t retval + cdef long i + cdef long Nelt + cdef int myperiodic, myngp + cdef DTYPE_t shifter + + myperiodic = periodic + myngp = ngp + + if centered: + shifter = Lbox/2 + else: + shifter = 0 + + if d.shape[0] != d.shape[1] or d.shape[0] != d.shape[2]: + raise ValueError("Grid must have a cubic shape") + + + ierror = IndexError("Interpolating outside range") + if type(x) == np.ndarray or type(y) == np.ndarray or type(z) == np.ndarray: + if type(x) != np.ndarray or type(y) != np.ndarray or type(z) != np.ndarray: + raise ValueError("All or no array. No partial arguments") + + ax = x + ay = y + az = z + assert ax.size == ay.size and ax.size == az.size + + out = np.empty(x.shape, dtype=DTYPE) + out_slice = out + in_slice = d + Nelt = ax.size + with nogil: + if not myngp: + if myperiodic: + for i in prange(Nelt): + interp3d_INTERNAL_periodic(shifter+ax[i], shifter+ay[i], shifter+az[i], in_slice, Lbox, &out_slice[i]) + else: + for i in prange(Nelt): + interp3d_INTERNAL(shifter+ax[i], shifter+ay[i], shifter+az[i], in_slice, Lbox, &out_slice[i], inval) + else: + if myperiodic: + for i in prange(Nelt): + ngp3d_INTERNAL_periodic(shifter+ax[i], shifter+ay[i], shifter+az[i], in_slice, Lbox, &out_slice[i]) + else: + for i in prange(Nelt): + ngp3d_INTERNAL(shifter+ax[i], shifter+ay[i], shifter+az[i], in_slice, Lbox, &out_slice[i], inval) + return out + else: + if not myngp: + if periodic: + interp3d_INTERNAL_periodic(shifter+x, shifter+y, shifter+z, d, Lbox, &retval) + else: + interp3d_INTERNAL(shifter+x, shifter+y, shifter+z, d, Lbox, &retval, inval) + else: + if periodic: + ngp3d_INTERNAL_periodic(shifter+x, shifter+y, shifter+z, d, Lbox, &retval) + else: + ngp3d_INTERNAL(shifter+x, shifter+y, shifter+z, d, Lbox, &retval, inval) + return retval + +@cython.boundscheck(False) 
+@cython.cdivision(True) +cdef DTYPE_t interp2d_INTERNAL_periodic(DTYPE_t x, DTYPE_t y, + npx.ndarray[DTYPE_t, ndim=2] d, DTYPE_t Lbox) except? 0: + + cdef int Ngrid = d.shape[0] + cdef DTYPE_t inv_delta = Ngrid/Lbox + cdef int ix, iy + cdef DTYPE_t f[2][2] + cdef DTYPE_t rx, ry + cdef int jx, jy + + rx = (inv_delta*x + Ngrid/2) + ry = (inv_delta*y + Ngrid/2) + + ix = int(floor(rx)) + iy = int(floor(ry)) + + rx -= ix + ry -= iy + + while ix < 0: + ix += Ngrid + while iy < 0: + iy += Ngrid + + jx = (ix+1)%Ngrid + jy = (iy+1)%Ngrid + + assert ((ix >= 0) and ((jx) < Ngrid)) + assert ((iy >= 0) and ((jy) < Ngrid)) + + f[0][0] = (1-rx)*(1-ry) + f[1][0] = ( rx)*(1-ry) + f[0][1] = (1-rx)*( ry) + f[1][1] = ( rx)*( ry) + + return \ + d[ix ,iy ] * f[0][0] + \ + d[jx ,iy ] * f[1][0] + \ + d[ix ,jy ] * f[0][1] + \ + d[jx ,jy ] * f[1][1] + + +@cython.boundscheck(False) +@cython.cdivision(True) +cdef DTYPE_t interp2d_INTERNAL(DTYPE_t x, DTYPE_t y, + npx.ndarray[DTYPE_t, ndim=2] d, DTYPE_t Lbox) except? 
0: + + cdef int Ngrid = d.shape[0] + cdef DTYPE_t inv_delta = Ngrid/Lbox + cdef int ix, iy + cdef DTYPE_t f[2][2] + cdef DTYPE_t rx, ry + + rx = (inv_delta*x + Ngrid/2) + ry = (inv_delta*y + Ngrid/2) + + ix = int(floor(rx)) + iy = int(floor(ry)) + + rx -= ix + ry -= iy + + if ((ix < 0) or (ix+1) >= Ngrid): + raise IndexError("X coord out of bound (ix=%d, x=%g)" % (ix,x)) + if ((iy < 0) or (iy+1) >= Ngrid): + raise IndexError("Y coord out of bound (iy=%d, y=%g)" % (iy,y)) +# assert ((ix >= 0) and ((ix+1) < Ngrid)) +# assert ((iy >= 0) and ((iy+1) < Ngrid)) +# assert ((iz >= 0) and ((iz+1) < Ngrid)) + + f[0][0] = (1-rx)*(1-ry) + f[1][0] = ( rx)*(1-ry) + f[0][1] = (1-rx)*( ry) + f[1][1] = ( rx)*( ry) + + return \ + d[ix ,iy ] * f[0][0] + \ + d[ix+1,iy ] * f[1][0] + \ + d[ix ,iy+1] * f[0][1] + \ + d[ix+1,iy+1] * f[1][1] + +def interp2d(x not None, y not None, + npx.ndarray[DTYPE_t, ndim=2] d not None, DTYPE_t Lbox, + bool periodic=False): + cdef npx.ndarray[DTYPE_t] out + cdef npx.ndarray[DTYPE_t] ax, ay + cdef int i + + if d.shape[0] != d.shape[1]: + raise ValueError("Grid must have a square shape") + + if type(x) == np.ndarray or type(y) == np.ndarray: + if type(x) != np.ndarray or type(y) != np.ndarray: + raise ValueError("All or no array. 
No partial arguments") + + ax = x + ay = y + assert ax.size == ay.size + + out = np.empty(x.shape, dtype=DTYPE) + if periodic: + for i in range(ax.size): + out[i] = interp2d_INTERNAL_periodic(ax[i], ay[i], d, Lbox) + else: + for i in range(ax.size): + out[i] = interp2d_INTERNAL(ax[i], ay[i], d, Lbox) + + return out + else: + if periodic: + return interp2d_INTERNAL_periodic(x, y, d, Lbox) + else: + return interp2d_INTERNAL(x, y, d, Lbox) + + +@cython.boundscheck(False) +@cython.cdivision(True) +cdef void INTERNAL_project_cic_no_mass(DTYPE_t[:,:,:] g, + DTYPE_t[:,:] x, int Ngrid, double Lbox, double shifter) nogil: + cdef double delta_Box = Ngrid/Lbox + cdef int i + cdef double a[3] + cdef double c[3] + cdef int b[3] + cdef int do_not_put + + for i in range(x.shape[0]): + + do_not_put = 0 + for j in range(3): + a[j] = (x[i,j]+shifter)*delta_Box + b[j] = int(floor(a[j])) + a[j] -= b[j] + c[j] = 1-a[j] + if b[j] < 0 or b[j]+1 >= Ngrid: + do_not_put = True + + if not do_not_put: + g[b[0],b[1],b[2]] += c[0]*c[1]*c[2] + g[b[0]+1,b[1],b[2]] += a[0]*c[1]*c[2] + g[b[0],b[1]+1,b[2]] += c[0]*a[1]*c[2] + g[b[0]+1,b[1]+1,b[2]] += a[0]*a[1]*c[2] + + g[b[0],b[1],b[2]+1] += c[0]*c[1]*a[2] + g[b[0]+1,b[1],b[2]+1] += a[0]*c[1]*a[2] + g[b[0],b[1]+1,b[2]+1] += c[0]*a[1]*a[2] + g[b[0]+1,b[1]+1,b[2]+1] += a[0]*a[1]*a[2] + +@cython.boundscheck(False) +@cython.cdivision(True) +cdef void INTERNAL_project_cic_no_mass_periodic(DTYPE_t[:,:,:] g, + DTYPE_t[:,:] x, int Ngrid, double Lbox, double shifter) nogil: + cdef double delta_Box = Ngrid/Lbox + cdef int i + cdef double a[3] + cdef double c[3] + cdef int b[3] + cdef int b1[3] + cdef int do_not_put + cdef DTYPE_t[:,:] ax + cdef DTYPE_t[:,:,:] ag + + ax = x + ag = g + + for i in range(x.shape[0]): + + do_not_put = 0 + for j in range(3): + a[j] = (ax[i,j]+shifter)*delta_Box + b[j] = int(floor(a[j])) + b1[j] = (b[j]+1) % Ngrid + + a[j] -= b[j] + c[j] = 1-a[j] + + b[j] %= Ngrid + + ag[b[0],b[1],b[2]] += c[0]*c[1]*c[2] + ag[b1[0],b[1],b[2]] += 
a[0]*c[1]*c[2] + ag[b[0],b1[1],b[2]] += c[0]*a[1]*c[2] + ag[b1[0],b1[1],b[2]] += a[0]*a[1]*c[2] + + ag[b[0],b[1],b1[2]] += c[0]*c[1]*a[2] + ag[b1[0],b[1],b1[2]] += a[0]*c[1]*a[2] + ag[b[0],b1[1],b1[2]] += c[0]*a[1]*a[2] + ag[b1[0],b1[1],b1[2]] += a[0]*a[1]*a[2] + + +@cython.boundscheck(False) +@cython.cdivision(True) +cdef void INTERNAL_project_cic_with_mass(DTYPE_t[:,:,:] g, + DTYPE_t[:,:] x, + DTYPE_t[:] mass, + int Ngrid, double Lbox, double shifter) nogil: + cdef double delta_Box = Ngrid/Lbox + cdef int i + cdef double a[3] + cdef double c[3] + cdef DTYPE_t m0 + cdef int b[3] + + for i in range(x.shape[0]): + + do_not_put = False + for j in range(3): + a[j] = (x[i,j]+shifter)*delta_Box + b[j] = int(a[j]) + a[j] -= b[j] + c[j] = 1-a[j] + if b[j] < 0 or b[j]+1 >= Ngrid: + do_not_put = True + + if not do_not_put: + m0 = mass[i] + + g[b[0],b[1],b[2]] += c[0]*c[1]*c[2]*m0 + g[b[0]+1,b[1],b[2]] += a[0]*c[1]*c[2]*m0 + g[b[0],b[1]+1,b[2]] += c[0]*a[1]*c[2]*m0 + g[b[0]+1,b[1]+1,b[2]] += a[0]*a[1]*c[2]*m0 + + g[b[0],b[1],b[2]+1] += c[0]*c[1]*a[2]*m0 + g[b[0]+1,b[1],b[2]+1] += a[0]*c[1]*a[2]*m0 + g[b[0],b[1]+1,b[2]+1] += c[0]*a[1]*a[2]*m0 + g[b[0]+1,b[1]+1,b[2]+1] += a[0]*a[1]*a[2]*m0 + +@cython.boundscheck(False) +@cython.cdivision(True) +cdef void INTERNAL_project_cic_with_mass_periodic(DTYPE_t[:,:,:] g, + DTYPE_t[:,:] x, + DTYPE_t[:] mass, + int Ngrid, double Lbox, double shifter) nogil: + cdef double half_Box = 0.5*Lbox, m0 + cdef double delta_Box = Ngrid/Lbox + cdef int i + cdef double a[3] + cdef double c[3] + cdef int b[3] + cdef int b1[3] + + for i in range(x.shape[0]): + + for j in range(3): + a[j] = (x[i,j]+shifter)*delta_Box + b[j] = int(floor(a[j])) + b1[j] = b[j]+1 + while b1[j] < 0: + b1[j] += Ngrid + while b1[j] >= Ngrid: + b1[j] -= Ngrid + + a[j] -= b[j] + c[j] = 1-a[j] + + m0 = mass[i] + g[b[0],b[1],b[2]] += c[0]*c[1]*c[2]*m0 + g[b1[0],b[1],b[2]] += a[0]*c[1]*c[2]*m0 + g[b[0],b1[1],b[2]] += c[0]*a[1]*c[2]*m0 + g[b1[0],b1[1],b[2]] += a[0]*a[1]*c[2]*m0 + + 
g[b[0],b[1],b1[2]] += c[0]*c[1]*a[2]*m0 + g[b1[0],b[1],b1[2]] += a[0]*c[1]*a[2]*m0 + g[b[0],b1[1],b1[2]] += c[0]*a[1]*a[2]*m0 + g[b1[0],b1[1],b1[2]] += a[0]*a[1]*a[2]*m0 + + +def project_cic(npx.ndarray[DTYPE_t, ndim=2] x not None, npx.ndarray[DTYPE_t, ndim=1] mass, int Ngrid, + double Lbox, bool periodic = False, centered=True): + """ + project_cic(x array (N,3), mass (may be None), Ngrid, Lbox, periodict, centered=True) + + This function does a Cloud-In-Cell projection of a 3d unstructured dataset. First argument is a Nx3 array of coordinates. + Second argument is an optinal mass. Ngrid is the size output grid and Lbox is the physical size of the grid. + """ + cdef npx.ndarray[DTYPE_t, ndim=3] g + cdef double shifter + cdef bool local_periodic + + local_periodic = periodic + + if centered: + shifter = 0.5*Lbox + else: + shifter = 0 + + if x.shape[1] != 3: + raise ValueError("Invalid shape for x array") + + if mass is not None and mass.shape[0] != x.shape[0]: + raise ValueError("Mass array and coordinate array must have the same number of elements") + + g = np.zeros((Ngrid,Ngrid,Ngrid),dtype=DTYPE) + + if not local_periodic: + if mass is None: + with nogil: + INTERNAL_project_cic_no_mass(g, x, Ngrid, Lbox, shifter) + else: + with nogil: + INTERNAL_project_cic_with_mass(g, x, mass, Ngrid, Lbox, shifter) + else: + if mass is None: + with nogil: + INTERNAL_project_cic_no_mass_periodic(g, x, Ngrid, Lbox, shifter) + else: + with nogil: + INTERNAL_project_cic_with_mass_periodic(g, x, mass, Ngrid, Lbox, shifter) + + return g + +def tophat_fourier_internal(npx.ndarray[DTYPE_t, ndim=1] x not None): + cdef int i + cdef npx.ndarray[DTYPE_t] y + cdef DTYPE_t x0 + + y = np.empty(x.size, dtype=DTYPE) + + for i in range(x.size): + x0 = x[i] + if abs(x0)<1e-5: + y[i] = 1 + else: + y[i] = (3*(sin(x0) - x0 * cos(x0))/(x0**3)) + + return y + +def tophat_fourier(x not None): + cdef npx.ndarray[DTYPE_t, ndim=1] b + + if type(x) != np.ndarray: + raise ValueError("x must be a Numpy 
array") + + b = np.array(x, dtype=DTYPE).ravel() + + b = tophat_fourier_internal(b) + + return b.reshape(x.shape) + + + +@cython.boundscheck(False) +@cython.cdivision(True) +cdef DTYPE_t cube_integral(DTYPE_t u[3], DTYPE_t u0[3], int r[1], DTYPE_t alpha_max) nogil: + cdef DTYPE_t tmp_a + cdef DTYPE_t v[3] + cdef int i, j + + for i in xrange(3): + if u[i] == 0.: + continue + + if u[i] < 0: + tmp_a = -u0[i]/u[i] + else: + tmp_a = (1-u0[i])/u[i] + + if tmp_a < alpha_max: + alpha_max = tmp_a + j = i + + for i in range(3): + u0[i] += u[i]*alpha_max + + r[0] = j + + return alpha_max + +@cython.boundscheck(False) +@cython.cdivision(True) +cdef DTYPE_t cube_integral_trilin(DTYPE_t u[3], DTYPE_t u0[3], int r[1], DTYPE_t vertex_value[8], DTYPE_t alpha_max) nogil: + cdef DTYPE_t I, tmp_a + cdef DTYPE_t v[3] + cdef DTYPE_t term[4] + cdef int i, j, q + + j = 0 + for i in range(3): + if u[i] == 0.: + continue + + if u[i] < 0: + tmp_a = -u0[i]/u[i] + else: + tmp_a = (1-u0[i])/u[i] + + if tmp_a < alpha_max: + alpha_max = tmp_a + j = i + + I = compute_projection(vertex_value, u, u0, alpha_max) + + for i in xrange(3): + u0[i] += u[i]*alpha_max + + # alpha_max is the integration length + # we integrate between 0 and alpha_max (curvilinear coordinates) + r[0] = j + + return I + +@cython.boundscheck(False) +cdef DTYPE_t integrator0(DTYPE_t[:,:,:] density, + DTYPE_t u[3], DTYPE_t u0[3], int u_delta[3], int iu0[3], int jumper[1], DTYPE_t alpha_max) nogil: + cdef DTYPE_t d + + d = density[iu0[0], iu0[1], iu0[2]] + + return cube_integral(u, u0, jumper, alpha_max)*d + +@cython.boundscheck(False) +cdef DTYPE_t integrator1(DTYPE_t[:,:,:] density, + DTYPE_t u[3], DTYPE_t u0[3], int u_delta[3], int iu0[3], int jumper[1], DTYPE_t alpha_max) nogil: + cdef DTYPE_t vertex_value[8] + cdef DTYPE_t d + cdef int a[3][2] + cdef int i + + for i in xrange(3): + a[i][0] = iu0[i] + a[i][1] = iu0[i]+1 + + vertex_value[0 + 2*0 + 4*0] = density[a[0][0], a[1][0], a[2][0]] + vertex_value[1 + 2*0 + 4*0] = 
density[a[0][1], a[1][0], a[2][0]] + vertex_value[0 + 2*1 + 4*0] = density[a[0][0], a[1][1], a[2][0]] + vertex_value[1 + 2*1 + 4*0] = density[a[0][1], a[1][1], a[2][0]] + + vertex_value[0 + 2*0 + 4*1] = density[a[0][0], a[1][0], a[2][1]] + vertex_value[1 + 2*0 + 4*1] = density[a[0][1], a[1][0], a[2][1]] + vertex_value[0 + 2*1 + 4*1] = density[a[0][0], a[1][1], a[2][1]] + vertex_value[1 + 2*1 + 4*1] = density[a[0][1], a[1][1], a[2][1]] + + return cube_integral_trilin(u, u0, jumper, vertex_value, alpha_max) + + + +@cython.boundscheck(False) +cdef DTYPE_t C_line_of_sight_projection(DTYPE_t[:,:,:] density, + DTYPE_t a_u[3], + DTYPE_t min_distance, + DTYPE_t max_distance, DTYPE_t[:] shifter, int integrator_id) nogil except? 0: + + cdef DTYPE_t u[3] + cdef DTYPE_t ifu0[3] + cdef DTYPE_t u0[3] + cdef DTYPE_t utot[3] + cdef int u_delta[3] + cdef int iu0[3] + cdef int i + cdef int N = density.shape[0] + cdef int half_N = density.shape[0]/2 + cdef int completed + cdef DTYPE_t I0, d, dist2, delta, s, max_distance2 + cdef int jumper[1] + + cdef DTYPE_t (*integrator)(DTYPE_t[:,:,:], + DTYPE_t u[3], DTYPE_t u0[3], int u_delta[3], int iu0[3], int jumper[1], DTYPE_t alpha_max) nogil + + if integrator_id == 0: + integrator = integrator0 + else: + integrator = integrator1 + + max_distance2 = max_distance**2 + + for i in range(3): + u[i] = a_u[i] + u0[i] = a_u[i]*min_distance + ifu0[i] = half_N+u0[i]+shifter[i] + if (ifu0[i] <= 0 or ifu0[i] >= N): + return 0 + iu0[i] = int(floor(ifu0[i])) + u0[i] = ifu0[i]-iu0[i] + u_delta[i] = 1 if iu0[i] > 0 else -1 + if (not ((iu0[i]>= 0) and (iu0[i] < N))): + with gil: + raise RuntimeError("iu0[%d] = %d !!" % (i,iu0[i])) + + if (not (u0[i]>=0 and u0[i]<=1)): + with gil: + raise RuntimeError("u0[%d] = %g !" 
% (i,u0[i])) + + completed = 0 + if ((iu0[0] >= N-1) or (iu0[0] <= 0) or + (iu0[1] >= N-1) or (iu0[1] <= 0) or + (iu0[2] >= N-1) or (iu0[2] <= 0)): + completed = 1 + + I0 = 0 + jumper[0] = 0 + dist2 = 0 + while completed == 0: + I0 += integrator(density, u, u0, u_delta, iu0, jumper, max_distance-sqrt(dist2)) + + if u[jumper[0]] < 0: + iu0[jumper[0]] -= 1 + u0[jumper[0]] = 1 + else: + iu0[jumper[0]] += 1 + u0[jumper[0]] = 0 + + + if ((iu0[0] >= N-1) or (iu0[0] <= 0) or + (iu0[1] >= N-1) or (iu0[1] <= 0) or + (iu0[2] >= N-1) or (iu0[2] <= 0)): + completed = 1 + else: + dist2 = 0 + for i in range(3): + delta = iu0[i]+u0[i]-half_N-shifter[i] + dist2 += delta*delta + + if (dist2 > max_distance2): + # Remove the last portion of the integral + #delta = sqrt(dist2) - max_distance + #I0 -= d*delta + completed = 1 + + return I0 + +def line_of_sight_projection(DTYPE_t[:,:,:] density not None, + DTYPE_t[:] a_u not None, + DTYPE_t min_distance, + DTYPE_t max_distance, DTYPE_t[:] shifter not None, int integrator_id=0): + cdef DTYPE_t u[3] + + u[0] = a_u[0] + u[1] = a_u[1] + u[2] = a_u[2] + + return C_line_of_sight_projection(density, + u, + min_distance, + max_distance, shifter, integrator_id) + +cdef double _spherical_projloop(double theta, double phi, DTYPE_t[:,:,:] density, + double min_distance, double max_distance, + DTYPE_t[:] shifter, int integrator_id) nogil: + cdef DTYPE_t u0[3] + + stheta = sin(theta) + u0[0] = cos(phi)*stheta + u0[1] = sin(phi)*stheta + u0[2] = cos(theta) + + return C_line_of_sight_projection(density, u0, min_distance, max_distance, shifter, integrator_id) + + +@cython.boundscheck(False) +def spherical_projection(int Nside, + npx.ndarray[DTYPE_t, ndim=3] density not None, + DTYPE_t min_distance, + DTYPE_t max_distance, int progress=1, int integrator_id=0, DTYPE_t[:] shifter = None, int booster=-1): + """ + spherical_projection(Nside, density, min_distance, max_distance, progress=1, integrator_id=0, shifter=None, booster=-1) + + Keyword arguments: + 
progress (int): show progress if it is equal to 1 + integrator_id (int): specify the order of integration along the line of shift + shifter (DTYPE_t array): this is an array of size 3. It specifies the amount of shift to apply to the center, in unit of voxel + booster (int): what is the frequency of refreshment of the progress bar. Small number decreases performance by locking the GIL. + + + Arguments: + Nside (int): Nside of the returned map + density (NxNxN array): this is the density field, expressed as a cubic array + min_distance (float): lower bound of the integration + max_distance (float): upper bound of the integration + + Returns: + an healpix map, as a 1-dimensional array. + """ + import healpy as hp + import progressbar as pb + cdef int i + cdef DTYPE_t[:] theta,phi + cdef DTYPE_t[:,:,:] density_view + cdef DTYPE_t[:] outm + cdef int[:] job_done + cdef npx.ndarray[DTYPE_t, ndim=1] outm_array + cdef long N, N0 + cdef double stheta + cdef int tid + + if shifter is None: + shifter = view.array(shape=(3,), format=FORMAT_DTYPE, itemsize=sizeof(DTYPE_t)) + shifter[:] = 0 + + print("allocating map") + outm_array = np.empty(hp.nside2npix(Nside),dtype=DTYPE) + print("initializing views") + outm = outm_array + density_view = density + + print("progress?") + if progress != 0: + p = pb.ProgressBar(maxval=outm.size,widgets=[pb.Bar(), pb.ETA()]).start() + + N = smp_get_max_threads() + N0 = outm.size + + if booster < 0: + booster = 1#000 + + job_done = view.array(shape=(N,), format="i", itemsize=sizeof(int)) + job_done[:] = 0 + theta,phi = hp.pix2ang(Nside, np.arange(N0)) + with nogil, parallel(): + tid = smp_get_thread_id() + for i in prange(N0,schedule='dynamic',chunksize=256): + if progress != 0 and (i%booster) == 0: + with gil: + p.update(_mysum(job_done)) + outm[i] = _spherical_projloop(theta[i], phi[i], density_view, min_distance, max_distance, shifter, integrator_id) + job_done[tid] += 1 + + if progress: + p.finish() + + + return outm_array diff --git 
a/external/cosmotool/python/copy.pxd b/external/cosmotool/python/copy.pxd new file mode 100644 index 0000000..182bbba --- /dev/null +++ b/external/cosmotool/python/copy.pxd @@ -0,0 +1,22 @@ +cimport cython +cimport numpy as npx + +ctypedef fused sum_type: + cython.int + cython.float + npx.uint64_t + npx.uint32_t + +@cython.boundscheck(False) +cdef inline sum_type _mysum(sum_type[:] jobs) nogil: + cdef sum_type s + cdef npx.uint64_t N + cdef int i + + s = 0 + N = jobs.shape[0] + for i in xrange(N): + s += jobs[i] + return s + + diff --git a/external/cosmotool/python/cosmotool/__init__.py b/external/cosmotool/python/cosmotool/__init__.py new file mode 100644 index 0000000..c5b1c79 --- /dev/null +++ b/external/cosmotool/python/cosmotool/__init__.py @@ -0,0 +1,23 @@ +from ._cosmotool import * +from ._project import * +from ._cosmo_power import * +from ._cosmo_cic import * +from ._fast_interp import * +from .grafic import writeGrafic, writeWhitePhase, readGrafic, readWhitePhase +from .borg import read_borg_vol +from .cic import cicParticles +try: + import pyopencl + from .cl_cic import cl_CIC_Density +except: + print("No opencl support") + +from .simu import loadRamsesAll, simpleWriteGadget, SimulationBare +from .timing import time_block, timeit, timeit_quiet +from .bispectrum import bispectrum, powerspectrum +from .smooth import smooth_particle_density + +try: + from .fftw import CubeFT +except ImportError: + print("No FFTW support") diff --git a/external/cosmotool/python/cosmotool/bispectrum.py b/external/cosmotool/python/cosmotool/bispectrum.py new file mode 100644 index 0000000..6247f66 --- /dev/null +++ b/external/cosmotool/python/cosmotool/bispectrum.py @@ -0,0 +1,127 @@ +import numpy as np + +try: + import cffi + import os + + _ffi = cffi.FFI() + _ffi.cdef(""" + + void CosmoTool_compute_bispectrum( + double *delta_hat, size_t Nx, size_t Ny, size_t Nz, + size_t *Ntriangles, + double* B, double delta_k, size_t Nk ) ; + void CosmoTool_compute_powerspectrum( + double 
*delta_hat, size_t Nx, size_t Ny, size_t Nz, + size_t *Ncounts, + double* P, double delta_k, size_t Nk ); + + """); + + _pathlib = os.path.dirname(os.path.abspath(__file__)) + _lib = _ffi.dlopen(os.path.join(_pathlib,"_cosmo_bispectrum.so")) +except Exception as e: + print(repr(e)) + raise RuntimeError("Failed to initialize _cosmo_bispectrum module") + + +def bispectrum(delta, delta_k, Nk, fourier=True): + """bispectrum(delta, fourier=True) + + Args: + * delta: a 3d density field, can be Fourier modes if fourier set to True + + + Return: + * A 3d array of the binned bispectrum +""" + + if len(delta.shape) != 3: + raise ValueError("Invalid shape for delta") + try: + delta_k = float(delta_k) + Nk = int(Nk) + except: + raise ValueError() + + if not fourier: + delta = np.fft.rfftn(delta) + N1,N2,N3 = delta.shape + rN3 = (N3-1)*2 + delta_hat_buf = np.empty((N1*N2*N3*2),dtype=np.double) + delta_hat_buf[::2] = delta.real.ravel() + delta_hat_buf[1::2] = delta.imag.ravel() + + size_size = _ffi.sizeof("size_t") + if size_size == 4: + triangle_buf = np.zeros((Nk,Nk,Nk),dtype=np.int32) + elif size_size == 8: + triangle_buf = np.zeros((Nk,Nk,Nk),dtype=np.int64) + else: + raise RuntimeError("Internal error, do not know how to map size_t") + + B_buf = np.zeros((Nk*Nk*Nk*2), dtype=np.double) + + _lib.CosmoTool_compute_bispectrum( \ + _ffi.cast("double *", delta_hat_buf.ctypes.data), \ + N1, N2, rN3, \ + _ffi.cast("size_t *", triangle_buf.ctypes.data), \ + _ffi.cast("double *", B_buf.ctypes.data), \ + delta_k, \ + Nk) + B_buf = B_buf.reshape((Nk,Nk,Nk,2)) + return triangle_buf, B_buf[...,0]+1j*B_buf[...,1] + +def powerspectrum(delta, delta_k, Nk, fourier=True): + """powerspectrum(delta, fourier=True) + + Args: + * delta: a 3d density field, can be Fourier modes if fourier set to True + + + Return: + * A 3d array of the binned bispectrum +""" + + if len(delta.shape) != 3: + raise ValueError("Invalid shape for delta") + try: + delta_k = float(delta_k) + Nk = int(Nk) + except: + raise 
ValueError() + + if not fourier: + delta = np.fft.rfftn(delta) + N1,N2,N3 = delta.shape + delta_hat_buf = np.empty((N1*N2*N3*2),dtype=np.double) + delta_hat_buf[::2] = delta.real.ravel() + delta_hat_buf[1::2] = delta.imag.ravel() + + size_size = _ffi.sizeof("size_t") + if size_size == 4: + count_buf = np.zeros((Nk,),dtype=np.int32) + elif size_size == 8: + count_buf = np.zeros((Nk,),dtype=np.int64) + else: + raise RuntimeError("Internal error, do not know how to map size_t") + + B_buf = np.zeros((Nk,), dtype=np.double) + + _lib.CosmoTool_compute_powerspectrum( \ + _ffi.cast("double *", delta_hat_buf.ctypes.data), \ + N1, N2, N3, \ + _ffi.cast("size_t *", count_buf.ctypes.data), \ + _ffi.cast("double *", B_buf.ctypes.data), \ + delta_k, \ + Nk) + return count_buf, B_buf[...] + + +if __name__=="__main__": + delta=np.zeros((16,16,16)) + delta[0,0,0]=1 + delta[3,2,1]=1 + b = powerspectrum(delta, 1, 16, fourier=False) + a = bispectrum(delta, 1, 16, fourier=False) + print(a[0].max()) diff --git a/external/cosmotool/python/cosmotool/borg.py b/external/cosmotool/python/cosmotool/borg.py new file mode 100644 index 0000000..fe10e2a --- /dev/null +++ b/external/cosmotool/python/cosmotool/borg.py @@ -0,0 +1,265 @@ +### +### BORG code is from J. 
Jasche +### +import io +import numpy as np +from numpy import * +import os.path +import array +import glob + +class BorgVolume(object): + + def __init__(self, density, ranges): + self.density = density + self.ranges = ranges + +def build_filelist(fdir): + #builds list of all borg density fields which may be distributed over several directories + + fname_0=glob.glob(fdir[0]+'initial_density_*') + fname_1=glob.glob(fdir[0]+'final_density_*') + + fdir=fdir[1:] #eliminate first element + + for fd in fdir: + fname_0=fname_0+glob.glob(fd+'initial_density_*') + fname_1=fname_1+glob.glob(fd+'final_density_*') + + return fname_0, fname_1 + +def read_borg_vol(BORGFILE): + """ Reading routine for BORG data + """ + + openfile=open(BORGFILE,'rb') + + period=0 + N0=0 + N1=0 + N2=0 + + xmin=0 + xmax=0 + + ymin=0 + ymax=0 + + zmin=0 + zmax=0 + nlines=0 + + while True: + line=openfile.readline() + s=line.rstrip('\n') + r=s.rsplit(' ') + + if size(r)==5 : + if r[0] =="define": + if r[1]=="Lattice" : + N0=int(r[2]) + N1=int(r[3]) + N2=int(r[4]) + + if size(r)==11 : + if r[4] =="BoundingBox": + xmin=float(r[5]) + xmax=float(r[6]) + ymin=float(r[7]) + ymax=float(r[8]) + zmin=float(r[9]) + zmax=float(r[10].rstrip(',')) + + if r[0]=='@1': break + + ranges=[] + ranges.append(xmin) + ranges.append(xmax) + ranges.append(ymin) + ranges.append(ymax) + ranges.append(zmin) + ranges.append(zmax) + + #now read data + data=np.fromfile(openfile, '>f4') + data=data.reshape(N2,N0,N1) + return BorgVolume(data,ranges) + +def read_spec( fname ): + """ Reading routine for ARES spectrum samples + """ + x,y=np.loadtxt( fname ,usecols=(0,1),unpack=True) + + return x , y + + +def read_bias_nmean( fname ): + """ Reading routine for ARES bias data + """ + x,b0,b1,nmean=np.loadtxt( fname ,usecols=(0,1,2,3),unpack=True) + + return x , b0, b1, nmean + +def read_nmean( fname ): + """ Reading routine for BORG bias data + """ + x,nmean=np.loadtxt( fname ,usecols=(0,1),unpack=True) + + return x, nmean + +def 
get_grid_values(xx,data, ranges): + """ return values at grid positions + """ + xmin=ranges[0] + xmax=ranges[1] + + ymin=ranges[2] + ymax=ranges[3] + + zmin=ranges[4] + zmax=ranges[5] + + Lx= xmax-xmin + Ly= ymax-ymin + Lz= zmax-zmin + + Nx=shape(data)[0] + Ny=shape(data)[1] + Nz=shape(data)[2] + + dx=Lx/float(Nx) + dy=Ly/float(Ny) + dz=Lz/float(Nz) + + idx=(xx[:,0]-xmin)/dx + idy=(xx[:,1]-ymin)/dz + idz=(xx[:,2]-zmin)/dy + + idx=idx.astype(int) + idy=idy.astype(int) + idz=idz.astype(int) + + idflag=np.where( (idx>-1)*(idx-1)*(idy-1)*(idz= 0) { + int next_part = part_list[current_part]; + part_list[current_part] = -1; + while (next_part != -1) { + int p = part_list[next_part]; + part_list[next_part] = current_part; + current_part = next_part; + next_part = p; + } + part_mesh[mid] = current_part; + } +} + +__kernel void dance(__global const BASIC_TYPE *pos, + __global BASIC_TYPE *density, + __global int *part_mesh, __global int *part_list, const int N, const BASIC_TYPE delta, const BASIC_TYPE shift_pos) +{ + int m[NDIM]; + int shifter = 1; + int i; + int first, i_part; + int idx = 0; + + for (i = 0; i < NDIM; i++) { + m[i] = get_global_id(i); + idx += shifter * m[i]; + shifter *= N; + } + + first = 1; + +//BEGIN LOOPER +%(looperFor)s +//END LOOPER + + int idx_dance = 0; + BASIC_TYPE w = 0; +//LOOPER INDEX + int r[NDIM] = { %(looperVariables)s }; +//END LOOPER + + i_part = part_mesh[idx]; + while (i_part != -1) { + BASIC_TYPE w0 = 1; + + for (int d = 0; d < NDIM; d++) { + BASIC_TYPE x; + BASIC_TYPE q; + BASIC_TYPE dx; + + if (CENTERED) + x = pos[i_part*NDIM + d]*delta - shift_pos; + else + x = pos[i_part*NDIM + d]*delta; + q = floor(x); + dx = x - q; + + w0 *= (r[d] == 1) ? dx : ((BASIC_TYPE)1-dx); + } + + i_part = part_list[i_part]; + w += w0; + } + + shifter = 1; + for (i = 0; i < NDIM; i++) { + idx_dance += shifter * ((m[i]+r[i])%%N); + shifter *= N; + } + + density[idx_dance] += w; + + // One dance done. 
Wait for everybody for the next iteration + barrier(CLK_GLOBAL_MEM_FENCE); +%(looperForEnd)s +} +''' + +class CIC_CL(object): + + def __init__(self, context, ndim=2, ktype=np.float32, centered=False): + global CIC_PREKERNEL, CIC_KERNEL + + translator = {} + if ktype == np.float32: + translator['cicType'] = 'float' + pragmas = '' + elif ktype == np.float64: + translator['cicType'] = 'double' + pragmas = '#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n' + else: + raise ValueError("Invalid ktype") + + # 2 dimensions + translator['ndim'] = ndim + translator['centered'] = '1' if centered else '0' + looperVariables = ','.join(['id%d' % d for d in range(ndim)]) + looperFor = '\n'.join(['for (int id{dim}=0; id{dim} < 2; id{dim}++) {{'.format(dim=d) for d in range(ndim)]) + looperForEnd = '}' * ndim + + kern = pragmas + CIC_PREKERNEL.format(**translator) + (CIC_KERNEL % {'looperVariables': looperVariables, 'looperFor': looperFor, 'looperForEnd':looperForEnd}) + self.kern_code = kern + self.ctx = context + self.queue = cl.CommandQueue(context)#, properties=cl.OUT_OF_ORDER_EXEC_MODE_ENABLE) + self.ktype = ktype + self.ndim = ndim + self.prog = cl.Program(self.ctx, kern).build() + self.centered = centered + + def run(self, particles, Ng, L): + assert particles.strides[1] == self.ktype().itemsize # This is C-ordering + assert particles.shape[1] == self.ndim + + print("Start again") + + ndim = self.ndim + part_pos = cl_array.to_device(self.queue, particles) + part_mesh = cl_array.empty(self.queue, (Ng,)*ndim, np.int32, order='C') + density = cl_array.zeros(self.queue, (Ng,)*ndim, self.ktype, order='C') + part_list = cl_array.empty(self.queue, (particles.shape[0],), np.int32, order='C') + shift_pos = 0.5*L if self.centered else 0 + + if True: + delta = Ng/L + + with time_block("Init pcell array"): + e = self.prog.init_pcell(self.queue, (Ng**ndim,), None, part_mesh.data, np.int32(-1)) + e.wait() + + with time_block("Init idx array"): + e=self.prog.init_pcell(self.queue, 
(particles.shape[0],), None, part_list.data, np.int32(-1)) + e.wait() + + with time_block("Build indices"): + self.prog.build_indices(self.queue, (particles.shape[0],), None, + part_pos.data, part_mesh.data, part_list.data, np.int32(Ng), self.ktype(delta), self.ktype(shift_pos)) + + if True: + with time_block("Reverse list"): + lastevt = self.prog.reverse_list(self.queue, (Ng**ndim,), None, part_mesh.data, part_list.data) + # We require pmax pass, particles are ordered according to part_idx + + with time_block("dance"): + self.prog.dance(self.queue, (Ng,)*ndim, None, part_pos.data, density.data, part_mesh.data, part_list.data, np.int32(Ng), self.ktype(delta), self.ktype(shift_pos)) + + self.queue.finish() + del part_pos + del part_mesh + del part_list + with time_block("download"): + return density.get() + + +def cl_CIC_Density(particles, Ngrid, Lbox, context=None, periodic=True, centered=False): + """ + cl_CIC_Density(particles (Nx3), Ngrid, Lbox, context=None, periodic=True, centered=False) +""" + if context is None: + context = cl.create_some_context() + + ktype = particles.dtype + if ktype != np.float32 and ktype != np.float64: + raise ValueError("particles may only be float32 or float64") + + if len(particles.shape) != 2 or particles.shape[1] != 3: + raise ValueError("particles may only be a Nx3 array") + + cic = CIC_CL(context, ndim=3, centered=centered, ktype=ktype) + + return cic.run(particles, Ngrid, Lbox) diff --git a/external/cosmotool/python/cosmotool/config.py.in b/external/cosmotool/python/cosmotool/config.py.in new file mode 100644 index 0000000..7603b85 --- /dev/null +++ b/external/cosmotool/python/cosmotool/config.py.in @@ -0,0 +1 @@ +install_prefix="@CMAKE_INSTALL_PREFIX@" diff --git a/external/cosmotool/python/cosmotool/ctpv.py b/external/cosmotool/python/cosmotool/ctpv.py new file mode 100644 index 0000000..22a9d22 --- /dev/null +++ b/external/cosmotool/python/cosmotool/ctpv.py @@ -0,0 +1,211 @@ +import numpy as np +from contextlib import 
contextmanager + +class ProgrammableParticleLoad(object): + + @staticmethod + def main_script(source, particles, aname="default", aux=None): + import vtk + from vtk.util import numpy_support as ns + + out = source.GetOutput() + vv = vtk.vtkPoints() + + assert len(particles.shape) == 2 + assert particles.shape[1] == 3 + + vv.SetData(ns.numpy_to_vtk(np.ascontiguousarray(particles.astype(np.float64)), deep=1)) + vv.SetDataTypeToDouble() + + out.Allocate(1,1) + out.SetPoints(vv) + + if aux is not None: + for n,a in aux: + a_vtk = ns.numpy_to_vtk( + np.ascontiguousarray(a.astype(np.float64) + ), + deep=1) + a_vtk.SetName(n) + out.GetPointData().AddArray(a_vtk) + + out.InsertNextCell(vtk.VTK_VERTEX, particles.shape[0], range(particles.shape[0])) + + @staticmethod + def request_information(source): + pass + + + +class ProgrammableParticleHistoryLoad(object): + + @staticmethod + def main_script(source, particles, velocities=None, aname="default",addtime=False): + import vtk + from vtk.util import numpy_support as ns + + out = source.GetOutput() + vv = vtk.vtkPoints() + + assert len(particles.shape) == 3 + assert particles.shape[2] == 3 + + if not velocities is None: + for i,j in zip(velocities.shape,particles.shape): + assert i==j + + Ntime,Npart,_ = particles.shape + + vv.SetData(ns.numpy_to_vtk(np.ascontiguousarray(particles.reshape((Ntime*Npart,3)).astype(np.float64)), deep=1)) + vv.SetDataTypeToDouble() + + out.Allocate(1,1) + out.SetPoints(vv) + + if not velocities is None: + print("Adding velocities") + vel_vtk = ns.numpy_to_vtk(np.ascontiguousarray(velocities.reshape((Ntime*Npart,3)).astype(np.float64)), deep=1) + vel_vtk.SetName("velocities") + out.GetPointData().AddArray(vel_vtk) + + if addtime: + timearray = np.arange(Ntime)[:,None].repeat(Npart, axis=1).reshape(Ntime*Npart) + timearray = ns.numpy_to_vtk(np.ascontiguousarray(timearray.astype(np.float64)), deep=1) + timearray.SetName("timearray") + out.GetPointData().AddArray(timearray) + + 
out.InsertNextCell(vtk.VTK_VERTEX, particles.shape[0], range(particles.shape[0])) + for p in range(Npart): + out.InsertNextCell(vtk.VTK_LINE, Ntime, range(p, p + Npart*Ntime, Npart) ) + + + @staticmethod + def request_information(source): + pass + + +class ProgrammableDensityLoad(object): + + @staticmethod + def main_script(source, density, extents=None, aname="default"): + import vtk + from vtk.util import numpy_support + + if len(density.shape) > 3: + _, Nx, Ny, Nz = density.shape + else: + Nx, Ny, Nz = density.shape + + ido = source.GetOutput() + ido.SetDimensions(Nx, Ny, Nz) + if not extents is None: + origin = extents[:6:2] + spacing = (extents[1]-extents[0])/Nx, (extents[3]-extents[2])/Ny, (extents[5]-extents[4])/Nz + else: + origin = (-1, -1, -1) + spacing = 2.0 / Nx, 2.0/Ny, 2.0/Nz + + ido.SetOrigin(*origin) + ido.SetSpacing(*spacing) + ido.SetExtent([0,Nx-1,0,Ny-1,0,Nz-1]) + if len(density.shape) > 3 and density.shape[0] == 3: + N = Nx*Ny*Nz + density = density.transpose().astype(np.float64).reshape((N,3)) + arr = numpy_support.numpy_to_vtk(density, deep=1) + else: + arr = numpy_support.numpy_to_vtk(density.transpose().astype(np.float64).ravel(), deep=1) + arr.SetName(aname) + ido.GetPointData().AddArray(arr) + + @staticmethod + def request_information(source, density=None, dims=None): + import vtk + + Nx = Ny = Nz = None + if not density is None: + Nx, Ny, Nz = density.shape + elif not dims is None: + Nx, Ny, Nz = dims + else: + raise ValueError("Need at least a density or dims") + + source.GetExecutive().GetOutputInformation(0).Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), 0, Nx-1, 0, Ny-1, 0, Nz-1) + + @staticmethod + def prepare_timesteps_info(algorithm, timesteps): + + def SetOutputTimesteps(algorithm, timesteps): + executive = algorithm.GetExecutive() + outInfo = executive.GetOutputInformation(0) + outInfo.Remove(executive.TIME_STEPS()) + for timestep in timesteps: + outInfo.Append(executive.TIME_STEPS(), timestep) + 
outInfo.Remove(executive.TIME_RANGE()) + outInfo.Append(executive.TIME_RANGE(), timesteps[0]) + outInfo.Append(executive.TIME_RANGE(), timesteps[-1]) + + SetOutputTimesteps(algorithm, timesteps) + + @staticmethod + @contextmanager + def get_timestep(algorithm): + + def GetUpdateTimestep(algorithm): + """Returns the requested time value, or None if not present""" + executive = algorithm.GetExecutive() + outInfo = executive.GetOutputInformation(0) + if not outInfo.Has(executive.UPDATE_TIME_STEP()): + return None + return outInfo.Get(executive.UPDATE_TIME_STEP()) + + # This is the requested time-step. This may not be exactly equal to the + # timesteps published in RequestInformation(). Your code must handle that + # correctly + req_time = GetUpdateTimestep(algorithm) + + output = algorithm.GetOutput() + + yield req_time + + # Now mark the timestep produced. + output.GetInformation().Set(output.DATA_TIME_STEP(), req_time) + + +def load_borg(pdo, restart_name, mcmc_name, info=False, aname="BORG"): + import h5py as h5 + + with h5.File(restart_name) as f: + N0 = f["/info/scalars/N0"][:] + N1 = f["/info/scalars/N1"][:] + N2 = f["/info/scalars/N2"][:] + L0 = f["/info/scalars/L0"][:] + L1 = f["/info/scalars/L1"][:] + L2 = f["/info/scalars/L2"][:] + c0 = f["/info/scalars/corner0"][:] + c1 = f["/info/scalars/corner1"][:] + c2 = f["/info/scalars/corner2"][:] + + if not info: + with h5.File(mcmc_name) as f: + d = f["/scalars/BORG_final_density"][:]+1 + + ProgrammableDensityLoad.main_script(pdo, d, extents=[c0,c0+L0,c1,c1+L1,c2,c2+L2], aname=aname) + else: + ProgrammableDensityLoad.request_information(pdo, dims=[N0,N1,N2]) + + +def load_borg_galaxies(pdo, restart_name, cid=0, info=False, aname="Galaxies"): + import h5py as h5 + + with h5.File(restart_name) as f: + gals = f['/info/galaxy_catalog_%d/galaxies' % cid] + ra = gals['phi'][:] + dec = gals['theta'][:] + r = gals['r'][:] + + if not info: + x = r * np.cos(ra)*np.cos(dec) + y = r * np.sin(ra)*np.cos(dec) + z = r * 
np.sin(dec) + parts = np.array([x,y,z]).transpose() + ProgrammableParticleLoad.main_script(pdo, parts) diff --git a/external/cosmotool/python/cosmotool/fftw.py b/external/cosmotool/python/cosmotool/fftw.py new file mode 100644 index 0000000..57f43d1 --- /dev/null +++ b/external/cosmotool/python/cosmotool/fftw.py @@ -0,0 +1,45 @@ +import pyfftw +import multiprocessing +import numpy as np +import numexpr as ne + +class CubeFT(object): + def __init__(self, L, N, max_cpu=-1, width=32): + + if width==32: + fourier_type='complex64' + real_type='float32' + elif width==64: + fourier_type='complex128' + real_type='float64' + else: + raise ValueError("Invalid bitwidth (must be 32 or 64)") + + self.N = N + self.align = pyfftw.simd_alignment + self.L = float(L) + self.max_cpu = multiprocessing.cpu_count() if max_cpu < 0 else max_cpu + self._dhat = pyfftw.n_byte_align_empty((self.N,self.N,self.N//2+1), self.align, dtype=fourier_type) + self._density = pyfftw.n_byte_align_empty((self.N,self.N,self.N), self.align, dtype=real_type) + self._irfft = pyfftw.FFTW(self._dhat, self._density, axes=(0,1,2), direction='FFTW_BACKWARD', threads=self.max_cpu)#, normalize_idft=False) + self._rfft = pyfftw.FFTW(self._density, self._dhat, axes=(0,1,2), threads=self.max_cpu) #, normalize_idft=False) + + def rfft(self): + return ne.evaluate('c*a', out=self._dhat, local_dict={'c':self._rfft(normalise_idft=False),'a':(self.L/self.N)**3}, casting='unsafe') + + def irfft(self): + return ne.evaluate('c*a', out=self._density, local_dict={'c':self._irfft(normalise_idft=False),'a':(1/self.L)**3}, casting='unsafe') + + def get_dhat(self): + return self._dhat + def set_dhat(self, in_dhat): + self._dhat[:] = in_dhat + dhat = property(get_dhat, set_dhat, None) + + def get_density(self): + return self._density + def set_density(self, d): + self._density[:] = d + density = property(get_density, set_density, None) + + diff --git a/external/cosmotool/python/cosmotool/grafic.py 
b/external/cosmotool/python/cosmotool/grafic.py new file mode 100644 index 0000000..04eb5b6 --- /dev/null +++ b/external/cosmotool/python/cosmotool/grafic.py @@ -0,0 +1,108 @@ +import struct +import numpy as np + +def readGrafic(filename): + """This function reads a grafic file. + + Arguments: + filename (str): the path to the grafic file + + Returns: + a tuple containing: + * the array held in the grafic file + * the size of the box + * the scale factor + * the mean matter density :math:`\Omega_\mathrm{m}` + * the dark energy density :math:`\Omega_\Lambda` + * the hubble constant, relative to 100 km/s/Mpc + * xoffset + * yoffset + * zoffset + """ + with open(filename, mode="rb") as f: + p = struct.unpack("IIIIffffffffI", f.read(4*11 + 2*4)) + checkPoint0, Nx, Ny, Nz, delta, xoff, yoff, zoff, scalefac, omega0, omegalambda0, h, checkPoint1 = p + if checkPoint0 != checkPoint1 or checkPoint0 != 4*11: + raise ValueError("Invalid unformatted access") + + a = np.empty((Nx,Ny,Nz), dtype=np.float32) + + BoxSize = delta * Nx * h + xoff *= h + yoff *= h + zoff *= h + + checkPoint = 4*Ny*Nz + for i in range(Nx): + checkPoint = struct.unpack("I", f.read(4))[0] + if checkPoint != 4*Ny*Nz: + raise ValueError("Invalid unformatted access") + + a[i, :, :] = np.fromfile(f, dtype=np.float32, count=Ny*Nz).reshape((Ny, Nz)) + + checkPoint = struct.unpack("I", f.read(4))[0] + if checkPoint != 4*Ny*Nz: + raise ValueError("Invalid unformatted access") + + return a, BoxSize, scalefac, omega0, omegalambda0, h, xoff, yoff,zoff + +def writeGrafic(filename, field, BoxSize, scalefac, **cosmo): + + with open(filename, mode="wb") as f: + checkPoint = 4*11 + + Nx,Ny,Nz = field.shape + delta = BoxSize/Nx/cosmo['h'] + bad = 0.0 + + f.write(struct.pack("IIIIffffffffI", checkPoint, + Nx, Ny, Nz, + delta, + bad, bad, bad, + scalefac, + cosmo['omega_M_0'], cosmo['omega_lambda_0'], 100*cosmo['h'], checkPoint)) + checkPoint = 4*Ny*Nz + field = field.reshape(field.shape, order='F') + for i in range(Nx): + 
f.write(struct.pack("I", checkPoint)) + f.write(field[i].astype(np.float32).tostring()) + f.write(struct.pack("I", checkPoint)) + + +def writeWhitePhase(filename, field): + + with open(filename, mode="wb") as f: + Nx,Ny,Nz = field.shape + + checkPoint = 4*4 + f.write(struct.pack("IIIIII", checkPoint, Nx, Ny, Nz, 0, checkPoint)) + + field = field.reshape(field.shape, order='F') + checkPoint = struct.pack("I", 4*Ny*Nz) + for i in range(Nx): + f.write(checkPoint) + f.write(field[i].astype(np.float32).tostring()) + f.write(checkPoint) + + +def readWhitePhase(filename): + with open(filename, mode="rb") as f: + _, Nx, Ny, Nz, _, _ = struct.unpack("IIIIII", f.read(4*4+2*4)) + + a = np.empty((Nx,Ny,Nz), dtype=np.float32) + + checkPoint_ref = 4*Ny*Nz + + for i in range(Nx): + if struct.unpack("I", f.read(4))[0] != checkPoint_ref: + raise ValueError("Invalid unformatted access") + + b = np.fromfile(f, dtype=np.float32, count=Ny*Nz).reshape((Ny, Nz)) + if i==0: + print(b) + a[i, : ,:] = b + + if struct.unpack("I", f.read(4))[0] != checkPoint_ref: + raise ValueError("Invalid unformatted access") + + return a diff --git a/external/cosmotool/python/cosmotool/simu.py b/external/cosmotool/python/cosmotool/simu.py new file mode 100644 index 0000000..2389926 --- /dev/null +++ b/external/cosmotool/python/cosmotool/simu.py @@ -0,0 +1,154 @@ +import warnings +from _cosmotool import * + +class SimulationBare(PySimulationBase): + + def __init__(self, *args): + if len(args) == 0: + return + + if not isinstance(args[0], PySimulationBase): + raise TypeError("Simulation object to mirror must be a PySimulationBase") + s = args[0] + + self.positions = [q.copy() for q in s.getPositions()] if s.getPositions() is not None else None + self.velocities = [q.copy() for q in s.getVelocities()] if s.getVelocities() is not None else None + self.identifiers = s.getIdentifiers().copy() if s.getIdentifiers() is not None else None + self.types = s.getTypes().copy() if s.getTypes() is not None else None + 
self.boxsize = s.getBoxsize() + self.time = s.getTime() + self.Hubble = s.getHubble() + self.Omega_M = s.getOmega_M() + self.Omega_Lambda = s.getOmega_Lambda() + try: + self.masses = s.getMasses().copy() if s.getMasses() is not None else None + except Exception as e: + warnings.warn("Unexpected exception: " + repr(e)) + + + def merge(self, other): + + def _safe_merge(a, b): + if b is not None: + if a is not None: + a = [np.append(q, r) for q,r in zip(a,b)] + else: + a = b + return a + + def _safe_merge0(a, b): + if b is not None: + if a is not None: + a = np.append(a, b) + else: + a = b + return a + + + assert self.time == other.getTime() + assert self.Hubble == other.getHubble() + assert self.boxsize == other.getBoxsize() + assert self.Omega_M == other.getOmega_M() + assert self.Omega_Lambda == other.getOmega_Lambda() + + self.positions = _safe_merge(self.positions, other.getPositions()) + self.velocities = _safe_merge(self.velocities, other.getVelocities()) + self.identifiers = _safe_merge0(self.identifiers, other.getIdentifiers()) + self.types = _safe_merge0(self.types, other.getTypes()) + try: + self.masses = _safe_merge0(self.masses, other.getMasses()) + except Exception as e: + warnings.warn("Unexpected exception: " + repr(e)); + self.masses = None + + def getTypes(self): + return self.types + + def getPositions(self): + return self.positions + + def getVelocities(self): + return self.velocities + + def getIdentifiers(self): + return self.identifiers + + def getMasses(self): + return self.masses + + def getTime(self): + return self.time + + def getHubble(self): + return self.Hubble + + def getBoxsize(self): + return self.boxsize + + def getOmega_M(self): + return self.Omega_M + + def getOmega_Lambda(self): + return self.Omega_Lambda + + +def simpleWriteGadget(filename, positions, boxsize=1.0, Hubble=100, Omega_M=0.30, time=1, velocities=None, identifiers=None): + + s = SimulationBare() + + s.positions = positions + + if velocities: + s.velocities = velocities 
+ else: + s.velocities = [np.zeros(positions[0].size,dtype=np.float32)]*3 + + if identifiers: + s.identifiers = identifiers + else: + s.identifiers = np.arange(positions[0].size, dtype=np.int64) + + s.Hubble = Hubble + s.time = time + s.Omega_M = Omega_M + s.Omega_Lambda = 1-Omega_M + s.boxsize = boxsize + + writeGadget(filename, s) + +def loadRamsesAll(basepath, snapshot_id, **kwargs): + """This function loads an entire ramses snapshot in memory. The keyword arguments are the one accepted + by cosmotool.loadRamses + + Args: + basepath (str): The base path of the snapshot (i.e. the directory holding the output_XXXXX directories) + snapshot_id (int): The identifier of the snapshot to load. + + Keyword args: + verboseMulti (bool): If true, print some progress information on loading multiple files + + See Also: + cosmotool.loadRamses + + """ + cpu_id = 0 + output = None + verbose = kwargs.get('verboseMulti',False) + new_kw = dict(kwargs) + if 'verboseMulti' in new_kw: + del new_kw['verboseMulti'] + while True: + base = "%s/output_%05d" % (basepath,snapshot_id) + if verbose: + print("Loading sub-snapshot %s (cpu_id=%d)" % (base,cpu_id)) + s = loadRamses(base, snapshot_id, cpu_id, **new_kw) + if s == None: + break + if output == None: + output = SimulationBare(s) + else: + output.merge(s) + + cpu_id += 1 + + return output diff --git a/external/cosmotool/python/cosmotool/smooth.py b/external/cosmotool/python/cosmotool/smooth.py new file mode 100644 index 0000000..418ca5d --- /dev/null +++ b/external/cosmotool/python/cosmotool/smooth.py @@ -0,0 +1,109 @@ +from .config import install_prefix +import subprocess +import os +try: + from tempfile import TemporaryDirectory +except: + from backports.tempfile import TemporaryDirectory + +import h5py as h5 +import numpy as np +import weakref + +def smooth_particle_density( + position, + velocities=None, + radius=1e6, + boxsize=None, + resolution=128, + center=None, tmpprefix=None ): + """Use adaptive smoothing to produce density 
and momentum fields.
+    The algorithm is originally described in [1].
+
+    Parameters:
+    position : numpy array NxQ
+        the particle positions
+        if Q==3, only positions. Q==6 means full space phase
+    velocities : Optional numpy array Nx3.
+        It is only optional if the above Q is 6.
+    radius : float
+        Maximum radius to which we need to compute fields
+    boxsize : float
+        Size of the box for the generated fields
+    resolution : int
+        Resolution of the output boxes
+    center : list of 3 floats
+        Center of the new box. It depends on the convention
+        for particles. If those are between [0, L], then [0,0,0]
+        is correct. If those are [-L/2,L/2] then you should set
+        [L/2,L/2,L/2].
+    tmpprefix : string
+        prefix of the temporary directory that will be used.
+        It needs to have a lot of space available. By default
+        '/tmp/' will typically be used.
+
+
+    Returns
+    -------
+    dictionary
+        The dict has two entries: 'rho' for the density, and 'p' for the momenta.
+        Once the dictionary is garbage collected all temporary files and directories
+        will be cleared automatically.
+
+
+    Raises
+    ------
+    ValueError
+        if arguments are invalid
+
+
+    .. [1] S. Colombi, M.
Chodorowski, + "Cosmic velocity-gravity in redshift space", MNRAS, 2007, 375, 1 + """ + if len(position.shape) != 2: + raise ValueError("Invalid position array shape") + + if velocities is None: + if position.shape[1] != 6: + raise ValueError("Position must be phase space if no velocities are given") + + + if boxsize is None: + raise ValueError("Need a boxsize") + + cx,cy,cz=center + tmpdir = TemporaryDirectory(prefix=tmpprefix) + h5_file = os.path.join(tmpdir.name, 'particles.h5') + with h5.File(h5_file, mode="w") as f: + data = f.create_dataset('particles', shape=(position.shape[0],7), dtype=np.float32) + data[:,:3] = position[:,:3] + if velocities is not None: + data[:,3:6] = velocities[:,:3] + else: + data[:,3:6] = position[:,3:] + data[:,6] = 1 + + + ret = \ + subprocess.run([ + os.path.join(install_prefix,'bin','simple3DFilter'), + h5_file, + str(radius), + str(boxsize), + str(resolution), + str(cx), str(cy), str(cz) + ], cwd=tmpdir.name) + + + f0 = h5.File(os.path.join(tmpdir.name,'fields.h5'), mode="r") + def cleanup_f0(): + f0.close() + tmpdir.cleanup() + + class Dict(dict): + pass + + t = Dict(rho=f0['density'], p=[f0['p0'], f0['p1'], f0['p2']]) + t._tmpdir_=tmpdir + weakref.finalize(t, cleanup_f0) + return t diff --git a/external/cosmotool/python/cosmotool/timing.py b/external/cosmotool/python/cosmotool/timing.py new file mode 100644 index 0000000..cad8463 --- /dev/null +++ b/external/cosmotool/python/cosmotool/timing.py @@ -0,0 +1,53 @@ +import time +from contextlib import contextmanager + +@contextmanager +def time_block(name): + """ + This generator measure the time taken by a step, and prints the result + in second to the console. + + Arguments: + name (str): prefix to print + """ + ts = time.time() + yield + te = time.time() + + print('%s %2.2f sec' % (name, te-ts)) + +def timeit(method): + """This decorator add a timing request for each call to the decorated function. 
+ + Arguments: + method (function): the method to decorate + """ + + def timed(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + print('%r (%r, %r) %2.2f sec' % (method.__name__, args, kw, te-ts)) + return result + + return timed + +def timeit_quiet(method): + """This decorator add a timing request for each call to the decorated function. + Same as cosmotool.timeit_ but is quieter by not printing the values of the arguments. + + Arguments: + method (function): the method to decorate + """ + + def timed(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + print('%r %2.2f sec' % (method.__name__, te-ts)) + return result + + return timed + diff --git a/external/cosmotool/python/cppHelper.hpp b/external/cosmotool/python/cppHelper.hpp new file mode 100644 index 0000000..4f838a0 --- /dev/null +++ b/external/cosmotool/python/cppHelper.hpp @@ -0,0 +1,45 @@ +#include "config.hpp" +#include "fortran.hpp" + +static void customCosmotoolHandler() +{ + try { + if (PyErr_Occurred()) + ; + else + throw; + } catch (const CosmoTool::InvalidArgumentException& e) { + PyErr_SetString(PyExc_ValueError, "Invalid argument"); + } catch (const CosmoTool::NoSuchFileException& e) { + PyErr_SetString(PyExc_IOError, "No such file"); + } catch (const CosmoTool::InvalidUnformattedAccess& e) { + PyErr_SetString(PyExc_RuntimeError, "Invalid fortran unformatted access"); + } catch (const CosmoTool::Exception& e) { + PyErr_SetString(PyExc_RuntimeError, e.what()); + } catch (const std::bad_alloc& exn) { + PyErr_SetString(PyExc_MemoryError, exn.what()); + } catch (const std::bad_cast& exn) { + PyErr_SetString(PyExc_TypeError, exn.what()); + } catch (const std::domain_error& exn) { + PyErr_SetString(PyExc_ValueError, exn.what()); + } catch (const std::invalid_argument& exn) { + PyErr_SetString(PyExc_ValueError, exn.what()); + } catch (const std::ios_base::failure& exn) { + // Unfortunately, in standard C++ we have no way of 
distinguishing EOF + // from other errors here; be careful with the exception mask + PyErr_SetString(PyExc_IOError, exn.what()); + } catch (const std::out_of_range& exn) { + // Change out_of_range to IndexError + PyErr_SetString(PyExc_IndexError, exn.what()); + } catch (const std::overflow_error& exn) { + PyErr_SetString(PyExc_OverflowError, exn.what()); + } catch (const std::range_error& exn) { + PyErr_SetString(PyExc_ArithmeticError, exn.what()); + } catch (const std::underflow_error& exn) { + PyErr_SetString(PyExc_ArithmeticError, exn.what()); + } catch (const std::exception& exn) { + PyErr_SetString(PyExc_RuntimeError, exn.what()); + } catch(...) { + PyErr_SetString(PyExc_RuntimeError, "Unknown exception"); + } +} diff --git a/external/cosmotool/python/project_tool.hpp b/external/cosmotool/python/project_tool.hpp new file mode 100644 index 0000000..c185c44 --- /dev/null +++ b/external/cosmotool/python/project_tool.hpp @@ -0,0 +1,120 @@ + +// Only in 3d + +template +static T project_tool(T *vertex_value, T *u, T *u0) +{ + T ret0 = 0; + for (unsigned int i = 0; i < 8; i++) + { + unsigned int c[3] = { i & 1, (i>>1)&1, (i>>2)&1 }; + int epsilon[3]; + T ret = 0; + + for (int q = 0; q < 3; q++) + epsilon[q] = 2*c[q]-1; + + for (int q = 0; q < ProdType::numProducts; q++) + ret += ProdType::product(u, u0, epsilon, q); + ret *= vertex_value[i]; + ret0 += ret; + } + + return ret0; +} + + +template +static inline T get_u0(const T& u0, int epsilon) +{ + return (1-epsilon)/2 + epsilon*u0; +// return (epsilon > 0) ? 
u0 : (1-u0); +} + +template +struct ProductTerm0 +{ + static const int numProducts = 1; + + static inline T product(T *u, T *u0, int *epsilon, int q) + { + T a = 1; + + for (unsigned int r = 0; r < 3; r++) + a *= get_u0(u0[r], epsilon[r]); + return a; + } +}; + + +template +struct ProductTerm1 +{ + static const int numProducts = 3; + + static T product(T *u, T *u0, int *epsilon, int q) + { + T a = 1; + T G[3]; + + for (unsigned int r = 0; r < 3; r++) + { + G[r] = get_u0(u0[r], epsilon[r]); + } + + T F[3] = { G[1]*G[2], G[0]*G[2], G[0]*G[1] }; + + return F[q] * u[q] * epsilon[q]; + } +}; + +template +struct ProductTerm2 +{ + static const int numProducts = 3; + + static inline T product(T *u, T *u0, int *epsilon, int q) + { + T a = 1; + T G[3]; + + for (unsigned int r = 0; r < 3; r++) + { + G[r] = get_u0(u0[r], epsilon[r]); + } + + T F[3] = { epsilon[1]*epsilon[2]*u[1]*u[2], epsilon[0]*epsilon[2]*u[0]*u[2], epsilon[0]*epsilon[1]*u[0]*u[1] }; + + return F[q] * G[q]; + } +}; + + + +template +struct ProductTerm3 +{ + static const int numProducts = 1; + + static inline T product(T *u, T *u0, int *epsilon, int q) + { + return epsilon[0]*epsilon[1]*epsilon[2]*u[0]*u[1]*u[2]; + } +}; + + +template +T compute_projection(T *vertex_value, T *u, T *u0, T rho) +{ + T ret; + + ret = project_tool >(vertex_value, u, u0) * rho; + ret += project_tool >(vertex_value, u, u0) * rho * rho / 2; + ret += project_tool >(vertex_value, u, u0) * rho * rho * rho / 3; + ret += project_tool >(vertex_value, u, u0) * rho * rho * rho * rho / 4; + return ret; +} + +template +double compute_projection(double *vertex_value, double *u, double *u0, double rho); + diff --git a/external/cosmotool/python/safe_gadget.hpp b/external/cosmotool/python/safe_gadget.hpp new file mode 100644 index 0000000..3de6a67 --- /dev/null +++ b/external/cosmotool/python/safe_gadget.hpp @@ -0,0 +1,29 @@ +#include "config.hpp" +#include "loadGadget.hpp" +#include + +static inline +CosmoTool::SimuData *loadGadgetMulti_safe(const 
std::string& fname, int flags, int gadgetFormat) +{ + try + { + return CosmoTool::loadGadgetMulti(fname.c_str(), -1, flags, gadgetFormat); + } + catch (const CosmoTool::Exception& e) + { + return 0; + } +} + + +static inline +CosmoTool::SimuData **alloc_simudata(int n) +{ + return new CosmoTool::SimuData *[n]; +} + +static inline +void del_simudata(CosmoTool::SimuData **s) +{ + delete[] s; +} diff --git a/external/cosmotool/python_sample/build_2lpt_ksz.py b/external/cosmotool/python_sample/build_2lpt_ksz.py new file mode 100644 index 0000000..f6585d2 --- /dev/null +++ b/external/cosmotool/python_sample/build_2lpt_ksz.py @@ -0,0 +1,111 @@ +import healpy as hp +import numpy as np +import cosmotool as ct +import argparse +import h5py as h5 +from matplotlib import pyplot as plt + +Mpc=3.08e22 +rhoc = 1.8864883524081933e-26 # m^(-3) +sigmaT = 6.6524e-29 +mp = 1.6726e-27 +lightspeed = 299792458. +v_unit = 1e3 # Unit of 1 km/s +T_cmb=2.725 +h = 0.71 +Y = 0.245 #The Helium abundance +Omega_matter = 0.26 +Omega_baryon=0.0445 + +G=6.67e-11 +MassSun=2e30 +frac_electron = 1.0 # Hmmmm +frac_gas_galaxy = 0.14 +mu = 1/(1-0.5*Y) + +baryon_fraction = Omega_baryon / Omega_matter + +ksz_normalization = T_cmb*sigmaT*v_unit/(lightspeed*mu*mp) * baryon_fraction +rho_mean_matter = Omega_matter * (3*(100e3/Mpc)**2/(8*np.pi*G)) + +parser=argparse.ArgumentParser(description="Generate Skymaps from CIC maps") +parser.add_argument('--boxsize', type=float, required=True) +parser.add_argument('--Nside', type=int, default=128) +parser.add_argument('--base_h5', type=str, required=True) +parser.add_argument('--base_fig', type=str, required=True) +parser.add_argument('--start', type=int, required=True) +parser.add_argument('--end', type=int, required=True) +parser.add_argument('--step', type=int, required=True) +parser.add_argument('--minval', type=float, default=-0.5) +parser.add_argument('--maxval', type=float, default=0.5) +parser.add_argument('--depth_min', type=float, default=10) 
+parser.add_argument('--depth_max', type=float, default=60) +parser.add_argument('--iid', type=int, default=0) +parser.add_argument('--ksz_map', type=str, required=True) +args = parser.parse_args() + +L = args.boxsize +Nside = args.Nside + +def build_sky_proj(density, dmax=60.,dmin=0,iid=0): + + N = density.shape[0] + ix = (np.arange(N)-0.5)*L/N - 0.5 * L + + + dist2 = (ix[:,None,None]**2 + ix[None,:,None]**2 + ix[None,None,:]**2) + + flux = density.transpose().astype(ct.DTYPE) # / dist2 + dmax=N*dmax/L + dmin=N*dmin/L + if iid == 0: + shifter = np.array([0.5,0.5,0.5]) + else: + shifter = np.array([0.,0.,0.]) + + projsky1 = ct.spherical_projection(Nside, flux, dmin, dmax, integrator_id=iid, shifter=shifter, progress=1) + + return projsky1*L/N + +def build_unit_vectors(N): + ii = np.arange(N)*L/N - 0.5*L + d = np.sqrt(ii[:,None,None]**2 + ii[None,:,None]**2 + ii[None,None,:]**2) + d[N/2,N/2,N/2] = 1 + ux = ii[:,None,None] / d + uy = ii[None,:,None] / d + uz = ii[None,None,:] / d + + return ux,uy,uz + +for i in xrange(args.start,args.end,args.step): + ff=plt.figure(1) + plt.clf() + with h5.File(args.base_h5 % i, mode="r") as f: + p = f['velocity'][:] + davg = np.average(np.average(np.average(f['density'][:],axis=0),axis=0),axis=0) + p /= davg # Now we have momentum scaled to the mean density + + # Rescale by Omega_b / Omega_m + p = p.astype(np.float64) + print p.max(), p.min(), ksz_normalization, rho_mean_matter + p *= ksz_normalization*rho_mean_matter + p *= 1e6 # Put it in uK + p *= -1 # ksz has a minus + + ux,uy,uz = build_unit_vectors(p.shape[0]) + pr = p[:,:,:,0] * ux + p[:,:,:,1] * uy + p[:,:,:,2] * uz + print p.max(), p.min() + print pr.max()*Mpc, pr.min()*Mpc + + @ct.timeit_quiet + def run_proj(): + return build_sky_proj(pr*Mpc, dmin=args.depth_min,dmax=args.depth_max,iid=args.iid) + + proj = run_proj() + + hp.write_map(args.ksz_map % i, proj) + + hp.mollview(proj, fig=1, coord='CG', cmap=plt.cm.coolwarm, title='Sample %d' % i, min=args.minval, + 
max=args.maxval) + + ff.savefig(args.base_fig % i) diff --git a/external/cosmotool/python_sample/build_2lpt_skymap.py b/external/cosmotool/python_sample/build_2lpt_skymap.py new file mode 100644 index 0000000..0e421c3 --- /dev/null +++ b/external/cosmotool/python_sample/build_2lpt_skymap.py @@ -0,0 +1,76 @@ +import matplotlib +matplotlib.use('Agg') +import healpy as hp +import numpy as np +import cosmotool as ct +import argparse +import h5py as h5 +from matplotlib import pyplot as plt + +parser=argparse.ArgumentParser(description="Generate Skymaps from CIC maps") +parser.add_argument('--boxsize', type=float, required=True) +parser.add_argument('--Nside', type=int, default=128) +parser.add_argument('--base_cic', type=str, required=True) +parser.add_argument('--base_fig', type=str, required=True) +parser.add_argument('--start', type=int, required=True) +parser.add_argument('--end', type=int, required=True) +parser.add_argument('--step', type=int, required=True) +parser.add_argument('--minval', type=float, default=0) +parser.add_argument('--maxval', type=float, default=4) +parser.add_argument('--depth_min', type=float, default=10) +parser.add_argument('--depth_max', type=float, default=60) +parser.add_argument('--iid', type=int, default=0) +parser.add_argument('--proj_cat', type=bool, default=False) +args = parser.parse_args() + +#INDATA="/nethome/lavaux/Copy/PlusSimulation/BORG/Input_Data/2m++.npy" +INDATA="2m++.npy" +tmpp = np.load(INDATA) + +L = args.boxsize +Nside = args.Nside + +def build_sky_proj(density, dmax=60.,dmin=0,iid=0): + + N = density.shape[0] + ix = (np.arange(N)-0.5)*L/N - 0.5 * L + +# dist2 = (ix[:,None,None]**2 + ix[None,:,None]**2 + ix[None,None,:]**2) + + flux = density.transpose().astype(ct.DTYPE) # / dist2 + dmax=N*dmax/L + dmin=N*dmin/L + if iid == 0: + shifter = np.array([0.5,0.5,0.5]) + else: + shifter = np.array([0.,0.,0.]) + + projsky1 = ct.spherical_projection(Nside, flux, dmin, dmax, integrator_id=iid, shifter=shifter) + + return 
projsky1*L/N + +l,b = tmpp['gal_long'],tmpp['gal_lat'] + +l = np.radians(l) +b = np.pi/2 - np.radians(b) + +dcmb = tmpp['velcmb']/100. + +idx = np.where((dcmb>args.depth_min)*(dcmb100*dmin)*(cat['distance'] dmax: + continue + + Lgal = DA**2*10**(0.4*(tmpp_cat['Msun']-i['K2MRS']+25)) + + M_K=i['K2MRS']-5*np.log10(DA)-25 + # Skip too bright galaxies + if M_K < bright: + continue + + profiler = ksz.KSZ_Isothermal(Lgal, x, y=y, sculpt=sculpt) + + idx0 = hp.query_disc(Nside, (x0,y0,z0), 3*profiler.rGalaxy/DA) + + xp1 = xp[idx0] + yp1 = yp[idx0] + zp1 = zp[idx0] + N2_1 = N2[idx0] + + cos_theta = ne.evaluate('(x0*xp1+y0*yp1+z0*zp1)/(sqrt(x0**2+y0**2+z0**2)*(N2_1))') + + idx,idx_masked,m = profiler.projected_profile(cos_theta, DA) + idx = idx0[idx] + idx_masked = idx0[idx_masked] + ksz_template[idx] += m + ksz_mask[idx_masked] = 0 + if do_hubble: + ksz_hubble_template[idx] += m*DA + + ne.evaluate('ksz_template*ksz_normalization', out=ksz_template) + + result =ksz_template, ksz_mask + if do_hubble: + ne.evaluate('ksz_hubble_template*ksz_normalization', out=ksz_hubble_template) + return result + ( ksz_hubble_template,) + else: + return result + +def get_args(): + parser=argparse.ArgumentParser(description="Generate Skymaps from CIC maps") + parser.add_argument('--Nside', type=int, default=128) + parser.add_argument('--minval', type=float, default=-0.5) + parser.add_argument('--maxval', type=float, default=0.5) + parser.add_argument('--depth_min', type=float, default=10) + parser.add_argument('--depth_max', type=float, default=60) + parser.add_argument('--ksz_map', type=str, required=True) + parser.add_argument('--base_fig', type=str, default="kszfig.png") + parser.add_argument('--build_dipole', action='store_true') + parser.add_argument('--degrade', type=int, default=-1) + parser.add_argument('--y',type=float,default=0.0) + parser.add_argument('--x',type=float,default=2.37) + parser.add_argument('--random', action='store_true') + parser.add_argument('--perturb', type=float, 
default=0) + parser.add_argument('--hubble_monopole', action='store_true') + parser.add_argument('--remove_bright', type=float, default=-np.inf) + parser.add_argument('--bright_file', type=str) + parser.add_argument('--lg', action='store_true') + parser.add_argument('--sculpt_beam', type=float, default=-1) + return parser.parse_args() + +def main(): + + args = get_args() + + ff=plt.figure(1) + plt.clf() + v=[] + + print("Generating map...") + + with open("crap.txt", mode="r") as f: + bright_list = [l.split('#')[0].strip(" \t\n\r") for l in f] + + if args.bright_file: + with open(args.bright_file, mode="r") as f: + idx_name = f.readline().split(',').index('name_2') + bright_list = bright_list + [l.split(',')[idx_name] for l in f] + + print("Built bright point source list: " + repr(bright_list)) + r = generate_from_catalog(args.depth_min,args.depth_max,args.Nside,perturb=args.perturb,y=args.y,do_random=args.random,do_hubble=args.hubble_monopole,x=args.x,bright=args.remove_bright,use_vlg=args.lg,bright_list=bright_list,sculpt=args.sculpt_beam) + hubble_map = None + if args.hubble_monopole: + proj,mask,hubble_map = r + else: + proj,mask = r + + if args.degrade > 0: + proj *= mask + proj = hp.ud_grade(proj, nside_out=args.degrade) + if hubble_map is not None: + hubble_map *= mask + hubble_map = hp.ud_grade(hubble_map, nside_out=args.degrade) + mask = hp.ud_grade(mask, nside_out=args.degrade) + Nside = args.degrade + else: + Nside = args.Nside + + hp.write_map(args.ksz_map + ".fits", proj) + hp.write_map(args.ksz_map + "_mask.fits", mask) + + if args.build_dipole: + x,y,z=hp.pix2vec(Nside, np.arange(hp.nside2npix(Nside))) + hp.write_map(args.ksz_map + "_x.fits", proj*x) + hp.write_map(args.ksz_map + "_y.fits", proj*y) + hp.write_map(args.ksz_map + "_z.fits", proj*z) + + if args.hubble_monopole: + hp.write_map(args.ksz_map + "_hubble.fits", hubble_map) + + hp.mollview(proj*100*1e6, fig=1, coord='GG', cmap=plt.cm.coolwarm, title='', min=args.minval, + max=args.maxval) + + 
ff.savefig(args.base_fig) + +main() diff --git a/external/cosmotool/python_sample/build_nbody_ksz.py b/external/cosmotool/python_sample/build_nbody_ksz.py new file mode 100644 index 0000000..39dbffc --- /dev/null +++ b/external/cosmotool/python_sample/build_nbody_ksz.py @@ -0,0 +1,109 @@ +import healpy as hp +import numpy as np +import cosmotool as ct +import argparse +import h5py as h5 +from matplotlib import pyplot as plt + +Mpc=3.08e22 +rhoc = 1.8864883524081933e-26 # m^(-3) +sigmaT = 6.6524e-29 +mp = 1.6726e-27 +lightspeed = 299792458. +v_unit = 1e3 # Unit of 1 km/s +T_cmb=2.725 +h = 0.71 +Y = 0.245 #The Helium abundance +Omega_matter = 0.26 +Omega_baryon=0.0445 + +G=6.67e-11 +MassSun=2e30 +frac_electron = 1.0 # Hmmmm +frac_gas_galaxy = 0.14 +mu = 1/(1-0.5*Y) + +baryon_fraction = Omega_baryon / Omega_matter + +ksz_normalization = T_cmb*sigmaT*v_unit/(lightspeed*mu*mp) * baryon_fraction +rho_mean_matter = Omega_matter * (3*(100e3/Mpc)**2/(8*np.pi*G)) + +parser=argparse.ArgumentParser(description="Generate Skymaps from CIC maps") +parser.add_argument('--boxsize', type=float, required=True) +parser.add_argument('--Nside', type=int, default=128) +parser.add_argument('--base_h5', type=str, required=True) +parser.add_argument('--base_fig', type=str, required=True) +parser.add_argument('--start', type=int, required=True) +parser.add_argument('--end', type=int, required=True) +parser.add_argument('--step', type=int, required=True) +parser.add_argument('--minval', type=float, default=-0.5) +parser.add_argument('--maxval', type=float, default=0.5) +parser.add_argument('--depth_min', type=float, default=10) +parser.add_argument('--depth_max', type=float, default=60) +parser.add_argument('--iid', type=int, default=0) +parser.add_argument('--ksz_map', type=str, required=True) +args = parser.parse_args() + +L = args.boxsize +Nside = args.Nside + +def build_sky_proj(density, dmax=60.,dmin=0,iid=0): + + N = density.shape[0] + ix = (np.arange(N)-0.5)*L/N - 0.5 * L + + + dist2 = 
(ix[:,None,None]**2 + ix[None,:,None]**2 + ix[None,None,:]**2) + + flux = density.transpose().astype(ct.DTYPE) # / dist2 + dmax=N*dmax/L + dmin=N*dmin/L + if iid == 0: + shifter = np.array([0.5,0.5,0.5]) + else: + shifter = np.array([0.,0.,0.]) + + projsky1 = ct.spherical_projection(Nside, flux, dmin, dmax, integrator_id=iid, shifter=shifter) + + return projsky1*L/N + +def build_unit_vectors(N): + ii = np.arange(N)*L/N - 0.5*L + d = np.sqrt(ii[:,None,None]**2 + ii[None,:,None]**2 + ii[None,None,:]**2) + d[N/2,N/2,N/2] = 1 + ux = ii[:,None,None] / d + uy = ii[None,:,None] / d + uz = ii[None,None,:] / d + + return ux,uy,uz + +for i in xrange(args.start,args.end,args.step): + ff=plt.figure(1) + plt.clf() + with h5.File(args.base_h5 % i, mode="r") as f: + p = f['velocity'][:] + davg = np.average(np.average(np.average(f['density'][:],axis=0),axis=0),axis=0) + p /= davg # Now we have momentum scaled to the mean density + + # Rescale by Omega_b / Omega_m + p = p.astype(np.float64) + print p.max(), p.min(), ksz_normalization, rho_mean_matter + p *= -ksz_normalization*rho_mean_matter*1e6 + + ux,uy,uz = build_unit_vectors(p.shape[0]) + pr = p[:,:,:,0] * ux + p[:,:,:,1] * uy + p[:,:,:,2] * uz + print p.max(), p.min() + print pr.max()*Mpc, pr.min()*Mpc + + @ct.timeit_quiet + def run_proj(): + return build_sky_proj(pr*Mpc, dmin=args.depth_min,dmax=args.depth_max,iid=args.iid) + + run_proj() + + hp.write_map(args.ksz_map % i, proj) + + hp.mollview(proj, fig=1, coord='CG', cmap=plt.cm.coolwarm, title='Sample %d' % i, min=args.minval, + max=args.maxval) + + ff.savefig(args.base_fig % i) diff --git a/external/cosmotool/python_sample/build_nbody_ksz_from_galaxies.py b/external/cosmotool/python_sample/build_nbody_ksz_from_galaxies.py new file mode 100644 index 0000000..780a588 --- /dev/null +++ b/external/cosmotool/python_sample/build_nbody_ksz_from_galaxies.py @@ -0,0 +1,167 @@ +import healpy as hp +import numpy as np +import cosmotool as ct +import argparse +import h5py as h5 
+import matplotlib +matplotlib.use('Agg') +from matplotlib import pyplot as plt +import ksz +from ksz.constants import * +from cosmotool import interp3d + +def wrapper_impulsion(f): + + class _Wrapper(object): + def __init__(self): + pass + + def __getitem__(self,direction): + + if 'velocity' in f: + return f['velocity'][:,:,:,direction] + + n = "p%d" % direction + return f[n] + + return _Wrapper() + +parser=argparse.ArgumentParser(description="Generate Skymaps from CIC maps") +parser.add_argument('--boxsize', type=float, required=True) +parser.add_argument('--Nside', type=int, default=128) +parser.add_argument('--base_h5', type=str, required=True) +parser.add_argument('--base_fig', type=str, required=True) +parser.add_argument('--start', type=int, required=True) +parser.add_argument('--end', type=int, required=True) +parser.add_argument('--step', type=int, required=True) +parser.add_argument('--minval', type=float, default=-0.5) +parser.add_argument('--maxval', type=float, default=0.5) +parser.add_argument('--depth_min', type=float, default=10) +parser.add_argument('--depth_max', type=float, default=60) +parser.add_argument('--iid', type=int, default=0) +parser.add_argument('--ksz_map', type=str, required=True) +args = parser.parse_args() + +L = args.boxsize +Nside = args.Nside + +def build_unit_vectors(N): + ii = np.arange(N)*L/N - 0.5*L + d = np.sqrt(ii[:,None,None]**2 + ii[None,:,None]**2 + ii[None,None,:]**2) + d[N/2,N/2,N/2] = 1 + ux = ii[:,None,None] / d + uy = ii[None,:,None] / d + uz = ii[None,None,:] / d + + return ux,uy,uz + +def build_radial_v(v): + N = v[0].shape[0] + u = build_unit_vectors(N) + vr = v[0] * u[2] + vr += v[1] * u[1] + vr += v[2] * u[0] + + return vr.transpose() + +def generate_from_catalog(vfield,Boxsize,dmin,dmax): + import progressbar as pbar + + cat = np.load("2m++.npy") + + + cat['distance'] = cat['best_velcmb'] + cat = cat[np.where((cat['distance']>100*dmin)*(cat['distance']10)*(dcmb<60)) + +plt.figure(1) +plt.clf() +if True: + 
with h5.File("fields.h5", mode="r") as f: + d = f["density"][:].transpose() + d /= np.average(np.average(np.average(d,axis=0),axis=0),axis=0) + proj = build_sky_proj(d, dmin=10,dmax=60.) + proj0 = proj1 = proj +else: + d = np.load("icgen/dcic0.npy") + proj0 = build_sky_proj(1+d, dmin=10,dmax=60.) + d = np.load("icgen/dcic1.npy") + proj1 = build_sky_proj(1+d, dmin=10,dmax=60.) + +hp.mollview(proj0, fig=1, coord='CG', max=60, cmap=plt.cm.coolwarm) +hp.projscatter(b[idx], l[idx], lw=0, color='g', s=5.0, alpha=0.8) + +plt.figure(2) +plt.clf() +hp.mollview(proj1, fig=2, coord='CG', max=60, cmap=plt.cm.coolwarm) +hp.projscatter(b[idx], l[idx], lw=0, color='g', s=5.0, alpha=0.8) diff --git a/external/cosmotool/python_sample/gen_2lpt_asmooth.py b/external/cosmotool/python_sample/gen_2lpt_asmooth.py new file mode 100644 index 0000000..8de386a --- /dev/null +++ b/external/cosmotool/python_sample/gen_2lpt_asmooth.py @@ -0,0 +1,48 @@ +import os +import h5py as h5 +import numpy as np +import cosmotool as ct +import icgen as bic +import icgen.cosmogrowth as cg +import sys +import argparse + +#ADAPT_SMOOTH="/home/bergeron1NS/lavaux/Software/cosmotool/build/sample/simple3DFilter" +ADAPT_SMOOTH="/home/guilhem/PROJECTS/cosmotool/build/sample/simple3DFilter" +cosmo={'omega_M_0':0.3175, 'h':0.6711} +cosmo['omega_lambda_0']=1-cosmo['omega_M_0'] +cosmo['omega_k_0'] = 0 +cosmo['omega_B_0']=0.049 +cosmo['SIGMA8']=0.8344 +cosmo['ns']=0.9624 +N0=256 + +doSimulation=True + +astart=1. 
+ +parser=argparse.ArgumentParser(description="Generate CIC density from 2LPT") +parser.add_argument('--start', type=int, required=True) +parser.add_argument('--end', type=int, required=True) +parser.add_argument('--step', type=int, required=True) +parser.add_argument('--base', type=str, required=True) +parser.add_argument('--N', type=int, default=256) +parser.add_argument('--output', type=str, default="fields_%d.h5") +parser.add_argument('--supersample', type=int, default=1) +args = parser.parse_args() + + + +for i in [4629]:#xrange(args.start, args.end, args.step): + print i + + pos,vel,density,N,L,_,_ = bic.run_generation("%s/initial_density_%d.dat" % (args.base,i), 0.001, astart, + cosmo, supersample=args.supersample, do_lpt2=True) + + q = pos + vel + [np.ones(vel[0].shape[0])] + + with h5.File("particles.h5", mode="w") as f: + f.create_dataset("particles", data=np.array(q).transpose()) + + os.system(ADAPT_SMOOTH + " %s %lg %lg %d %lg %lg %lg" % ("particles.h5", 3000000, L, args.N, 0, 0, 0)) + os.rename("fields.h5", args.output % i) diff --git a/external/cosmotool/python_sample/gen_2lpt_density.py b/external/cosmotool/python_sample/gen_2lpt_density.py new file mode 100644 index 0000000..76636db --- /dev/null +++ b/external/cosmotool/python_sample/gen_2lpt_density.py @@ -0,0 +1,55 @@ +import numexpr as ne +import numpy as np +import cosmotool as ct +import icgen as bic +import icgen.cosmogrowth as cg +import sys +import argparse + +cosmo={'omega_M_0':0.3175, 'h':0.6711} +cosmo['omega_lambda_0']=1-cosmo['omega_M_0'] +cosmo['omega_k_0'] = 0 +cosmo['omega_B_0']=0.049 +cosmo['SIGMA8']=0.8344 +cosmo['ns']=0.9624 +N0=256 + +doSimulation=True + +astart=1. 
+ +parser=argparse.ArgumentParser(description="Generate CIC density from 2LPT") +parser.add_argument('--start', type=int, required=True) +parser.add_argument('--end', type=int, required=True) +parser.add_argument('--step', type=int, required=True) +parser.add_argument('--base', type=str, required=True) +parser.add_argument('--N', type=int, default=256) +parser.add_argument('--output', type=str, default="dcic_%d.npy") +parser.add_argument('--supersample', type=int, default=1) +parser.add_argument('--rsd', action='store_true') +args = parser.parse_args() + + + +for i in xrange(args.start, args.end, args.step): + print i +# pos,_,density,N,L,_ = bic.run_generation("/nethome/lavaux/remote/borg_2m++_128/initial_density_%d.dat" % i, 0.001, astart, cosmo, supersample=2, do_lpt2=True) + pos,vel,density,N,L,_,_ = bic.run_generation("%s/initial_density_%d.dat" % (args.base,i), 0.001, astart, + cosmo, supersample=args.supersample, do_lpt2=True, needvel=True) + + if args.rsd: + inv_r2 = ne.evaluate('1/sqrt(x**2+y**2+z**2)', + local_dict={'x':pos[0], 'y':pos[1], 'z':pos[2]}) + rsd = lambda p,v: ne.evaluate('x + (x*vx)*inv_r2 / H', + local_dict={'x':p, 'inv_r2':inv_r2, + 'vx':v, 'H':100.0}, out=p, casting='unsafe') + + rsd(pos[0], vel[0]) + rsd(pos[1], vel[1]) + rsd(pos[2], vel[2]) + + dcic = ct.cicParticles(pos, L, args.N) + dcic /= np.average(np.average(np.average(dcic, axis=0), axis=0), axis=0) + dcic -= 1 + + np.save(args.output % i, dcic) diff --git a/external/cosmotool/python_sample/icgen/__init__.py b/external/cosmotool/python_sample/icgen/__init__.py new file mode 100644 index 0000000..7c8d705 --- /dev/null +++ b/external/cosmotool/python_sample/icgen/__init__.py @@ -0,0 +1,2 @@ +from borgicgen import * +import cosmogrowth diff --git a/external/cosmotool/python_sample/icgen/borgadaptor.py b/external/cosmotool/python_sample/icgen/borgadaptor.py new file mode 100644 index 0000000..07637df --- /dev/null +++ b/external/cosmotool/python_sample/icgen/borgadaptor.py @@ -0,0 
+1,55 @@ +import numpy as np + +def fourier_analysis(borg_vol): + L = (borg_vol.ranges[1]-borg_vol.ranges[0]) + N = borg_vol.density.shape[0] + + return np.fft.rfftn(borg_vol.density)*(L/N)**3, L, N + +def borg_upgrade_sampling(dhat, supersample): + N = dhat.shape[0] + N2 = N * supersample + dhat_new = np.zeros((N2, N2, N2/2+1), dtype=np.complex128) + + hN = N/2 + dhat_new[:hN, :hN, :hN+1] = dhat[:hN, :hN, :] + dhat_new[:hN, (N2-hN):N2, :hN+1] = dhat[:hN, hN:, :] + dhat_new[(N2-hN):N2, (N2-hN):N2, :hN+1] = dhat[hN:, hN:, :] + dhat_new[(N2-hN):N2, :hN, :hN+1] = dhat[hN:, :hN, :] + + return dhat_new, N2 + +def half_pixel_shift(borg, doshift=False): + + dhat,L,N = fourier_analysis(borg) + if not doshift: + return dhat, L + + return bare_half_pixel_shift(dhat, L, N) + +def bare_half_pixel_shift(dhat, L, N, doshift=False): + +# dhat_new,N2 = borg_upgrade_sampling(dhat, 2) +# d = (np.fft.irfftn(dhat_new)*(N2/L)**3)[1::2,1::2,1::2] +# del dhat_new +# dhat = np.fft.rfftn(d)*(L/N)**3 +# return dhat, L + +# dhat2 = np.zeros((N,N,N),dtype=np.complex128) +# dhat2[:,:,:N/2+1] = dhat +# dhat2[N:0:-1, N:0:-1, N:N/2:-1] = np.conj(dhat[1:,1:,1:N/2]) +# dhat2[0, N:0:-1, N:N/2:-1] = np.conj(dhat[0, 1:, 1:N/2]) +# dhat2[N:0:-1, 0, N:N/2:-1] = np.conj(dhat[1:, 0, 1:N/2]) +# dhat2[0,0,N:N/2:-1] = np.conj(dhat[0, 0, 1:N/2]) + + ik = np.fft.fftfreq(N,d=L/N)*2*np.pi + phi = 0.5*L/N*(ik[:,None,None]+ik[None,:,None]+ik[None,None,:(N/2+1)]) +# phi %= 2*np.pi + phase = np.cos(phi)+1j*np.sin(phi) + dhat = dhat*phase + dhat[N/2,:,:] = 0 + dhat[:,N/2,:] = 0 + dhat[:,:,N/2] = 0 + + return dhat, L + diff --git a/external/cosmotool/python_sample/icgen/borgicgen.py b/external/cosmotool/python_sample/icgen/borgicgen.py new file mode 100644 index 0000000..78f1805 --- /dev/null +++ b/external/cosmotool/python_sample/icgen/borgicgen.py @@ -0,0 +1,228 @@ +import cosmotool as ct +import numpy as np +import cosmolopy as cpy +from cosmogrowth import * +import borgadaptor as ba + +@ct.timeit +def 
gen_posgrid(N, L, delta=1, dtype=np.float32): + """ Generate an ordered lagrangian grid""" + + ix = (np.arange(N)*(L/N*delta)).astype(dtype) + + x = ix[:,None,None].repeat(N, axis=1).repeat(N, axis=2) + y = ix[None,:,None].repeat(N, axis=0).repeat(N, axis=2) + z = ix[None,None,:].repeat(N, axis=0).repeat(N, axis=1) + + return x.reshape((x.size,)), y.reshape((y.size,)), z.reshape((z.size,)) + +def bin_power(P, L, bins=20, range=(0,1.), dev=False): + + N = P.shape[0] + ik = np.fft.fftfreq(N, d=L/N)*2*np.pi + + k = np.sqrt(ik[:,None,None]**2 + ik[None,:,None]**2 + ik[None,None,:(N/2+1)]**2) + + H,b = np.histogram(k, bins=bins, range=range) + Hw,b = np.histogram(k, bins=bins, weights=P, range=range) + + if dev: + return Hw/(H-1), 0.5*(b[1:]+b[0:bins]), 1.0/np.sqrt(H) + else: + return Hw/(H-1), 0.5*(b[1:]+b[0:bins]) + +def compute_power_from_borg(input_borg, a_borg, cosmo, bins=10, range=(0,1)): + borg_vol = ct.read_borg_vol(input_borg) + N = borg_vol.density.shape[0] + + cgrowth = CosmoGrowth(**cosmo) + D1 = cgrowth.D(1) + D1_0 = D1/cgrowth.D(a_borg) + print("D1_0=%lg" % D1_0) + + density_hat, L = ba.half_pixel_shift(borg_vol) + + return bin_power(D1_0**2*np.abs(density_hat)**2/L**3, L, bins=bins, range=range) + +def compute_ref_power(L, N, cosmo, bins=10, range=(0,1), func='HU_WIGGLES'): + ik = np.fft.fftfreq(N, d=L/N)*2*np.pi + + k = np.sqrt(ik[:,None,None]**2 + ik[None,:,None]**2 + ik[None,None,:(N/2+1)]**2) + p = ct.CosmologyPower(**cosmo) + p.setFunction(func) + p.normalize(cosmo['SIGMA8']) + + return bin_power(p.compute(k)*cosmo['h']**3, L, bins=bins, range=range) + + +def do_supergenerate(density, density_out=None, mulfac=None,zero_fill=False,Pk=None,L=None,h=None): + + N = density.shape[0] + + if density_out is None: + assert mulfac is not None + Ns = mulfac*N + density_out = np.zeros((Ns,Ns,Ns/2+1), dtype=np.complex128) + density_out[:] = np.nan + elif mulfac is None: + mulfac = density_out.shape[0] / N + Ns = density_out.shape[0] + assert 
(density_out.shape[0] % N) == 0 + + assert len(density_out.shape) == 3 + assert density_out.shape[0] == density_out.shape[1] + assert density_out.shape[2] == (density_out.shape[0]/2+1) + + hN = N/2 + density_out[:hN, :hN, :hN+1] = density[:hN, :hN, :] + density_out[:hN, (Ns-hN):Ns, :hN+1] = density[:hN, hN:, :] + density_out[(Ns-hN):Ns, (Ns-hN):Ns, :hN+1] = density[hN:, hN:, :] + density_out[(Ns-hN):Ns, :hN, :hN+1] = density[hN:, :hN, :] + + if mulfac > 1: + + cond=np.isnan(density_out) + if zero_fill: + density_out[cond] = 0 + else: + + if Pk is not None: + assert L is not None and h is not None + + @ct.timeit_quiet + def build_Pk(): + ik = np.fft.fftfreq(Ns, d=L/Ns)*2*np.pi + k = ne.evaluate('sqrt(kx**2 + ky**2 + kz**2)', {'kx':ik[:,None,None], 'ky':ik[None,:,None], 'kz':ik[None,None,:(Ns/2+1)]}) + return Pk.compute(k)*L**3 + + print np.where(np.isnan(density_out))[0].size + Nz = np.count_nonzero(cond) + amplitude = np.sqrt(build_Pk()[cond]/2) if Pk is not None else (1.0/np.sqrt(2)) + density_out.real[cond] = np.random.randn(Nz) * amplitude + density_out.imag[cond] = np.random.randn(Nz) * amplitude + print np.where(np.isnan(density_out))[0].size + + # Now we have to fix the Nyquist plane + hNs = Ns/2 + nyquist = density_out[:, :, hNs] + Nplane = nyquist.size + nyquist.flat[:Nplane/2] = np.sqrt(2.0)*nyquist.flat[Nplane:Nplane/2:-1].conj() + + + return density_out + +@ct.timeit_quiet +def run_generation(input_borg, a_borg, a_ic, cosmo, supersample=1, supergenerate=1, do_lpt2=True, shiftPixel=False, psi_instead=False, needvel=True, func='HU_WIGGLES'): + """ Generate particles and velocities from a BORG snapshot. 
Returns a tuple of + (positions,velocities,N,BoxSize,scale_factor).""" + + borg_vol = ct.read_borg_vol(input_borg) + N = borg_vol.density.shape[0] + + cgrowth = CosmoGrowth(**cosmo) + + density, L = ba.half_pixel_shift(borg_vol, doshift=shiftPixel) + + + # Compute LPT scaling coefficient + D1 = cgrowth.D(a_ic) + D1_0 = D1/cgrowth.D(a_borg) + Dborg = cgrowth.D(a_borg)/cgrowth.D(1.0) + print "D1_0=%lg" % D1_0 + + if supergenerate>1: + print("Doing supergeneration (factor=%d)" % supergenerate) + p = ct.CosmologyPower(**cosmo) + p.setFunction(func) + p.normalize(cosmo['SIGMA8']*Dborg) + density = do_supergenerate(density,mulfac=supergenerate,Pk=p,L=L,h=cosmo['h']) + + lpt = LagrangianPerturbation(-density, L, fourier=True, supersample=supersample) + + # Generate grid + posq = gen_posgrid(N*supersample, L) + vel= [] + posx = [] + + velmul = cgrowth.compute_velmul(a_ic) if not psi_instead else 1 + + D2 = -3./7 * D1_0**2 + + if do_lpt2: + psi2 = lpt.lpt2('all') + for j in xrange(3): + # Generate psi_j (displacement along j) + print("LPT1 axis=%d" % j) + psi = D1_0*lpt.lpt1(j) + psi = psi.reshape((psi.size,)) + if do_lpt2: + print("LPT2 axis=%d" % j) + psi += D2 * psi2[j].reshape((psi2[j].size,)) + # Generate posx + posx.append(((posq[j] + psi)%L).astype(np.float32)) + # Generate vel + if needvel: + vel.append((psi*velmul).astype(np.float32)) + + print("velmul=%lg" % (cosmo['h']*velmul)) + + lpt.cube.dhat = lpt.dhat + density = lpt.cube.irfft() + density *= (cgrowth.D(1)/cgrowth.D(a_borg)) + + return posx,vel,density,N*supergenerate*supersample,L,a_ic,cosmo + + +@ct.timeit_quiet +def whitify(density, L, cosmo, supergenerate=1, zero_fill=False, func='HU_WIGGLES'): + + N = density.shape[0] + p = ct.CosmologyPower(**cosmo) + p.setFunction(func) + p.normalize(cosmo['SIGMA8']) + + @ct.timeit_quiet + def build_Pk(): + ik = np.fft.fftfreq(N, d=L/N)*2*np.pi + k = np.sqrt(ik[:,None,None]**2 + ik[None,:,None]**2 + ik[None,None,:(N/2+1)]**2) + return p.compute(k)*L**3 + + Pk = 
build_Pk() + Pk[0,0,0]=1 + + cube = CubeFT(L, N) + cube.density = density + density_hat = cube.rfft() + density_hat /= np.sqrt(Pk) + + Ns = N*supergenerate + + density_hat_super = do_supergenerate(density_hat, mulfac=supergenerate) + + cube = CubeFT(L, Ns) + cube.dhat = density_hat_super + return np.fft.irfftn(density_hat_super)*Ns**1.5 + + + +def write_icfiles(*generated_ic, **kwargs): + """Write the initial conditions from the tuple returned by run_generation""" + + supergenerate=kwargs.get('supergenerate', 1) + zero_fill=kwargs.get('zero_fill', False) + posx,vel,density,N,L,a_ic,cosmo = generated_ic + + ct.simpleWriteGadget("Data/borg.gad", posx, velocities=vel, boxsize=L, Hubble=cosmo['h'], Omega_M=cosmo['omega_M_0'], time=a_ic) + for i,c in enumerate(["z","y","x"]): + ct.writeGrafic("Data/ic_velc%s" % c, vel[i].reshape((N,N,N)), L, a_ic, **cosmo) + + ct.writeGrafic("Data/ic_deltab", density, L, a_ic, **cosmo) + + ct.writeWhitePhase("Data/white.dat", whitify(density, L, cosmo, supergenerate=supergenerate,zero_fill=zero_fill)) + + with file("Data/white_params", mode="w") as f: + f.write("4\n%lg, %lg, %lg\n" % (cosmo['omega_M_0'], cosmo['omega_lambda_0'], 100*cosmo['h'])) + f.write("%lg\n%lg\n-%lg\n0,0\n" % (cosmo['omega_B_0'],cosmo['ns'],cosmo['SIGMA8'])) + f.write("-%lg\n1\n0\n\n\n\n\n" % L) + f.write("2\n\n0\nwhite.dat\n0\npadding_white.dat\n") + diff --git a/external/cosmotool/python_sample/icgen/cosmogrowth.py b/external/cosmotool/python_sample/icgen/cosmogrowth.py new file mode 100644 index 0000000..f5f3652 --- /dev/null +++ b/external/cosmotool/python_sample/icgen/cosmogrowth.py @@ -0,0 +1,202 @@ +import numexpr as ne +import multiprocessing +import pyfftw +import weakref +import numpy as np +import cosmolopy as cpy +import cosmotool as ct + +class CubeFT(object): + def __init__(self, L, N, max_cpu=-1): + + self.N = N + self.align = pyfftw.simd_alignment + self.L = L + self.max_cpu = multiprocessing.cpu_count() if max_cpu < 0 else max_cpu + self._dhat = 
pyfftw.n_byte_align_empty((self.N,self.N,self.N/2+1), self.align, dtype='complex64') + self._density = pyfftw.n_byte_align_empty((self.N,self.N,self.N), self.align, dtype='float32') + self._irfft = pyfftw.FFTW(self._dhat, self._density, axes=(0,1,2), direction='FFTW_BACKWARD', threads=self.max_cpu, normalize_idft=False) + self._rfft = pyfftw.FFTW(self._density, self._dhat, axes=(0,1,2), threads=self.max_cpu, normalize_idft=False) + + def rfft(self): + return ne.evaluate('c*a', local_dict={'c':self._rfft(normalise_idft=False),'a':(self.L/self.N)**3}) + + def irfft(self): + return ne.evaluate('c*a', local_dict={'c':self._irfft(normalise_idft=False),'a':(1/self.L)**3}) + + def get_dhat(self): + return self._dhat + def set_dhat(self, in_dhat): + self._dhat[:] = in_dhat + dhat = property(get_dhat, set_dhat, None) + + def get_density(self): + return self._density + def set_density(self, d): + self._density[:] = d + density = property(get_density, set_density, None) + + +class CosmoGrowth(object): + + def __init__(self, **cosmo): + self.cosmo = cosmo + + def D(self, a): + return cpy.perturbation.fgrowth(1/a-1, self.cosmo['omega_M_0'], unnormed=True) + + def compute_E(self, a): + om = self.cosmo['omega_M_0'] + ol = self.cosmo['omega_lambda_0'] + ok = self.cosmo['omega_k_0'] + + E = np.sqrt(om/a**3 + ol + ok/a**2) + + H2 = -3*om/a**4 - 2*ok/a**3 + + Eprime = 0.5*H2/E + + return E,Eprime + + def Ddot(self, a): + E,Eprime = self.compute_E(a) + D = self.D(a) + Ddot_D = Eprime/E + 2.5 * self.cosmo['omega_M_0']/(a**3*E**2*D) + Ddot_D *= a + return Ddot_D + + def compute_velmul(self, a): + E,_ = self.compute_E(a) + velmul = self.Ddot(a) + velmul *= 100 * a * E + return velmul + + + + + +class LagrangianPerturbation(object): + + def __init__(self,density,L, fourier=False, supersample=1, max_cpu=-1): + + self.L = L + self.N = density.shape[0] + + self.max_cpu = max_cpu + self.cube = CubeFT(self.L, self.N, max_cpu=max_cpu) + + if not fourier: + self.cube.density = density + 
self.dhat = self.cube.rfft().copy() + else: + self.dhat = density.copy() + + if supersample > 1: + self.upgrade_sampling(supersample) + self.ik = np.fft.fftfreq(self.N, d=L/self.N)*2*np.pi + self._kx = self.ik[:,None,None] + self._ky = self.ik[None,:,None] + self._kz = self.ik[None,None,:(self.N/2+1)] + self.cache = {}#weakref.WeakValueDictionary() + + @ct.timeit_quiet + def upgrade_sampling(self, supersample): + N2 = self.N * supersample + N = self.N + dhat_new = np.zeros((N2, N2, N2/2+1), dtype=np.complex128) + + hN = N/2 + dhat_new[:hN, :hN, :hN+1] = self.dhat[:hN, :hN, :] + dhat_new[:hN, (N2-hN):N2, :hN+1] = self.dhat[:hN, hN:, :] + dhat_new[(N2-hN):N2, (N2-hN):N2, :hN+1] = self.dhat[hN:, hN:, :] + dhat_new[(N2-hN):N2, :hN, :hN+1] = self.dhat[hN:, :hN, :] + + self.dhat = dhat_new + self.N = N2 + self.cube = CubeFT(self.L, self.N, max_cpu=self.max_cpu) + + @ct.timeit_quiet + def _gradient(self, phi, direction): + if direction == 'all': + dirs = [0,1,2] + copy = True + else: + dirs = [direction] + copy = False + ret=[] + for dir in dirs: + ne.evaluate('phi_hat * i * kv / (kx**2 + ky**2 + kz**2)', out=self.cube.dhat, + local_dict={'i':-1j, 'phi_hat':phi, 'kv':self._kdir(dir), + 'kx':self._kx, 'ky':self._ky, 'kz':self._kz},casting='unsafe') +# self.cube.dhat = self._kdir(direction)*1j*phi + self.cube.dhat[0,0,0] = 0 + x = self.cube.irfft() + ret.append(x.copy() if copy else x) + return ret[0] if len(ret)==1 else ret + + @ct.timeit_quiet + def lpt1(self, direction=0): + return self._gradient(self.dhat, direction) + + def new_shape(self,direction, q=3, half=False): + N0 = (self.N/2+1) if half else self.N + return ((1,)*direction) + (N0,) + ((1,)*(q-1-direction)) + + def _kdir(self, direction, q=3): + if direction != q-1: + return self.ik.reshape(self.new_shape(direction, q=q)) + else: + return self.ik[:self.N/2+1].reshape(self.new_shape(direction, q=q, half=True)) + + def _get_k2(self, q=3): + if 'k2' in self.cache: + return self.cache['k2'] + + k2 = self._kdir(0, 
q=q)**2 + for d in xrange(1,q): + k2 = k2 + self._kdir(d, q=q)**2 + + self.cache['k2'] = k2 + return k2 + + def _do_irfft(self, array, copy=True): + if copy: + self.cube.dhat = array + return self.cube.irfft() + + def _do_rfft(self, array, copy=True): + if copy: + self.cube.density = array + return self.cube.rfft() + + @ct.timeit_quiet + def lpt2(self, direction=0): +# k2 = self._get_k2() +# k2[0,0,0] = 1 + + inv_k2 = ne.evaluate('1/(kx**2+ky**2+kz**2)', {'kx':self._kdir(0),'ky':self._kdir(1),'kz':self._kdir(2)}) + inv_k2[0,0,0]=0 + potgen0 = lambda i: ne.evaluate('kdir**2*d*ik2',out=self.cube.dhat,local_dict={'kdir':self._kdir(i),'d':self.dhat,'ik2':inv_k2}, casting='unsafe' ) + potgen = lambda i,j: ne.evaluate('kdir0*kdir1*d*ik2',out=self.cube.dhat,local_dict={'kdir0':self._kdir(i),'kdir1':self._kdir(j),'d':self.dhat,'ik2':inv_k2}, casting='unsafe' ) + + if 'lpt2_potential' not in self.cache: + print("Rebuilding potential...") + div_phi2 = np.zeros((self.N,self.N,self.N), dtype=np.float64) + for j in xrange(3): + q = self._do_irfft( potgen0(j) ).copy() + for i in xrange(j+1, 3): + with ct.time_block("LPT2 elemental (%d,%d)" %(i,j)): + ne.evaluate('div + q * pot', out=div_phi2, + local_dict={'div':div_phi2, 'q':q,'pot':self._do_irfft( potgen0(i), copy=False ) } + ) + ne.evaluate('div - pot**2',out=div_phi2, + local_dict={'div':div_phi2,'pot':self._do_irfft(potgen(i,j), copy=False) } + ) + + phi2_hat = self._do_rfft(div_phi2) + #self.cache['lpt2_potential'] = phi2_hat + del div_phi2 + else: + phi2_hat = self.cache['lpt2_potential'] + + return self._gradient(phi2_hat, direction) diff --git a/external/cosmotool/python_sample/icgen/gen_ic_from_borg.py b/external/cosmotool/python_sample/icgen/gen_ic_from_borg.py new file mode 100644 index 0000000..09e0bbf --- /dev/null +++ b/external/cosmotool/python_sample/icgen/gen_ic_from_borg.py @@ -0,0 +1,24 @@ +import pyfftw +import numpy as np +import cosmotool as ct +import borgicgen as bic +import pickle + +with file("wisdom") 
as f: + pyfftw.import_wisdom(pickle.load(f)) + +cosmo={'omega_M_0':0.3175, 'h':0.6711} +cosmo['omega_lambda_0']=1-cosmo['omega_M_0'] +cosmo['omega_k_0'] = 0 +cosmo['omega_B_0']=0.049 +cosmo['SIGMA8']=0.8344 +cosmo['ns']=0.9624 + +supergen=1 +zstart=99 +astart=1/(1.+zstart) +halfPixelShift=False +zero_fill=False + +if __name__=="__main__": + bic.write_icfiles(*bic.run_generation("initial_density_1872.dat", 0.001, astart, cosmo, supersample=1, shiftPixel=halfPixelShift, do_lpt2=False, supergenerate=supergen), supergenerate=1, zero_fill=zero_fill) diff --git a/external/cosmotool/python_sample/icgen/test_ic_from_borg.py b/external/cosmotool/python_sample/icgen/test_ic_from_borg.py new file mode 100644 index 0000000..566af86 --- /dev/null +++ b/external/cosmotool/python_sample/icgen/test_ic_from_borg.py @@ -0,0 +1,63 @@ +import numpy as np +import cosmotool as ct +import borgicgen as bic +import cosmogrowth as cg +import sys + +cosmo={'omega_M_0':0.3175, 'h':0.6711} +cosmo['omega_lambda_0']=1-cosmo['omega_M_0'] +cosmo['omega_k_0'] = 0 +cosmo['omega_B_0']=0.049 +cosmo['SIGMA8']=0.8344 +cosmo['ns']=0.9624 +N0=256 + +doSimulation=False +simShift=False + +snap_id=int(sys.argv[1]) +astart=1/100. 
+ +if doSimulation: + s = ct.loadRamsesAll("/nethome/lavaux/remote2/borgsim3/", snap_id, doublePrecision=True) + astart=s.getTime() + L = s.getBoxsize() + + p = s.getPositions() + Nsim = int( np.round( p[0].size**(1./3)) ) + print("Nsim = %d" % Nsim) + + if simShift: + p = [(q-0.5*L/Nsim)%L for q in p] + + dsim = ct.cicParticles(p[::-1], L, N0) + dsim /= np.average(np.average(np.average(dsim, axis=0), axis=0), axis=0) + dsim -= 1 + + dsim_hat = np.fft.rfftn(dsim)*(L/N0)**3 + Psim, bsim = bic.bin_power(np.abs(dsim_hat)**2/L**3, L, range=(0,1.), bins=150) + +pos,_,density,N,L,_,_ = bic.run_generation("initial_density_1872.dat", 0.001, astart, cosmo, supersample=1, do_lpt2=False, supergenerate=2) + +dcic = ct.cicParticles(pos, L, N0) +dcic /= np.average(np.average(np.average(dcic, axis=0), axis=0), axis=0) +dcic -= 1 + +dcic_hat = np.fft.rfftn(dcic)*(L/N0)**3 +dens_hat = np.fft.rfftn(density)*(L/N0)**3 + +Pcic, bcic = bic.bin_power(np.abs(dcic_hat)**2/L**3, L, range=(0,4.), bins=150) +Pdens, bdens = bic.bin_power(np.abs(dens_hat)**2/L**3, L, range=(0,4.), bins=150) + +cgrowth = cg.CosmoGrowth(**cosmo) +D1 = cgrowth.D(astart) +D1_0 = D1/cgrowth.D(1)#0.001) + +Pref, bref = bic.compute_ref_power(L, N0, cosmo, range=(0,4.), bins=150) + +Pcic /= D1_0**2 + +#borg_evolved = ct.read_borg_vol("final_density_1380.dat") +#dborg_hat = np.fft.rfftn(borg_evolved.density)*L**3/borg_evolved.density.size + +#Pborg, bborg = bic.bin_power(np.abs(dborg_hat)**2/L**3, L, range=(0,1.),bins=150) diff --git a/external/cosmotool/python_sample/icgen/test_whitify.py b/external/cosmotool/python_sample/icgen/test_whitify.py new file mode 100644 index 0000000..c563f7f --- /dev/null +++ b/external/cosmotool/python_sample/icgen/test_whitify.py @@ -0,0 +1,41 @@ +import numpy as np +import cosmotool as ct +import borgicgen as bic +from matplotlib import pyplot as plt + +cosmo={'omega_M_0':0.3175, 'h':0.6711} +cosmo['omega_lambda_0']=1-cosmo['omega_M_0'] +cosmo['omega_k_0'] = 0 +cosmo['omega_B_0']=0.049 
+cosmo['SIGMA8']=0.8344 +cosmo['ns']=0.9624 + +zstart=50 +astart=1/(1.+zstart) +halfPixelShift=False + +posx,vel,density,N,L,a_ic,cosmo = bic.run_generation("initial_condition_borg.dat", 0.001, astart, cosmo, supersample=1, shiftPixel=halfPixelShift, do_lpt2=False) + +w1 = bic.whitify(density, L, cosmo, supergenerate=1) +w2 = bic.whitify(density, L, cosmo, supergenerate=2) + +N = w1.shape[0] +Ns = w2.shape[0] + +w1_hat = np.fft.rfftn(w1)*(L/N)**3 +w2_hat = np.fft.rfftn(w2)*(L/Ns)**3 + +P1, b1, dev1 = bic.bin_power(np.abs(w1_hat)**2, L, range=(0,3),bins=150,dev=True) +P2, b2, dev2 = bic.bin_power(np.abs(w2_hat)**2, L, range=(0,3),bins=150,dev=True) + +fig = plt.figure(1) +fig.clf() +plt.fill_between(b1, P1*(1-dev1), P1*(1+dev1), label='Supergen=1', color='b') +plt.fill_between(b2, P2*(1-dev2), P2*(1+dev2), label='Supergen=2', color='g', alpha=0.5) +ax = plt.gca() +ax.set_xscale('log') +plt.ylim(0.5,1.5) +plt.xlim(1e-2,4) +plt.axhline(1.0, color='red', lw=4.0) +plt.legend() +plt.show() diff --git a/external/cosmotool/python_sample/ksz/__init__.py b/external/cosmotool/python_sample/ksz/__init__.py new file mode 100644 index 0000000..b26b1bb --- /dev/null +++ b/external/cosmotool/python_sample/ksz/__init__.py @@ -0,0 +1,3 @@ +from .constants import * +from .gal_prof import KSZ_Profile, KSZ_Isothermal, KSZ_NFW + diff --git a/external/cosmotool/python_sample/ksz/constants.py b/external/cosmotool/python_sample/ksz/constants.py new file mode 100644 index 0000000..2a76b27 --- /dev/null +++ b/external/cosmotool/python_sample/ksz/constants.py @@ -0,0 +1,36 @@ +import numpy as np + +Mpc=3.08e22 +rhoc = 1.8864883524081933e-26 # m^(-3) +sigmaT = 6.6524e-29 +mp = 1.6726e-27 +lightspeed = 299792458. 
+v_unit = 1e3 # Unit of 1 km/s +T_cmb=2.725 +h = 0.71 +Y = 0.245 #The Helium abundance +Omega_matter = 0.26 +Omega_baryon=0.0445 + +G=6.67e-11 +MassSun=2e30 +frac_electron = 1.0 # Hmmmm +frac_gas_galaxy = 0.14 +mu = 1/(1-0.5*Y) + +tmpp_cat={'Msun':3.29, + 'alpha':-0.7286211634758224, + 'Mstar':-23.172904033796893, + 'PhiStar':0.0113246633636846, + 'lbar':393109973.22508669} + +baryon_fraction = Omega_baryon / Omega_matter + +ksz_normalization = -T_cmb*sigmaT*v_unit/(lightspeed*mu*mp) * baryon_fraction +assert ksz_normalization < 0 +rho_mean_matter = Omega_matter * (3*(100e3/Mpc)**2/(8*np.pi*G)) +Lbar = tmpp_cat['lbar'] / Mpc**3 +M_over_L_galaxy = rho_mean_matter / Lbar + + +del np diff --git a/external/cosmotool/python_sample/ksz/gal_prof.py b/external/cosmotool/python_sample/ksz/gal_prof.py new file mode 100644 index 0000000..b32e658 --- /dev/null +++ b/external/cosmotool/python_sample/ksz/gal_prof.py @@ -0,0 +1,175 @@ +import numpy as np +import numexpr as ne +from .constants import * + +# ----------------------------------------------------------------------------- +# Generic profile generator +# ----------------------------------------------------------------------------- + +class KSZ_Profile(object): + R_star= 0.0 # 15 kpc/h + L_gal0 = 10**(0.4*(tmpp_cat['Msun']-tmpp_cat['Mstar'])) + + def __init__(self,sculpt): + """Base class for KSZ profiles + + Arguments: + sculpt (float): If negative, do not sculpt. If positive, there will be a 2d + suppression of the profile with a radius given by sculpt (in arcmins). + """ + self.sculpt = sculpt * np.pi/180/60. 
+ self.rGalaxy = 1.0 + + def evaluate_profile(self, r): + raise NotImplementedError("Abstract function") + + def projected_profile(self, cos_theta,angularDistance): + + idx_base = idx = np.where(cos_theta > 0)[0] + tan_theta_2 = 1/(cos_theta[idx]**2) - 1 + tan_theta_2_max = (self.rGalaxy/angularDistance)**2 + tan_theta_2_min = (self.R_star/angularDistance)**2 + + idx0 = np.where((tan_theta_2 < tan_theta_2_max)) + idx = idx_base[idx0] + tan_theta_2 = tan_theta_2[idx0] + tan_theta = np.sqrt(tan_theta_2) + + r = (tan_theta*angularDistance) + + m,idx_mask = self.evaluate_profile(r) + idx_mask = idx[idx_mask] + + idx_mask = np.append(idx_mask,idx[np.where(tan_theta_2 0: + idx_mask = np.append(idx_mask,idx[tan_theta_2.argmin()]) + + if self.sculpt > 0: + theta = np.arctan(tan_theta) + cond = theta < self.sculpt + m[cond] *= (theta[cond]/self.sculpt)**2 + + return idx,idx_mask,m + + +# ----------------------------------------------------------------------------- +# Isothermal profile generator +# ----------------------------------------------------------------------------- + +class KSZ_Isothermal(KSZ_Profile): + sigma_FP=160e3 #m/s + R_innergal = 0.030 + + def __init__(self, Lgal, x, y=0.0, sculpt=-1): + """Support for Isothermal profile + + Arguments: + Lgal (float): Galaxy luminosity in solar units + x (float): extent of halo in virial radius units + + Keyword arguments: + y (float): Inner part where there is no halo + sculpt (float): If negative, do not sculpt. If positive, there will be a 2d + suppression of the profile with a radius given by sculpt (in arcmins). 
+ """ + + super(KSZ_Isothermal,self).__init__(sculpt) + + self.R_gal = 0.226 * x + self.R_innergal *= y + + self.rho0 = self.sigma_FP**2/(2*np.pi*G) # * (Lgal/L_gal0)**(2./3) + self.rGalaxy = self.R_gal*(Lgal/self.L_gal0)**(1./3) + self.rInnerGalaxy = self.R_innergal*(Lgal/self.L_gal0)**(1./3) + self._prepare() + + def _prepare(self): + pass + + def evaluate_profile(self,r): + rho0, rGalaxy, rInner = self.rho0, self.rGalaxy, self.rInnerGalaxy + + D = {'rho0':rho0, 'rGalaxy':rGalaxy, 'rInner': rInner, 'Mpc':Mpc } + + Q = np.zeros(r.size) + + cond = (r<=1e-10) + Q[cond] = rho0*2/Mpc * (rGalaxy-rInner)/(rGalaxy*rInner) + + cond = (r>0)*(r <= rInner) + D['r'] = r[cond] + Q[cond] = ne.evaluate('rho0*2/(Mpc*r) * (arctan(sqrt( (rGalaxy/r)**2 -1 )) - arctan(sqrt( (rInner/r)**2 - 1 )))', + local_dict=D) + + cond = (r > rInner)*(r <= rGalaxy) + D['r'] = r[cond] + Q[cond] = ne.evaluate('rho0*2/(Mpc*r) * arctan(sqrt( (rGalaxy/r)**2 -1 ))', + local_dict=D) + + return Q,[] #np.where(r +#include +#include +#include +#include "cic.hpp" +#include "loadGadget.hpp" +#include "miniargs.hpp" +#include +#include "hdf5_array.hpp" + +using namespace CosmoTool; +using namespace std; + +int main(int argc, char **argv) +{ + typedef boost::multi_array array_type; + uint32_t res; + char *fname; + int id; + double MPC; + + MiniArgDesc desc[] = { + { "SNAPSHOT", &fname, MINIARG_STRING }, + { "MPC", &MPC, MINIARG_DOUBLE }, + { 0, 0, MINIARG_NULL } + }; + + if (!parseMiniArgs(argc, argv, desc)) + return 1; + + H5::H5File f("density.h5", H5F_ACC_TRUNC); + + + SimuData *p = loadGadgetMulti(fname, 0, 0); + double L0 = p->BoxSize/MPC; + cout << "Will read " << p->TotalNumPart << " particles" << endl; + array_type parts(boost::extents[p->TotalNumPart][7]); + uint64_t q = 0; + + try { + for (int cpuid=0;;cpuid++) { + cout << " = CPU " << cpuid << " = " << endl; + p = loadGadgetMulti(fname, cpuid, NEED_POSITION|NEED_VELOCITY|NEED_MASS); + cout << " = DONE LOAD, COPYING IN PLACE" << endl; + for (uint32_t 
i = 0; i < p->NumPart; i++) + { + for (int j = 0; j < 3; j++) + { + parts[q][j] = p->Pos[j][i]/MPC; + while (parts[q][j] < 0) parts[q][j] += L0; + while (parts[q][j] >= L0) parts[q][j] -= L0; + parts[q][j] -= L0/2; + } + parts[q][3] = p->Vel[0][i]; + parts[q][4] = p->Vel[1][i]; + parts[q][5] = p->Vel[2][i]; + parts[q][6] = p->Mass[i]; + q++; + } + cout << " = DONE (q=" << q << ")" << endl; + + delete p; + } + } catch (const NoSuchFileException& e) {} + + cout << " ++ WRITING ++" << endl; + hdf5_write_array(f, "particles", parts); + + return 0; +} diff --git a/external/cosmotool/sample/gadgetToDensity.cpp b/external/cosmotool/sample/gadgetToDensity.cpp new file mode 100644 index 0000000..175f12d --- /dev/null +++ b/external/cosmotool/sample/gadgetToDensity.cpp @@ -0,0 +1,96 @@ +/*+ +This is CosmoTool (./sample/gadgetToDensity.cpp) -- Copyright (C) Guilhem Lavaux (2007-2014) + +guilhem.lavaux@gmail.com + +This software is a computer program whose purpose is to provide a toolbox for cosmological +data analysis (e.g. filters, generalized Fourier transforms, power spectra, ...) + +This software is governed by the CeCILL license under French law and +abiding by the rules of distribution of free software. You can use, +modify and/ or redistribute the software under the terms of the CeCILL +license as circulated by CEA, CNRS and INRIA at the following URL +"http://www.cecill.info". + +As a counterpart to the access to the source code and rights to copy, +modify and redistribute granted by the license, users are provided only +with a limited warranty and the software's author, the holder of the +economic rights, and the successive licensors have only limited +liability. 
+ +In this respect, the user's attention is drawn to the risks associated +with loading, using, modifying and/or developing or reproducing the +software by the user in light of its specific status of free software, +that may mean that it is complicated to manipulate, and that also +therefore means that it is reserved for developers and experienced +professionals having in-depth computer knowledge. Users are therefore +encouraged to load and test the software's suitability as regards their +requirements in conditions enabling the security of their systems and/or +data to be ensured and, more generally, to use and operate it in the +same conditions as regards security. + +The fact that you are presently reading this means that you have had +knowledge of the CeCILL license and that you accept its terms. ++*/ +#include +#include +#include +#include "cic.hpp" +#include "loadGadget.hpp" +#include "miniargs.hpp" +#include "yorick.hpp" + +using namespace std; +using namespace CosmoTool; + +int main(int argc, char **argv) +{ + uint32_t res; + char *fname; + int id; + + MiniArgDesc desc[] = { + { "SNAPSHOT", &fname, MINIARG_STRING }, + { "ID", &id, MINIARG_INT }, + { "RESOLUTION", &res, MINIARG_INT }, + { 0, 0, MINIARG_NULL } + }; + + if (!parseMiniArgs(argc, argv, desc)) + return 1; + + SimuData *p = loadGadgetMulti(fname, 0, 0); + double L0 = p->BoxSize; + CICFilter filter(res, L0); + + delete p; + + try { + for (int cpuid=0;;cpuid++) { + p = loadGadgetMulti(fname, cpuid, NEED_POSITION); + for (uint32_t i = 0; i < p->NumPart; i++) + { + CICParticles a; + + a.mass = 1.0; + a.coords[0] = p->Pos[0][i]/1000; + a.coords[1] = p->Pos[1][i]/1000; + a.coords[2] = p->Pos[2][i]/1000; + filter.putParticles(&a, 1); + } + + delete p; + } + } catch (const NoSuchFileException& e) {} + + CICType *denField; + uint32_t Ntot; + filter.getDensityField(denField, Ntot); + + cout << "L0=" << L0 << endl; + cout << "Saving density field" << endl; + uint32_t dimList[] = { res, res, res}; + 
saveArray("densityField.nc", denField, dimList, 3); + + return 0; +} diff --git a/external/cosmotool/sample/graficToDensity.cpp b/external/cosmotool/sample/graficToDensity.cpp new file mode 100644 index 0000000..41ad955 --- /dev/null +++ b/external/cosmotool/sample/graficToDensity.cpp @@ -0,0 +1,72 @@ +#include +#include +#include +#include +#include +#include "hdf5_array.hpp" +#include "miniargs.hpp" +#include "fortran.hpp" + +using namespace std; +using namespace CosmoTool; + +//#define GRAFIC_GUILHEM + +int main(int argc, char **argv) +{ + uint32_t res; + char *fname; + int id; + + MiniArgDesc desc[] = { + { "GRAFIC", &fname, MINIARG_STRING }, + { 0, 0, MINIARG_NULL } + }; + + if (!parseMiniArgs(argc, argv, desc)) + return 1; + + UnformattedRead ur(fname); + + ur.beginCheckpoint(); + int32_t nx = ur.readInt32(); + int32_t ny = ur.readInt32(); + int32_t nz = ur.readInt32(); + float dx = ur.readReal32(); + float xo = ur.readReal32(); + float yo = ur.readReal32(); + float zo = ur.readReal32(); + float astart = ur.readReal32(); + float omega_m = ur.readReal32(); + float omega_nu = ur.readReal32(); + float h0 = ur.readReal32(); +#ifdef GRAFIC_GUILHEM + float w0 = ur.readReal32(); +#endif + ur.endCheckpoint(); + + cout << "Grafic file: Nx=" << nx << " Ny=" << ny << " Nz=" << nz << endl; + cout << "a_start = " << astart << endl; + cout << "z_start = " << 1/astart - 1 << endl; + cout << "L = " << nx*dx << endl; + + boost::multi_array density(boost::extents[nx][ny][nz]); + + for (int32_t iz = 0; iz < nz; iz++) + { + ur.beginCheckpoint(); + for (int32_t iy = 0; iy < ny; iy++) + { + for (int32_t ix = 0; ix < nx; ix++) + { + density[ix][iy][iz] = ur.readReal32(); + } + } + ur.endCheckpoint(); + } + + H5::H5File f("density.h5", H5F_ACC_TRUNC); + hdf5_write_array(f, "density", density); + + return 0; +} diff --git a/external/cosmotool/sample/simple3DFilter.cpp b/external/cosmotool/sample/simple3DFilter.cpp new file mode 100644 index 0000000..0b8d7da --- /dev/null +++ 
b/external/cosmotool/sample/simple3DFilter.cpp @@ -0,0 +1,218 @@ +#include "openmp.hpp" +#include "omptl/algorithm" +#include +#include "yorick.hpp" +#include "sphSmooth.hpp" +#include "mykdtree.hpp" +#include "miniargs.hpp" +#include +#include "hdf5_array.hpp" +#include +#include +#include + +using namespace std; +using namespace CosmoTool; + +#define N_SPH 32 + +struct VCoord{ + float v[3]; + float mass; +}; + +using boost::format; +using boost::str; +typedef boost::multi_array array_type; +typedef boost::multi_array array3_type; +typedef boost::multi_array array4_type; + +ComputePrecision getVelocity(const VCoord& v, int i) +{ + return v.mass * v.v[i]; +} + +ComputePrecision getMass(const VCoord& v) +{ + return v.mass; +} + +typedef SPHSmooth MySmooth; +typedef MySmooth::SPHTree MyTree; +typedef MyTree::Cell MyCell; + +template +void computeInterpolatedField(MyTree *tree1, double boxsize, int Nres, double cx, double cy, double cz, + array3_type& bins, array3_type& arr, FuncT func, double rLimit2) +{ +#pragma omp parallel + { + MySmooth smooth1(tree1, N_SPH); + +#pragma omp for schedule(dynamic) + for (int rz = 0; rz < Nres; rz++) + { + double pz = (rz)*boxsize/Nres-cz; + + cout << format("[%d] %d / %d") % smp_get_thread_id() % rz % Nres << endl; + for (int ry = 0; ry < Nres; ry++) + { + double py = (ry)*boxsize/Nres-cy; + for (int rx = 0; rx < Nres; rx++) + { + double px = (rx)*boxsize/Nres-cx; + + MyTree::coords c = { float(px), float(py), float(pz) }; + + double r2 = c[0]*c[0]+c[1]*c[1]+c[2]*c[2]; + if (r2 > rLimit2) + { + arr[rx][ry][rz] = 0; + continue; + } + + uint32_t numInCell = bins[rx][ry][rz]; + if (numInCell > N_SPH) + smooth1.fetchNeighbours(c, numInCell); + else + smooth1.fetchNeighbours(c); + + arr[rx][ry][rz] = smooth1.computeSmoothedValue(c, func); + } + } + } + } +} + +int main(int argc, char **argv) +{ + + char *fname1, *fname2; + double rLimit, boxsize, rLimit2, cx, cy, cz; + int Nres; + + MiniArgDesc args[] = { + { "INPUT DATA1", &fname1, 
MINIARG_STRING }, + { "RADIUS LIMIT", &rLimit, MINIARG_DOUBLE }, + { "BOXSIZE", &boxsize, MINIARG_DOUBLE }, + { "RESOLUTION", &Nres, MINIARG_INT }, + { "CX", &cx, MINIARG_DOUBLE }, + { "CY", &cy, MINIARG_DOUBLE }, + { "CZ", &cz, MINIARG_DOUBLE }, + { 0, 0, MINIARG_NULL } + }; + + if (!parseMiniArgs(argc, argv, args)) + return 1; + + H5::H5File in_f(fname1, 0); + H5::H5File out_f("fields.h5", H5F_ACC_TRUNC); + array_type v1_data; + uint32_t N1_points, N2_points; + + array3_type bins(boost::extents[Nres][Nres][Nres]); + + rLimit2 = rLimit*rLimit; + + hdf5_read_array(in_f, "particles", v1_data); + assert(v1_data.shape()[1] == 7); + + N1_points = v1_data.shape()[0]; + + cout << "Got " << N1_points << " in the first file." << endl; + + MyCell *allCells_1 = new MyCell[N1_points]; + +#pragma omp parallel for schedule(static) + for (long i = 0; i < Nres*Nres*Nres; i++) + bins.data()[i] = 0; + + cout << "Shuffling data in cells..." << endl; +#pragma omp parallel for schedule(static) + for (int i = 0 ; i < N1_points; i++) + { + for (int j = 0; j < 3; j++) + allCells_1[i].coord[j] = v1_data[i][j]; + for (int k = 0; k < 3; k++) + allCells_1[i].val.pValue.v[k] = v1_data[i][3+k]; + allCells_1[i].val.pValue.mass = v1_data[i][6]; + allCells_1[i].active = true; + allCells_1[i].val.weight = 0.0; + + long rx = floor((allCells_1[i].coord[0]+cx)*Nres/boxsize+0.5); + long ry = floor((allCells_1[i].coord[1]+cy)*Nres/boxsize+0.5); + long rz = floor((allCells_1[i].coord[2]+cz)*Nres/boxsize+0.5); + + if (rx < 0 || rx >= Nres || ry < 0 || ry >= Nres || rz < 0 || rz >= Nres) + continue; + +#pragma omp atomic update + bins[rx][ry][rz]++; + } + v1_data.resize(boost::extents[1][1]); + + hdf5_write_array(out_f, "num_in_cell", bins); + + cout << "Building trees..." << endl; + MyTree tree1(allCells_1, N1_points); + + cout << "Creating smoothing filter..." << endl; + +// array3_type out_rad_1(boost::extents[Nres][Nres][Nres]); + + cout << "Weighing..." 
<< endl; + +#pragma omp parallel + { + MySmooth smooth1(&tree1, N_SPH); + +#pragma omp for schedule(dynamic) + for (int rz = 0; rz < Nres; rz++) + { + double pz = (rz)*boxsize/Nres-cz; + + (cout << rz << " / " << Nres << endl).flush(); + for (int ry = 0; ry < Nres; ry++) + { + double py = (ry)*boxsize/Nres-cy; + for (int rx = 0; rx < Nres; rx++) + { + double px = (rx)*boxsize/Nres-cx; + + MyTree::coords c = { float(px), float(py), float(pz) }; + + double r2 = c[0]*c[0]+c[1]*c[1]+c[2]*c[2]; + if (r2 > rLimit2) + { + continue; + } + + uint32_t numInCell = bins[rx][ry][rz]; + if (numInCell > N_SPH) + smooth1.fetchNeighbours(c, numInCell); + else + smooth1.fetchNeighbours(c); +#pragma omp critical + smooth1.addGridSite(c); + } + } + (cout << " Done " << rz << endl).flush(); + } + } + + cout << "Interpolating..." << endl; + + array3_type interpolated(boost::extents[Nres][Nres][Nres]); + + computeInterpolatedField(&tree1, boxsize, Nres, cx, cy, cz, + bins, interpolated, getMass, rLimit2); + hdf5_write_array(out_f, "density", interpolated); + //out_f.flush(); + for (int i = 0; i < 3; i++) { + computeInterpolatedField(&tree1, boxsize, Nres, cx, cy, cz, + bins, interpolated, boost::bind(getVelocity, _1, i), rLimit2); + hdf5_write_array(out_f, str(format("p%d") % i), interpolated); + } + + return 0; +}; diff --git a/external/cosmotool/sample/simpleDistanceFilter.cpp b/external/cosmotool/sample/simpleDistanceFilter.cpp new file mode 100644 index 0000000..44db8b4 --- /dev/null +++ b/external/cosmotool/sample/simpleDistanceFilter.cpp @@ -0,0 +1,86 @@ +#include "openmp.hpp" +#include "omptl/algorithm" +#include +#include "yorick.hpp" +#include "sphSmooth.hpp" +#include "mykdtree.hpp" +#include "miniargs.hpp" +#include +#include "hdf5_array.hpp" +#include +#include +#include + +using namespace std; +using namespace CosmoTool; + +struct VCoord{ +}; + +using boost::format; +using boost::str; +typedef boost::multi_array array_type_2d; +typedef boost::multi_array array_type_1d; + 
+typedef KDTree<3,float> MyTree; +typedef MyTree::Cell MyCell; + +int main(int argc, char **argv) +{ + + char *fname1, *fname2; + + MiniArgDesc args[] = { + { "INPUT DATA1", &fname1, MINIARG_STRING }, + { 0, 0, MINIARG_NULL } + }; + + if (!parseMiniArgs(argc, argv, args)) + return 1; + + H5::H5File in_f(fname1, 0); + H5::H5File out_f("distances.h5", H5F_ACC_TRUNC); + array_type_2d v1_data; + array_type_1d dist_data; + uint32_t N1_points; + + hdf5_read_array(in_f, "particles", v1_data); + assert(v1_data.shape()[1] == 7); + + N1_points = v1_data.shape()[0]; + + cout << "Got " << N1_points << " in the first file." << endl; + + MyCell *allCells_1 = new MyCell[N1_points]; + + cout << "Shuffling data in cells..." << endl; +#pragma omp parallel for schedule(static) + for (int i = 0 ; i < N1_points; i++) + { + for (int j = 0; j < 3; j++) + allCells_1[i].coord[j] = v1_data[i][j]; + allCells_1[i].active = true; + } + dist_data.resize(boost::extents[N1_points]); + + cout << "Building trees..." << endl; + MyTree tree1(allCells_1, N1_points); +#pragma omp parallel + { + MyCell **foundCells = new MyCell *[2]; + + #pragma omp for + for (size_t i = 0; i < N1_points; i++) { + double dists[2]; + + tree1.getNearestNeighbours(allCells_1[i].coord, 2, foundCells, dists); + dist_data[i] = dists[1]; + } + + delete[] foundCells; + } + + hdf5_write_array(out_f, "distances", dist_data); + + return 0; +}; diff --git a/external/cosmotool/sample/testHDF5.cpp b/external/cosmotool/sample/testHDF5.cpp new file mode 100644 index 0000000..0857c6a --- /dev/null +++ b/external/cosmotool/sample/testHDF5.cpp @@ -0,0 +1,183 @@ +/*+ +This is CosmoTool (./sample/testHDF5.cpp) -- Copyright (C) Guilhem Lavaux (2007-2014) + +guilhem.lavaux@gmail.com + +This software is a computer program whose purpose is to provide a toolbox for cosmological +data analysis (e.g. filters, generalized Fourier transforms, power spectra, ...) 
+ +This software is governed by the CeCILL license under French law and +abiding by the rules of distribution of free software. You can use, +modify and/ or redistribute the software under the terms of the CeCILL +license as circulated by CEA, CNRS and INRIA at the following URL +"http://www.cecill.info". + +As a counterpart to the access to the source code and rights to copy, +modify and redistribute granted by the license, users are provided only +with a limited warranty and the software's author, the holder of the +economic rights, and the successive licensors have only limited +liability. + +In this respect, the user's attention is drawn to the risks associated +with loading, using, modifying and/or developing or reproducing the +software by the user in light of its specific status of free software, +that may mean that it is complicated to manipulate, and that also +therefore means that it is reserved for developers and experienced +professionals having in-depth computer knowledge. Users are therefore +encouraged to load and test the software's suitability as regards their +requirements in conditions enabling the security of their systems and/or +data to be ensured and, more generally, to use and operate it in the +same conditions as regards security. + +The fact that you are presently reading this means that you have had +knowledge of the CeCILL license and that you accept its terms. 
++*/ +#include +#include "hdf5_array.hpp" +#include + +using namespace std; + +struct MyStruct +{ + int a; + double b; + char c; +}; + +struct MyStruct2 +{ + MyStruct base; + int d; +}; + +enum MyColors +{ + RED, GREEN, BLUE +}; + +CTOOL_STRUCT_TYPE(MyStruct, hdf5t_MyStruct, + ((int, a)) + ((double, b)) + ((char, c)) +) + +CTOOL_STRUCT_TYPE(MyStruct2, hdf5t_MyStruct2, + ((MyStruct, base)) + ((int, d)) +) + +CTOOL_ENUM_TYPE(MyColors, hdf5t_MyColors, + (RED) (GREEN) (BLUE) +) + +int main() +{ + typedef boost::multi_array array_type; + typedef boost::multi_array array3_type; + typedef boost::multi_array array_mys_type; + typedef boost::multi_array array_mys_color; + typedef boost::multi_array array_mys_bool; + typedef boost::multi_array array_mys2_type; + typedef boost::multi_array, 2> arrayc_type; + typedef array_type::index index; + + H5::H5File f("test.h5", H5F_ACC_TRUNC); + + H5::Group g = f.createGroup("test_group"); + + array_type A(boost::extents[2][3]); + array_type B, Bprime(boost::extents[1][2]); + array3_type C(boost::extents[2][3][4]); + arrayc_type D, E; + array_mys_type F(boost::extents[10]), G; + array_mys2_type H(boost::extents[10]); + array_mys_color I(boost::extents[2]); + array_mys_bool J(boost::extents[2]); + + I[0] = RED; + I[1] = BLUE; + J[0] = false; + J[1] = true; + + int values = 0; + for (index i = 0; i != 2; i++) + for (index j = 0; j != 3; j++) + A[i][j] = values++; + + for (index i = 0; i != 10; i++) + { + F[i].a = i; + F[i].b = double(i)/4.; + F[i].c = 'r'+i; + H[i].base = F[i]; + H[i].d = 2*i; + } + std::cout << " c = " << ((char *)&F[1])[offsetof(MyStruct, c)] << endl; + + CosmoTool::hdf5_write_array(g, "test_data", A); + CosmoTool::hdf5_write_array(g, "test_struct", F); + CosmoTool::hdf5_write_array(g, "test_struct2", H); + CosmoTool::hdf5_write_array(g, "colors", I); + CosmoTool::hdf5_write_array(g, "bools", J); + CosmoTool::hdf5_read_array(g, "test_data", B); + + int verify = 0; + for (index i = 0; i != 2; i++) + for (index j = 0; j 
!= 3; j++) + if (B[i][j] != verify++) { + std::cout << "Invalid array content" << endl; + abort(); + } + + std::cout << "Testing C " << std::endl; + try + { + CosmoTool::hdf5_read_array(g, "test_data", C); + std::cout << "Did not throw InvalidDimensions" << endl; + abort(); + } + catch (const CosmoTool::InvalidDimensions&) + {} + + std::cout << "Testing Bprime " << std::endl; + try + { + CosmoTool::hdf5_read_array(g, "test_data", Bprime, false, true); + for (index i = 0; i != 1; i++) + for (index j = 0; j != 2; j++) + if (B[i][j] != Bprime[i][j]) { + std::cout << "Invalid array content in Bprime" << endl; + abort(); + } + } + catch (const CosmoTool::InvalidDimensions&) + { + std::cout << "Bad! Dimensions should be accepted" << std::endl; + abort(); + } + + D.resize(boost::extents[2][3]); + D = A; + + CosmoTool::hdf5_write_array(g, "test_data_c", D); + + CosmoTool::hdf5_read_array(g, "test_data_c", E); + + verify = 0; + for (index i = 0; i != 2; i++) + for (index j = 0; j != 3; j++) + if (E[i][j].real() != verify++) { + std::cout << "Invalid array content" << endl; + abort(); + } + + CosmoTool::hdf5_read_array(g, "test_struct", G); + for (index i = 0; i != 10; i++) + if (G[i].a != F[i].a || G[i].b != F[i].b || G[i].c != F[i].c) { + std::cout << "Invalid struct content" << endl; + abort(); + } + + return 0; +} diff --git a/external/cosmotool/src/fourier/fft/fftw_calls_mpi.hpp b/external/cosmotool/src/fourier/fft/fftw_calls_mpi.hpp new file mode 100644 index 0000000..1b14128 --- /dev/null +++ b/external/cosmotool/src/fourier/fft/fftw_calls_mpi.hpp @@ -0,0 +1,101 @@ +#ifndef __MPI_FFTW_UNIFIED_CALLS_HPP +#define __MPI_FFTW_UNIFIED_CALLS_HPP + +#include +#include +#include + +namespace CosmoTool +{ + +static inline void init_fftw_mpi() +{ + fftw_mpi_init(); +} + +static inline void done_fftw_mpi() +{ + fftw_mpi_cleanup(); +} + +template class FFTW_MPI_Calls {}; + + +#define FFTW_MPI_CALLS_BASE(rtype, prefix) \ + template<> \ +class FFTW_MPI_Calls { \ +public: \ + 
typedef rtype real_type; \ + typedef prefix ## _complex complex_type; \ + typedef prefix ## _plan plan_type; \ + \ + static complex_type *alloc_complex(size_t N) { return prefix ## _alloc_complex(N); } \ + static real_type *alloc_real(size_t N) { return prefix ## _alloc_real(N); } \ + static void free(void *p) { fftw_free(p); } \ +\ + static ptrdiff_t local_size_2d(ptrdiff_t N0, ptrdiff_t N1, MPI_Comm comm, \ + ptrdiff_t *local_n0, ptrdiff_t *local_0_start) { \ + return prefix ## _mpi_local_size_2d(N0, N1, comm, local_n0, local_0_start); \ + } \ +\ + static ptrdiff_t local_size_3d(ptrdiff_t N0, ptrdiff_t N1, ptrdiff_t N2, MPI_Comm comm, \ + ptrdiff_t *local_n0, ptrdiff_t *local_0_start) { \ + return prefix ## _mpi_local_size_3d(N0, N1, N2, comm, local_n0, local_0_start); \ + } \ +\ + static void execute(plan_type p) { prefix ## _execute(p); } \ + static void execute_r2c(plan_type p, real_type *in, complex_type *out) { prefix ## _mpi_execute_dft_r2c(p, in, out); } \ + static void execute_c2r(plan_type p, std::complex *in, real_type *out) { prefix ## _mpi_execute_dft_c2r(p, (complex_type*)in, out); } \ + static void execute_c2r(plan_type p, complex_type *in, real_type *out) { prefix ## _mpi_execute_dft_c2r(p, in, out); } \ + static void execute_r2c(plan_type p, real_type *in, std::complex *out) { prefix ## _mpi_execute_dft_r2c(p, in, (complex_type*)out); } \ +\ + static plan_type plan_dft_r2c_2d(int Nx, int Ny, \ + real_type *in, complex_type *out, \ + MPI_Comm comm, unsigned flags) \ + { \ + return prefix ## _mpi_plan_dft_r2c_2d(Nx, Ny, in, out, \ + comm, flags); \ + } \ + static plan_type plan_dft_c2r_2d(int Nx, int Ny, \ + complex_type *in, real_type *out, \ + MPI_Comm comm, unsigned flags) \ + { \ + return prefix ## _mpi_plan_dft_c2r_2d(Nx, Ny, in, out, \ + comm, flags); \ + } \ + static plan_type plan_dft_r2c_3d(int Nx, int Ny, int Nz, \ + real_type *in, complex_type *out, \ + MPI_Comm comm, unsigned flags) \ + { \ + return prefix ## _mpi_plan_dft_r2c_3d(Nx, Ny, 
Nz, in, out, comm, flags); \ + } \ + static plan_type plan_dft_c2r_3d(int Nx, int Ny, int Nz, \ + complex_type *in, real_type *out, \ + MPI_Comm comm, \ + unsigned flags) \ + { \ + return prefix ## _mpi_plan_dft_c2r_3d(Nx, Ny, Nz, in, out, comm, flags); \ + } \ +\ + static plan_type plan_dft_r2c(int rank, const ptrdiff_t *n, real_type *in, \ + complex_type *out, MPI_Comm comm, unsigned flags) \ + { \ + return prefix ## _mpi_plan_dft_r2c(rank, n, in, out, comm, flags); \ + } \ + static plan_type plan_dft_c2r(int rank, const ptrdiff_t *n, complex_type *in, \ + real_type *out, MPI_Comm comm, unsigned flags) \ + { \ + return prefix ## _mpi_plan_dft_c2r(rank, n, in, out, comm, flags); \ + } \ + static void destroy_plan(plan_type plan) { prefix ## _destroy_plan(plan); } \ +} + + +FFTW_MPI_CALLS_BASE(double, fftw); +FFTW_MPI_CALLS_BASE(float, fftwf); + +#undef FFTW_MPI_CALLS_BASE + +}; + +#endif diff --git a/external/cosmotool/src/fourier/fft/fftw_complex.hpp b/external/cosmotool/src/fourier/fft/fftw_complex.hpp new file mode 100644 index 0000000..af5d445 --- /dev/null +++ b/external/cosmotool/src/fourier/fft/fftw_complex.hpp @@ -0,0 +1,42 @@ +#ifndef __COSMOTOOL_FFT_COMPLEX_HPP +#define __COSMOTOOL_FFT_COMPLEX_HPP + +#include +#include + +namespace CosmoTool +{ + template + struct adapt_complex { + }; + + template<> struct adapt_complex { + typedef fftw_complex f_type; + typedef std::complex cpp_complex; + + static inline cpp_complex *adapt(f_type *a) { + return reinterpret_cast(a); + } + }; + + template<> struct adapt_complex { + typedef fftwf_complex f_type; + typedef std::complex cpp_complex; + + static inline cpp_complex *adapt(f_type *a) { + return reinterpret_cast(a); + } + }; + + template<> struct adapt_complex { + typedef fftwl_complex f_type; + typedef std::complex cpp_complex; + + static inline cpp_complex *adapt(f_type *a) { + return reinterpret_cast(a); + } + }; + +} + +#endif \ No newline at end of file diff --git a/external/cosmotool/src/hdf5_array.hpp 
b/external/cosmotool/src/hdf5_array.hpp new file mode 100644 index 0000000..2e73b9a --- /dev/null +++ b/external/cosmotool/src/hdf5_array.hpp @@ -0,0 +1,487 @@ +/*+ +This is CosmoTool (./src/hdf5_array.hpp) -- Copyright (C) Guilhem Lavaux (2007-2014) + +guilhem.lavaux@gmail.com + +This software is a computer program whose purpose is to provide a toolbox for cosmological +data analysis (e.g. filters, generalized Fourier transforms, power spectra, ...) + +This software is governed by the CeCILL license under French law and +abiding by the rules of distribution of free software. You can use, +modify and/ or redistribute the software under the terms of the CeCILL +license as circulated by CEA, CNRS and INRIA at the following URL +"http://www.cecill.info". + +As a counterpart to the access to the source code and rights to copy, +modify and redistribute granted by the license, users are provided only +with a limited warranty and the software's author, the holder of the +economic rights, and the successive licensors have only limited +liability. + +In this respect, the user's attention is drawn to the risks associated +with loading, using, modifying and/or developing or reproducing the +software by the user in light of its specific status of free software, +sthat may mean that it is complicated to manipulate, and that also +therefore means that it is reserved for developers and experienced +professionals having in-depth computer knowledge. Users are therefore +encouraged to load and test the software's suitability as regards their +requirements in conditions enabling the security of their systems and/or +data to be ensured and, more generally, to use and operate it in the +same conditions as regards security. + +The fact that you are presently reading this means that you have had +knowledge of the CeCILL license and that you accept its terms. 
++*/ +#ifndef __COSMO_HDF5_ARRAY_HPP +#define __COSMO_HDF5_ARRAY_HPP + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace CosmoTool { +#if (H5_VERS_MAJOR == 1) && (H5_VERS_MINOR <= 8) + typedef H5::CommonFG H5_CommonFileGroup; +#else + typedef H5::Group H5_CommonFileGroup; +#endif + + //!_______________________________________________________________________________________ + //! + //! map types to HDF5 types + //! + //! + //! Leo Goodstadt (04 March 2013), improved with enable_if by Guilhem Lavaux (May 2014) + //!_______________________________________________________________________________________ + + template struct get_hdf5_data_type + { + static H5::DataType type() + { + BOOST_MPL_ASSERT_MSG(0, Unknown_HDF5_data_type, ()); + return H5::PredType::NATIVE_DOUBLE; + } + }; + + //, typename boost::enable_if >::type> \ + // + #define HDF5_TYPE(tl, thdf5) \ + template struct get_hdf5_data_type >::type > \ + { static H5::DataType type() { return H5::PredType::thdf5; }; } + + #define HDF5_SAFE_TYPE(tl, othertl, thdf5) \ + template struct get_hdf5_data_type::value \ + && !boost::is_same::value > \ + >::type \ + > \ + { static H5::DataType type() { return H5::PredType::thdf5; }; } + + + HDF5_SAFE_TYPE(long, int , NATIVE_LONG); + HDF5_SAFE_TYPE(unsigned long, unsigned int , NATIVE_ULONG); + HDF5_SAFE_TYPE(long long, long , NATIVE_LLONG); + HDF5_SAFE_TYPE(unsigned long long, unsigned long, NATIVE_ULLONG); + HDF5_TYPE(char , NATIVE_CHAR); + HDF5_TYPE(unsigned char , NATIVE_UCHAR); + HDF5_TYPE(int , NATIVE_INT); + HDF5_TYPE(unsigned int , NATIVE_UINT); + HDF5_TYPE(float , NATIVE_FLOAT); + HDF5_TYPE(double , NATIVE_DOUBLE); + + #undef HDF5_TYPE + #undef HDF5_SAFE_TYPE + + // Extent generator + template + struct hdf5_extent_gen { + typedef typename boost::detail::multi_array::extent_gen type; + + static inline type build(hsize_t *d) + { + return (hdf5_extent_gen::build(d))[d[r-1]]; + 
} + }; + + template<> + struct hdf5_extent_gen<0> { + static inline boost::multi_array_types::extent_gen build(hsize_t *d) + { + return boost::extents; + } + }; + + +//!_______________________________________________________________________________________ +//! +//! write_hdf5 multi_array +//! +//! \author Guilhem Lavaux (2014-2015) +//! \author leo Goodstadt (04 March 2013) +//! +//!_______________________________________________________________________________________ + template + void hdf5_write_array(H5_CommonFileGroup& fg, const std::string& data_set_name, + const ArrayType& data, + const hdf5_data_type& datatype, + const std::vector& dimensions, + bool doCreate = true, + bool useBases = false) + { + std::vector memdims(data.shape(), data.shape() + data.num_dimensions()); + H5::DataSpace dataspace(dimensions.size(), dimensions.data()); + H5::DataSpace memspace(memdims.size(), memdims.data()); + + if (useBases) { + std::vector offsets(data.index_bases(), data.index_bases() + data.num_dimensions()); + dataspace.selectHyperslab(H5S_SELECT_SET, memdims.data(), offsets.data()); + } + + H5::DataSet dataset; + if (doCreate) + dataset = fg.createDataSet(data_set_name, datatype, dataspace); + else + dataset = fg.openDataSet(data_set_name); + + dataset.write(data.data(), datatype, memspace, dataspace); + } + + + template + void hdf5_write_array(H5_CommonFileGroup& fg, const std::string& data_set_name, + const ArrayType& data, + const hdf5_data_type& datatype, + bool doCreate = true, + bool useBases = false) + { + std::vector dimensions(data.shape(), data.shape() + data.num_dimensions()); + hdf5_write_array(fg, data_set_name, data, datatype, dimensions, doCreate, useBases); + } + + /* HDF5 complex type */ + template + class hdf5_ComplexType + { + public: + H5::CompType type; + + hdf5_ComplexType() + : type(sizeof(std::complex)) + { + get_hdf5_data_type hdf_data_type; + type.insertMember("r", 0, hdf_data_type.type()); + type.insertMember("i", sizeof(T), 
hdf_data_type.type()); + type.pack(); + } + + static const hdf5_ComplexType *ctype() + { + static hdf5_ComplexType singleton; + + return &singleton; + } + }; + + template<> struct get_hdf5_data_type > { + static H5::DataType type() { + return hdf5_ComplexType::ctype()->type; + } + }; + + template<> struct get_hdf5_data_type > { + static H5::DataType type() { + return hdf5_ComplexType::ctype()->type; + } + }; + + class hdf5_StringType + { + public: + H5::StrType type; + + hdf5_StringType() + : type(0, H5T_VARIABLE) + { + } + + static const hdf5_StringType *ctype() + { + static hdf5_StringType singleton; + return &singleton; + } + }; + + template<> struct get_hdf5_data_type { + static H5::DataType type() { + return hdf5_StringType::ctype()->type; + } + }; + + class hdf5_BoolType + { + public: + H5::EnumType type; + + hdf5_BoolType() + : type(sizeof(bool)) + { + bool v; + + v = true; + type.insert("TRUE", &v); + v = false; + type.insert("FALSE", &v); + } + static const hdf5_BoolType *ctype() + { + static hdf5_BoolType singleton; + + return &singleton; + } + }; + + template<> struct get_hdf5_data_type { + static H5::DataType type() { + return hdf5_BoolType::ctype()->type; + } + }; + + template + void hdf5_write_array(H5_CommonFileGroup& fg, const std::string& data_set_name, const ArrayType& data ) + { + typedef typename ArrayType::element T; + get_hdf5_data_type hdf_data_type; + + hdf5_write_array(fg, data_set_name, data, hdf_data_type.type()); + } + + // HDF5 array reader + // + // Author Guilhem Lavaux (May 2014) + + class InvalidDimensions: virtual std::exception { + }; + + + // ---------------------------------------------------------------------- + // Conditional resize support + // If the Array type support resize then it is called. 
Otherwise + // the dimensions are checked and lead to a failure if they are different + + template class array_has_resize { + struct Fallback { int resize; }; + struct Derived: Array, Fallback {}; + + typedef char yes[1]; + typedef char no[2]; + + template struct Check; + + template + static yes& func(Check *); + + template + static no& func(...); + public: + typedef array_has_resize type; + enum { value = sizeof(func(0)) == sizeof(no) }; + }; + + + + template + typename boost::enable_if< + array_has_resize + >::type + hdf5_resize_array(ArrayType& data, std::vector& dims) { + data.resize( + hdf5_extent_gen::build(dims.data()) + ); + } + + template + void hdf5_check_array(ArrayType& data, std::vector& dims) { + for (size_t i = 0; i < data.num_dimensions(); i++) { + if (data.shape()[i] != dims[i]) { + throw InvalidDimensions(); + } + } + } + + template + void hdf5_weak_check_array(ArrayType& data, std::vector& dims) { + for (size_t i = 0; i < data.num_dimensions(); i++) { + if (data.index_bases()[i] < 0) { + // Negative indexes are not supported right now. 
+ throw InvalidDimensions(); + } + if (data.index_bases()[i]+data.shape()[i] > dims[i]) { + throw InvalidDimensions(); + } + } + } + + + template + typename boost::disable_if< + array_has_resize + >::type + hdf5_resize_array(ArrayType& data, std::vector& dims) { + hdf5_check_array(data, dims); + } + + // ---------------------------------------------------------------------- + + template + void hdf5_read_array_typed(H5_CommonFileGroup& fg, const std::string& data_set_name, + ArrayType& data, + const hdf5_data_type& datatype, bool auto_resize = true, bool useBases = false) + { + H5::DataSet dataset = fg.openDataSet(data_set_name); + H5::DataSpace dataspace = dataset.getSpace(); + std::vector dimensions(data.num_dimensions()); + + if ((size_t)dataspace.getSimpleExtentNdims() != (size_t)data.num_dimensions()) + { + throw InvalidDimensions(); + } + + dataspace.getSimpleExtentDims(dimensions.data()); + if (auto_resize) + hdf5_resize_array(data, dimensions); + else { + if (useBases) { + hdf5_weak_check_array(data, dimensions); + + std::vector memdims(data.shape(), data.shape() + data.num_dimensions()); + H5::DataSpace memspace(memdims.size(), memdims.data()); + + std::vector offsets(data.index_bases(), data.index_bases() + data.num_dimensions()); + dataspace.selectHyperslab(H5S_SELECT_SET, memdims.data(), offsets.data()); + + dataset.read(data.data(), datatype, memspace, dataspace); + return; + } else { + hdf5_check_array(data, dimensions); + } + } + dataset.read(data.data(), datatype); + } + + template + void hdf5_read_array(H5_CommonFileGroup& fg, const std::string& data_set_name, ArrayType& data, bool auto_resize = true, + bool useBases = false ) + { + typedef typename ArrayType::element T; + + hdf5_read_array_typed(fg, data_set_name, data, get_hdf5_data_type::type(), auto_resize, useBases); + } + + +#define CTOOL_HDF5_NAME(STRUCT) BOOST_PP_CAT(hdf5_,STRUCT) +#define CTOOL_HDF5_INSERT_ELEMENT(r, STRUCT, element) \ + { \ + ::CosmoTool::get_hdf5_data_type t; \ + position 
= HOFFSET(STRUCT, BOOST_PP_TUPLE_ELEM(2, 1, element)); \ + const char *field_name = BOOST_PP_STRINGIZE(BOOST_PP_TUPLE_ELEM(2, 1, element)); \ + type.insertMember(field_name, position, t.type()); \ + } + +#define CTOOL_STRUCT_TYPE(STRUCT, TNAME, ATTRIBUTES) \ +namespace CosmoTool { \ + class TNAME { \ + public: \ + H5::CompType type; \ + \ + TNAME() : type(sizeof(STRUCT)) \ + { \ + long position; \ + BOOST_PP_SEQ_FOR_EACH(CTOOL_HDF5_INSERT_ELEMENT, STRUCT, ATTRIBUTES) \ + } \ + \ + static const TNAME *ctype() \ + { \ + static TNAME singleton; \ + return &singleton; \ + } \ + }; \ + template<> struct get_hdf5_data_type { \ + static H5::DataType type() { return TNAME::ctype()->type; }; \ + }; \ +}; + + +#define CTOOL_HDF5_INSERT_ENUM_ELEMENT(r, STRUCT, element) \ + { \ + const char *field_name = BOOST_PP_STRINGIZE(element); \ + STRUCT a = element; \ + type.insert(field_name, &a); \ + } + + +#define CTOOL_ENUM_TYPE(STRUCT, TNAME, ATTRIBUTES) \ +namespace CosmoTool { \ + class TNAME { \ + public: \ + H5::EnumType type; \ + \ + TNAME() : type(sizeof(STRUCT)) \ + { \ + long position; \ + BOOST_PP_SEQ_FOR_EACH(CTOOL_HDF5_INSERT_ENUM_ELEMENT, STRUCT, ATTRIBUTES) \ + } \ + \ + static const TNAME *ctype() \ + { \ + static TNAME singleton; \ + return &singleton; \ + } \ + }; \ + template<> struct get_hdf5_data_type { \ + static H5::DataType type() { return TNAME::ctype()->type; }; \ + }; \ +}; + +#define CTOOL_ARRAY_TYPE(ARRAY_TYPE, DIM, TNAME) \ +namespace CosmoTool { \ + class TNAME { \ + public: \ + H5::ArrayType *type; \ +\ + TNAME() \ + { \ + hsize_t dims[1] = { DIM }; \ + type = new H5::ArrayType(get_hdf5_data_type::type(), 1, dims); \ + } \ + ~TNAME() { delete type; } \ +\ + static const TNAME *ctype() \ + { \ + static TNAME singleton; \ + return &singleton; \ + } \ + }; \ +\ + template<> struct get_hdf5_data_type< ARRAY_TYPE[DIM] > { \ + static H5::DataType type() { return *(TNAME::ctype()->type); }; \ + }; \ +}; + +}; + +#endif + + diff --git 
a/external/cosmotool/src/octTree.tcc b/external/cosmotool/src/octTree.tcc new file mode 100644 index 0000000..e685ea2 --- /dev/null +++ b/external/cosmotool/src/octTree.tcc @@ -0,0 +1,217 @@ +/*+ +This is CosmoTool (./src/octTree.cpp) -- Copyright (C) Guilhem Lavaux (2007-2014) + +guilhem.lavaux@gmail.com + +This software is a computer program whose purpose is to provide a toolbox for cosmological +data analysis (e.g. filters, generalized Fourier transforms, power spectra, ...) + +This software is governed by the CeCILL license under French law and +abiding by the rules of distribution of free software. You can use, +modify and/ or redistribute the software under the terms of the CeCILL +license as circulated by CEA, CNRS and INRIA at the following URL +"http://www.cecill.info". + +As a counterpart to the access to the source code and rights to copy, +modify and redistribute granted by the license, users are provided only +with a limited warranty and the software's author, the holder of the +economic rights, and the successive licensors have only limited +liability. + +In this respect, the user's attention is drawn to the risks associated +with loading, using, modifying and/or developing or reproducing the +software by the user in light of its specific status of free software, +that may mean that it is complicated to manipulate, and that also +therefore means that it is reserved for developers and experienced +professionals having in-depth computer knowledge. Users are therefore +encouraged to load and test the software's suitability as regards their +requirements in conditions enabling the security of their systems and/or +data to be ensured and, more generally, to use and operate it in the +same conditions as regards security. + +The fact that you are presently reading this means that you have had +knowledge of the CeCILL license and that you accept its terms. 
++*/ + +#include +#include +#include +#include "config.hpp" +#include "octTree.hpp" + +namespace CosmoTool { + +using namespace std; + +//#define VERBOSE + +static uint32_t mypow(uint32_t i, uint32_t p) +{ + if (p == 0) + return 1; + else if (p == 1) + return i; + + uint32_t k = p/2; + uint32_t j = mypow(i, k); + if (2*k==p) + return j*j; + else + return j*j*i; +} + +template +OctTree::OctTree(const FCoordinates *particles, octPtr numParticles, + uint32_t maxMeanTreeDepth, uint32_t maxAbsoluteDepth, + uint32_t threshold) +{ + cout << "MeanTree=" << maxMeanTreeDepth << endl; + numCells = mypow(8, maxMeanTreeDepth); + assert(numCells < invalidOctCell); + //#ifdef VERBOSE + cerr << "Allocating " << numCells << " octtree cells" << endl; + //#endif + + for (int j = 0; j < 3; j++) + xMin[j] = particles[0][j]; + + for (octPtr i = 1; i < numParticles; i++) + { + for (int j = 0; j < 3; j++) + { + if (particles[i][j] < xMin[j]) + xMin[j] = particles[i][j]; + } + } + + lenNorm = 0; + for (octPtr i = 0; i < numParticles; i++) + { + for (int j = 0; j < 3; j++) + { + float delta = particles[i][j]-xMin[j]; + if (delta > lenNorm) + lenNorm = delta; + } + } + cout << xMin[0] << " " << xMin[1] << " " << xMin[2] << " lNorm=" << lenNorm << endl; + + cells = new OctCell[numCells]; + Lbox = (float)(octCoordTypeNorm+1); + + cells[0].numberLeaves = 0; + for (int i = 0; i < 8; i++) + cells[0].children[i] = emptyOctCell; + + lastNode = 1; + this->particles = particles; + this->numParticles = numParticles; + buildTree(maxAbsoluteDepth); + //#ifdef VERBOSE + cerr << "Used " << lastNode << " cells" << endl; + //#endif +} + +template +OctTree::~OctTree() +{ + delete cells; +} + +template +void OctTree::buildTree(uint32_t maxAbsoluteDepth) +{ + for (octPtr i = 0; i < numParticles; i++) + { + OctCoords rootCenter = { octCoordCenter, octCoordCenter, octCoordCenter }; + insertParticle(0, // root node + rootCenter, + octCoordCenter, + i, + maxAbsoluteDepth); + } +} + + +template +void 
OctTree::insertParticle(octPtr node, + const OctCoords& icoord, + octCoordType halfNodeLength, + octPtr particleId, + uint32_t maxAbsoluteDepth) +{ + +#ifdef VERBOSE + cout << "Entering " << node << " (" << icoord[0] << "," << icoord[1] << "," << icoord[2] << ")" << endl; +#endif + int octPos = 0; + int ipos[3] = { 0,0,0}; + octPtr newNode; + OctCoords newCoord; + + cells[node].numberLeaves++; + if (maxAbsoluteDepth == 0) + { + // All children must be invalid. + for (int i = 0 ; i < 8; i++) + cells[node].children[i] = invalidOctCell; + + return; + } + + for (int j = 0; j < 3; j++) + { + float treePos = (particles[particleId][j]-xMin[j])*Lbox/lenNorm; + if ((octPtr)(treePos) > icoord[j]) + { + octPos |= (1 << j); + ipos[j] = 1; + } + } + + if (cells[node].children[octPos] == emptyOctCell) + { + // Put the particle there. + cells[node].children[octPos] = particleId | octParticleMarker; + return; + } + + // If it is a node, explores it. + if (!(cells[node].children[octPos] & octParticleMarker)) + { + assert(halfNodeLength >= 2); + // Compute coordinates + for (int j = 0; j < 3; j++) + newCoord[j] = icoord[j]+(2*ipos[j]-1)*halfNodeLength/2; + insertParticle(cells[node].children[octPos], newCoord, halfNodeLength/2, + particleId, maxAbsoluteDepth-1); + return; + } + + // We have a particle there. + // Make a new node and insert the old particle into this node. 
+ // Insert the new particle into the node also + // Finally put the node in place + + newNode = lastNode++; + assert(lastNode != numCells); + + for (int j = 0; j < 8; j++) + cells[newNode].children[j] = emptyOctCell; + cells[newNode].numberLeaves = 0; + + // Compute coordinates + for (int j = 0; j < 3; j++) + newCoord[j] = icoord[j]+(2*ipos[j]-1)*halfNodeLength/2; + + octPtr oldPartId = cells[node].children[octPos] & octParticleMask; + + insertParticle(newNode, newCoord, halfNodeLength/2, + oldPartId, maxAbsoluteDepth-1); + insertParticle(newNode, newCoord, halfNodeLength/2, + particleId, maxAbsoluteDepth-1); + cells[node].children[octPos] = newNode; +} + + +}; diff --git a/external/cosmotool/src/openmp.hpp b/external/cosmotool/src/openmp.hpp new file mode 100644 index 0000000..7ef2c76 --- /dev/null +++ b/external/cosmotool/src/openmp.hpp @@ -0,0 +1,44 @@ +#ifndef __CTOOL_OPENMP_HPP +#define __CTOOL_OPENMP_HPP + +#ifdef _OPENMP +#include +#endif + +namespace CosmoTool { + + static int smp_get_max_threads() { +#ifdef _OPENMP + return omp_get_max_threads(); +#else + return 1; +#endif + } + + static int smp_get_thread_id() { +#ifdef _OPENMP + return omp_get_thread_num(); +#else + return 0; +#endif + } + + static int smp_get_num_threads() { +#ifdef _OPENMP + return omp_get_num_threads(); +#else + return 1; +#endif + + } + + static void smp_set_nested(bool n) { +#ifdef _OPENMP + omp_set_nested(n ? 
1 : 0); +#endif + } + + +}; + +#endif diff --git a/external/cosmotool/src/symbol_visible.hpp b/external/cosmotool/src/symbol_visible.hpp new file mode 100644 index 0000000..d128144 --- /dev/null +++ b/external/cosmotool/src/symbol_visible.hpp @@ -0,0 +1,32 @@ +#ifndef __COSMOTOOL_SYMBOL_VISIBLE_HPP +#define __COSMOTOOL_SYMBOL_VISIBLE_HPP + + +#if defined _WIN32 || defined __CYGWIN__ + #ifdef BUILDING_DLL + #ifdef __GNUC__ + #define CTOOL_DLL_PUBLIC __attribute__ ((dllexport)) + #else + #define CTOOL_DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also supports this syntax. + #endif + #else + #ifdef __GNUC__ + #define CTOOL_DLL_PUBLIC __attribute__ ((dllimport)) + #else + #define CTOOL_DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also supports this syntax. + #endif + #endif + #define CTOOL_DLL_LOCAL +#else + #if __GNUC__ >= 4 + #define CTOOL_DLL_PUBLIC __attribute__ ((visibility ("default"))) + #define CTOOL_DLL_LOCAL __attribute__ ((visibility ("hidden"))) + #else + #define CTOOL_DLL_PUBLIC + #define CTOOL_DLL_LOCAL + #endif +#endif + + + +#endif diff --git a/external/cosmotool/src/tf_fit.hpp b/external/cosmotool/src/tf_fit.hpp new file mode 100644 index 0000000..affcb1d --- /dev/null +++ b/external/cosmotool/src/tf_fit.hpp @@ -0,0 +1,313 @@ +/* The following routines implement all of the fitting formulae in +Eisenstein \& Hu (1997) */ + +/* There are two sets of routines here. The first set, + + TFfit_hmpc(), TFset_parameters(), and TFfit_onek(), + +calculate the transfer function for an arbitrary CDM+baryon universe using +the fitting formula in Section 3 of the paper. The second set, + + TFsound_horizon_fit(), TFk_peak(), TFnowiggles(), and TFzerobaryon(), + +calculate other quantities given in Section 4 of the paper. */ + +#include +#include + +/* ------------------------ DRIVER ROUTINE --------------------------- */ +/* The following is an example of a driver routine you might use. 
*/ +/* Basically, the driver routine needs to call TFset_parameters() to +set all the scalar parameters, and then call TFfit_onek() for each +wavenumber k you desire. */ + +/* While the routines use Mpc^-1 units internally, this driver has been +written to take an array of wavenumbers in units of h Mpc^-1. On the +other hand, if you want to use Mpc^-1 externally, you can do this by +altering the variables you pass to the driver: + omega0 -> omega0*hubble*hubble, hubble -> 1.0 */ + +/* INPUT: omega0 -- the matter density (baryons+CDM) in units of critical + f_baryon -- the ratio of baryon density to matter density + hubble -- the Hubble constant, in units of 100 km/s/Mpc + Tcmb -- the CMB temperature in Kelvin. T<=0 uses the COBE value 2.728. + numk -- the length of the following zero-offset array + k[] -- the array of wavevectors k[0..numk-1] */ + +/* INPUT/OUTPUT: There are three output arrays of transfer functions. +All are zero-offset and, if used, must have storage [0..numk-1] declared +in the calling program. However, if you substitute the NULL pointer for +one or more of the arrays, then that particular transfer function won't +be outputted. The transfer functions are: + + tf_full[] -- The full fitting formula, eq. (16), for the matter + transfer function. + tf_baryon[] -- The baryonic piece of the full fitting formula, eq. 21. + tf_cdm[] -- The CDM piece of the full fitting formula, eq. 17. */ + +/* Again, you can set these pointers to NULL in the function call if +you don't want a particular output. */ + +/* Various intermediate scalar quantities are stored in global variables, +so that you might more easily access them. However, this also means that +you would be better off not simply #include'ing this file in your programs, +but rather compiling it separately, calling only the driver, and using +extern declarations to access the intermediate quantities. 
*/ + +/* ------------------------ FITTING FORMULAE ROUTINES ----------------- */ + +/* There are two routines here. TFset_parameters() sets all the scalar +parameters, while TFfit_onek() calculates the transfer function for a +given wavenumber k. TFfit_onek() may be called many times after a single +call to TFset_parameters() */ + +/* Global variables -- We've left many of the intermediate results as +global variables in case you wish to access them, e.g. by declaring +them as extern variables in your main program. */ +/* Note that all internal scales are in Mpc, without any Hubble constants! */ + +namespace CosmoTool { +struct TF_Transfer { + +float omhh, /* Omega_matter*h^2 */ + obhh, /* Omega_baryon*h^2 */ + theta_cmb, /* Tcmb in units of 2.7 K */ + z_equality, /* Redshift of matter-radiation equality, really 1+z */ + k_equality, /* Scale of equality, in Mpc^-1 */ + z_drag, /* Redshift of drag epoch */ + R_drag, /* Photon-baryon ratio at drag epoch */ + R_equality, /* Photon-baryon ratio at equality epoch */ + sound_horizon, /* Sound horizon at drag epoch, in Mpc */ + k_silk, /* Silk damping scale, in Mpc^-1 */ + alpha_c, /* CDM suppression */ + beta_c, /* CDM log shift */ + alpha_b, /* Baryon suppression */ + beta_b, /* Baryon envelope shift */ + beta_node, /* Sound horizon shift */ + k_peak, /* Fit to wavenumber of first peak, in Mpc^-1 */ + sound_horizon_fit, /* Fit to sound horizon, in Mpc */ + alpha_gamma; /* Gamma suppression in approximate TF */ + +/* Convenience from Numerical Recipes in C, 2nd edition */ + float sqrarg; +#define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg) + float cubearg; +#define CUBE(a) ((cubearg=(a)) == 0.0 ? 0.0 : cubearg*cubearg*cubearg) + float pow4arg; +#define POW4(a) ((pow4arg=(a)) == 0.0 ? 
0.0 : pow4arg*pow4arg*pow4arg*pow4arg) + /* Yes, I know the last one isn't optimal; it doesn't appear much */ + +void TFset_parameters(float omega0hh, float f_baryon, float Tcmb) +/* Set all the scalars quantities for Eisenstein & Hu 1997 fitting formula */ +/* Input: omega0hh -- The density of CDM and baryons, in units of critical dens, + multiplied by the square of the Hubble constant, in units + of 100 km/s/Mpc */ +/* f_baryon -- The fraction of baryons to CDM */ +/* Tcmb -- The temperature of the CMB in Kelvin. Tcmb<=0 forces use + of the COBE value of 2.728 K. */ +/* Output: Nothing, but set many global variables used in TFfit_onek(). +You can access them yourself, if you want. */ +/* Note: Units are always Mpc, never h^-1 Mpc. */ +{ + float z_drag_b1, z_drag_b2; + float alpha_c_a1, alpha_c_a2, beta_c_b1, beta_c_b2, alpha_b_G, y; + + if (f_baryon<=0.0 || omega0hh<=0.0) { + fprintf(stderr, "TFset_parameters(): Illegal input.\n"); + exit(1); + } + omhh = omega0hh; + obhh = omhh*f_baryon; + if (Tcmb<=0.0) Tcmb=2.728; /* COBE FIRAS */ + theta_cmb = Tcmb/2.7; + + z_equality = 2.50e4*omhh/POW4(theta_cmb); /* Really 1+z */ + k_equality = 0.0746*omhh/SQR(theta_cmb); + + z_drag_b1 = 0.313*pow(omhh,-0.419)*(1+0.607*pow(omhh,0.674)); + z_drag_b2 = 0.238*pow(omhh,0.223); + z_drag = 1291*pow(omhh,0.251)/(1+0.659*pow(omhh,0.828))* + (1+z_drag_b1*pow(obhh,z_drag_b2)); + + R_drag = 31.5*obhh/POW4(theta_cmb)*(1000/(1+z_drag)); + R_equality = 31.5*obhh/POW4(theta_cmb)*(1000/z_equality); + + sound_horizon = 2./3./k_equality*sqrt(6./R_equality)* + log((sqrt(1+R_drag)+sqrt(R_drag+R_equality))/(1+sqrt(R_equality))); + + k_silk = 1.6*pow(obhh,0.52)*pow(omhh,0.73)*(1+pow(10.4*omhh,-0.95)); + + alpha_c_a1 = pow(46.9*omhh,0.670)*(1+pow(32.1*omhh,-0.532)); + alpha_c_a2 = pow(12.0*omhh,0.424)*(1+pow(45.0*omhh,-0.582)); + alpha_c = pow(alpha_c_a1,-f_baryon)* + pow(alpha_c_a2,-CUBE(f_baryon)); + + beta_c_b1 = 0.944/(1+pow(458*omhh,-0.708)); + beta_c_b2 = pow(0.395*omhh, -0.0266); + beta_c 
= 1.0/(1+beta_c_b1*(pow(1-f_baryon, beta_c_b2)-1)); + + y = z_equality/(1+z_drag); + alpha_b_G = y*(-6.*sqrt(1+y)+(2.+3.*y)*log((sqrt(1+y)+1)/(sqrt(1+y)-1))); + alpha_b = 2.07*k_equality*sound_horizon*pow(1+R_drag,-0.75)*alpha_b_G; + + beta_node = 8.41*pow(omhh, 0.435); + beta_b = 0.5+f_baryon+(3.-2.*f_baryon)*sqrt(pow(17.2*omhh,2.0)+1); + + k_peak = 2.5*3.14159*(1+0.217*omhh)/sound_horizon; + sound_horizon_fit = 44.5*log(9.83/omhh)/sqrt(1+10.0*pow(obhh,0.75)); + + alpha_gamma = 1-0.328*log(431.0*omhh)*f_baryon + 0.38*log(22.3*omhh)* + SQR(f_baryon); + + return; +} + +float TFfit_onek(float k, float *tf_baryon, float *tf_cdm) +/* Input: k -- Wavenumber at which to calculate transfer function, in Mpc^-1. + *tf_baryon, *tf_cdm -- Input value not used; replaced on output if + the input was not NULL. */ +/* Output: Returns the value of the full transfer function fitting formula. + This is the form given in Section 3 of Eisenstein & Hu (1997). + *tf_baryon -- The baryonic contribution to the full fit. + *tf_cdm -- The CDM contribution to the full fit. */ +/* Notes: Units are Mpc, not h^-1 Mpc. 
*/ +{ + float T_c_ln_beta, T_c_ln_nobeta, T_c_C_alpha, T_c_C_noalpha; + float q, xx, xx_tilde, q_eff; + float T_c_f, T_c, s_tilde, T_b_T0, T_b, f_baryon, T_full; + float T_0_L0, T_0_C0, T_0, gamma_eff; + float T_nowiggles_L0, T_nowiggles_C0, T_nowiggles; + + k = fabs(k); /* Just define negative k as positive */ + if (k==0.0) { + if (tf_baryon!=NULL) *tf_baryon = 1.0; + if (tf_cdm!=NULL) *tf_cdm = 1.0; + return 1.0; + } + + q = k/13.41/k_equality; + xx = k*sound_horizon; + + T_c_ln_beta = log(2.718282+1.8*beta_c*q); + T_c_ln_nobeta = log(2.718282+1.8*q); + T_c_C_alpha = 14.2/alpha_c + 386.0/(1+69.9*pow(q,1.08)); + T_c_C_noalpha = 14.2 + 386.0/(1+69.9*pow(q,1.08)); + + T_c_f = 1.0/(1.0+POW4(xx/5.4)); + T_c = T_c_f*T_c_ln_beta/(T_c_ln_beta+T_c_C_noalpha*SQR(q)) + + (1-T_c_f)*T_c_ln_beta/(T_c_ln_beta+T_c_C_alpha*SQR(q)); + + s_tilde = sound_horizon*pow(1+CUBE(beta_node/xx),-1./3.); + xx_tilde = k*s_tilde; + + T_b_T0 = T_c_ln_nobeta/(T_c_ln_nobeta+T_c_C_noalpha*SQR(q)); + T_b = sin(xx_tilde)/(xx_tilde)*(T_b_T0/(1+SQR(xx/5.2))+ + alpha_b/(1+CUBE(beta_b/xx))*exp(-pow(k/k_silk,1.4))); + + f_baryon = obhh/omhh; + T_full = f_baryon*T_b + (1-f_baryon)*T_c; + + /* Now to store these transfer functions */ + if (tf_baryon!=NULL) *tf_baryon = T_b; + if (tf_cdm!=NULL) *tf_cdm = T_c; + return T_full; +} + +/* ======================= Approximate forms =========================== */ + +float TFsound_horizon_fit(float omega0, float f_baryon, float hubble) +/* Input: omega0 -- CDM density, in units of critical density + f_baryon -- Baryon fraction, the ratio of baryon to CDM density. + hubble -- Hubble constant, in units of 100 km/s/Mpc +/* Output: The approximate value of the sound horizon, in h^-1 Mpc. */ +/* Note: If you prefer to have the answer in units of Mpc, use hubble -> 1 +and omega0 -> omega0*hubble^2. 
*/ +{ + float omhh, sound_horizon_fit_mpc; + omhh = omega0*hubble*hubble; + sound_horizon_fit_mpc = + 44.5*log(9.83/omhh)/sqrt(1+10.0*pow(omhh*f_baryon,0.75)); + return sound_horizon_fit_mpc*hubble; +} + +float TFk_peak(float omega0, float f_baryon, float hubble) +/* Input: omega0 -- CDM density, in units of critical density + f_baryon -- Baryon fraction, the ratio of baryon to CDM density. + hubble -- Hubble constant, in units of 100 km/s/Mpc +/* Output: The approximate location of the first baryonic peak, in h Mpc^-1 */ +/* Note: If you prefer to have the answer in units of Mpc^-1, use hubble -> 1 +and omega0 -> omega0*hubble^2. */ +{ + float omhh, k_peak_mpc; + omhh = omega0*hubble*hubble; + k_peak_mpc = 2.5*3.14159*(1+0.217*omhh)/TFsound_horizon_fit(omhh,f_baryon,1.0); + return k_peak_mpc/hubble; +} + +float TFnowiggles(float omega0, float f_baryon, float hubble, + float Tcmb, float k_hmpc) +/* Input: omega0 -- CDM density, in units of critical density + f_baryon -- Baryon fraction, the ratio of baryon to CDM density. + hubble -- Hubble constant, in units of 100 km/s/Mpc + Tcmb -- Temperature of the CMB in Kelvin; Tcmb<=0 forces use of + COBE FIRAS value of 2.728 K + k_hmpc -- Wavenumber in units of (h Mpc^-1). */ +/* Output: The value of an approximate transfer function that captures the +non-oscillatory part of a partial baryon transfer function. In other words, +the baryon oscillations are left out, but the suppression of power below +the sound horizon is included. See equations (30) and (31). */ +/* Note: If you prefer to use wavenumbers in units of Mpc^-1, use hubble -> 1 +and omega0 -> omega0*hubble^2. 
*/ +{ + float k, omhh, theta_cmb, k_equality, q, xx, alpha_gamma, gamma_eff; + float q_eff, T_nowiggles_L0, T_nowiggles_C0; + + k = k_hmpc*hubble; /* Convert to Mpc^-1 */ + omhh = omega0*hubble*hubble; + if (Tcmb<=0.0) Tcmb=2.728; /* COBE FIRAS */ + theta_cmb = Tcmb/2.7; + + k_equality = 0.0746*omhh/SQR(theta_cmb); + q = k/13.41/k_equality; + xx = k*TFsound_horizon_fit(omhh, f_baryon, 1.0); + + alpha_gamma = 1-0.328*log(431.0*omhh)*f_baryon + 0.38*log(22.3*omhh)* + SQR(f_baryon); + gamma_eff = omhh*(alpha_gamma+(1-alpha_gamma)/(1+POW4(0.43*xx))); + q_eff = q*omhh/gamma_eff; + + T_nowiggles_L0 = log(2.0*2.718282+1.8*q_eff); + T_nowiggles_C0 = 14.2 + 731.0/(1+62.5*q_eff); + return T_nowiggles_L0/(T_nowiggles_L0+T_nowiggles_C0*SQR(q_eff)); +} + +/* ======================= Zero Baryon Formula =========================== */ + +float TFzerobaryon(float omega0, float hubble, float Tcmb, float k_hmpc) +/* Input: omega0 -- CDM density, in units of critical density + hubble -- Hubble constant, in units of 100 km/s/Mpc + Tcmb -- Temperature of the CMB in Kelvin; Tcmb<=0 forces use of + COBE FIRAS value of 2.728 K + k_hmpc -- Wavenumber in units of (h Mpc^-1). */ +/* Output: The value of the transfer function for a zero-baryon universe. */ +/* Note: If you prefer to use wavenumbers in units of Mpc^-1, use hubble -> 1 +and omega0 -> omega0*hubble^2. */ +{ + float k, omhh, theta_cmb, k_equality, q, T_0_L0, T_0_C0; + + k = k_hmpc*hubble; /* Convert to Mpc^-1 */ + omhh = omega0*hubble*hubble; + if (Tcmb<=0.0) Tcmb=2.728; /* COBE FIRAS */ + theta_cmb = Tcmb/2.7; + + k_equality = 0.0746*omhh/SQR(theta_cmb); + q = k/13.41/k_equality; + + T_0_L0 = log(2.0*2.718282+1.8*q); + T_0_C0 = 14.2 + 731.0/(1+62.5*q); + return T_0_L0/(T_0_L0+T_0_C0*q*q); +} + +}; + +};