Mirror of https://github.com/DifferentiableUniverseInitiative/JaxPM.git, synced 2025-05-14 12:01:12 +00:00
jaxdecomp proto (#21)
* adding example of distributed solution
* put back old function
* update formatting
* add halo exchange and slice pad
* apply formatting
* implement distributed optimized cic_paint
* Use new cic_paint with halo
* Fix seed for distributed normal
* Wrap interpolation function to avoid all-gather
* Return normal-order frequencies for single GPU
* add example
* format
* add optimised bench script
* times in ms
* add lpt2
* update benchmark and add slurm
* Visualize only final field
* Update scripts/distributed_pm.py (Co-authored-by: Francois Lanusse <EiffL@users.noreply.github.com>)
* Adjust pencil type for frequencies
* fix painting issue with slabs
* Shared operations in Fourier space now take the inverted sharding axis for slabs
* add assert to make pyright happy
* adjust test for hpc-plotter
* add PMWD test
* bench
* format
* added github workflow
* fix formatting from main
* Update for jaxDecomp pure JAX
* revert single halo extent change
* update for latest jaxDecomp
* remove fourrier_space in autoshmap
* make normal_field work with single controller
* format
* make distributed pm work in single controller
* merge bench_pm
* update to leapfrog
* add a strict dependency on jaxdecomp
* global mesh no longer needed
* kernels.py no longer uses global mesh
* quick fix in distributed
* pm.py no longer uses global mesh
* painting.py no longer uses global mesh
* update demo script
* quick fix in kernels
* quick fix in distributed
* update demo
* merge Hugo's LPT2 code
* format
* Small fix
* format
* remove duplicate get_ode_fn
* update visualizer
* update compensate CIC
* By default check_rep is False for shard_map
* remove experimental distributed code
* update PGDCorrection and neural ODE to use new fft3d
* jaxDecomp pfft3d promotes to complex automatically
* remove deprecated stuff
* fix painting issue with read_cic
* use jnp interp instead of jc interp
* delete old slurms
* add notebook examples
* apply formatting
* add distributed zeros
* fix code in LPT2
* jit cic_paint
* update notebooks
* apply formatting
* get local shape and zeros can be used by users
* add a user-facing function to create a uniform particle grid
* use jax interp instead of jax_cosmo
* use float64 for enmeshing
* Allow applying weights with relative cic_paint
* Weights can be traced
* remove script folder
* update example notebooks
* delete outdated design file
* add readme for tutorials
* update readme
* fix small error
* forgot particles in multi host
* clarifying why cic_paint_dx is slower
* clarifying the halo size dependence on the box size
* ability to choose the number of snapshots with the MultiHost script
* Adding animation notebook
* Put plotting in package
* Add finite-difference Laplace kernel + power-spectrum functions from Hugo (Co-authored-by: Hugo Simonfroy <hugo.simonfroy@gmail.com>)
* Put plotting utils in package
* By default use absolute painting with
* update code
* update notebooks
* add tests
* Upgrade setup.py to pyproject
* Format
* format tests
* update test dependencies
* add test workflow
* fix deprecated FftType in jaxpm.kernels
* Add aboucaud comments
* JAX version is 0.4.35 until Diffrax new release
* add numpy explicitly as dependency for tests
* fix install order for tests
* add numpy to be installed
* enforce no build isolation for fastpm
* pip install jaxpm test without build isolation
* bump jaxdecomp version
* revert test workflow
* remove outdated tests

---------

Co-authored-by: EiffL <fr.eiffel@gmail.com>
Co-authored-by: Francois Lanusse <EiffL@users.noreply.github.com>
Co-authored-by: Wassim KABALAN <wassim@apc.in2p3.fr>
Co-authored-by: Hugo Simonfroy <hugo.simonfroy@gmail.com>

Former-commit-id: 8c2e823d4669eac712089bf7f85ffb7912e8232d
This commit is contained in:
parent a0a79277e5
commit df8602b318

26 changed files with 1871 additions and 434 deletions
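The headline change is a distributed, multi-GPU particle-mesh pipeline built on jaxDecomp. For orientation only (a minimal sketch, not code from this diff; only standard JAX multi-process APIs are used), a multi-host run under SLURM starts like this:

import jax

# Launched with one process per GPU (e.g. via srun); JAX discovers the
# coordinator address and cluster topology from the SLURM environment.
jax.distributed.initialize()

print(f"process {jax.process_index()}/{jax.process_count()}, "
      f"local devices: {jax.local_devices()}")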
tests/conftest.py (new file, 175 lines)
@@ -0,0 +1,175 @@
# Parameterized fixture for mesh_shape
import os

import pytest

# Make Equinox return NaNs instead of raising inside jitted code.
os.environ["EQX_ON_ERROR"] = "nan"

setup_done = False
on_cluster = False


def is_on_cluster():
    global on_cluster
    return on_cluster


def initialize_distributed():
    global setup_done
    global on_cluster
    if not setup_done:
        if "SLURM_JOB_ID" in os.environ:
            on_cluster = True
            print("Running on cluster")
            import jax
            jax.distributed.initialize()
            setup_done = True
            on_cluster = True
        else:
            print("Running locally")
            setup_done = True
            on_cluster = False
            # Emulate a multi-device setup on CPU; these variables must be
            # set before JAX is first imported.
            os.environ["JAX_PLATFORM_NAME"] = "cpu"
            os.environ[
                "XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
            import jax
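# Illustrative usage (not part of this diff): calling the helper before any
# other JAX import lets the XLA_FLAGS above take effect, so a local run
# sees eight virtual CPU devices:
#
#     initialize_distributed()
#     import jax
#     assert is_on_cluster() or jax.device_count() == 8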

@pytest.fixture(
    scope="session",
    params=[
        ((32, 32, 32), (256., 256., 256.)),  # BOX
        ((32, 32, 64), (256., 256., 512.)),  # RECTANGULAR
    ])
def simulation_config(request):
    return request.param


@pytest.fixture(scope="session", params=[0.1, 0.5, 0.8])
def lpt_scale_factor(request):
    return request.param
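# A test requesting both simulation_config and lpt_scale_factor is
# collected once per combination: 2 mesh/box configs x 3 scale factors
# = 6 parameterized runs.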

@pytest.fixture(scope="session")
def cosmo():
    from functools import partial

    from jax_cosmo import Cosmology
    Planck18 = partial(
        Cosmology,
        # Omega_m = 0.3111
        Omega_c=0.2607,
        Omega_b=0.0490,
        Omega_k=0.0,
        h=0.6766,
        n_s=0.9665,
        sigma8=0.8102,
        w0=-1.0,
        wa=0.0,
    )

    return Planck18()
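# Note: jax_cosmo derives Omega_m as Omega_c + Omega_b = 0.3097 here; the
# 0.3111 quoted above is the Planck 2018 value that also counts massive
# neutrinos, which this fixture leaves out.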

@pytest.fixture(scope="session")
def particle_mesh(simulation_config):
    from pmesh.pm import ParticleMesh
    mesh_shape, box_shape = simulation_config
    return ParticleMesh(BoxSize=box_shape, Nmesh=mesh_shape, dtype='f4')


@pytest.fixture(scope="session")
def fpm_initial_conditions(cosmo, particle_mesh):
    import jax_cosmo as jc
    import numpy as np
    from jax import numpy as jnp

    # Generate initial particle positions
    grid = particle_mesh.generate_uniform_particle_grid(shift=0).astype(
        np.float32)
    # Interpolate with linear_matter spectrum to get initial density field
    k = jnp.logspace(-4, 1, 128)
    pk = jc.power.linear_matter_power(cosmo, k)

    def pk_fn(x):
        return jnp.interp(x.reshape([-1]), k, pk).reshape(x.shape)

    whitec = particle_mesh.generate_whitenoise(42,
                                               type='complex',
                                               unitary=False)
    lineark = whitec.apply(lambda k, v: pk_fn(sum(ki**2 for ki in k)**0.5)**0.5
                           * v * (1 / v.BoxSize).prod()**0.5)
    init_mesh = lineark.c2r().value  # XXX

    return lineark, grid, init_mesh
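# The apply() lambda scales unit-variance white noise by sqrt(P(k) / V):
# pk_fn(|k|)**0.5 supplies sqrt(P(k)) and (1 / v.BoxSize).prod()**0.5 the
# 1/sqrt(V) factor, yielding the linear density contrast in mesh units.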

@pytest.fixture(scope="session")
def initial_conditions(fpm_initial_conditions):
    _, _, init_mesh = fpm_initial_conditions
    return init_mesh


@pytest.fixture(scope="session")
def solver(cosmo, particle_mesh):
    from fastpm.core import Cosmology as FastPMCosmology
    from fastpm.core import Solver
    ref_cosmo = FastPMCosmology(cosmo)
    return Solver(particle_mesh, ref_cosmo, B=1)


@pytest.fixture(scope="session")
def fpm_lpt1(solver, fpm_initial_conditions, lpt_scale_factor):
    lineark, grid, _ = fpm_initial_conditions
    statelpt = solver.lpt(lineark, grid, lpt_scale_factor, order=1)
    return statelpt


@pytest.fixture(scope="session")
def fpm_lpt1_field(fpm_lpt1, particle_mesh):
    return particle_mesh.paint(fpm_lpt1.X).value


@pytest.fixture(scope="session")
def fpm_lpt2(solver, fpm_initial_conditions, lpt_scale_factor):
    lineark, grid, _ = fpm_initial_conditions
    statelpt = solver.lpt(lineark, grid, lpt_scale_factor, order=2)
    return statelpt


@pytest.fixture(scope="session")
def fpm_lpt2_field(fpm_lpt2, particle_mesh):
    return particle_mesh.paint(fpm_lpt2.X).value


@pytest.fixture(scope="session")
def nbody_from_lpt1(solver, fpm_lpt1, particle_mesh, lpt_scale_factor):
    import numpy as np
    from fastpm.core import leapfrog

    if lpt_scale_factor == 0.8:
        pytest.skip("Do not run nbody simulation from scale factor 0.8")

    stages = np.linspace(lpt_scale_factor, 1.0, 10, endpoint=True)

    finalstate = solver.nbody(fpm_lpt1, leapfrog(stages))
    fpm_mesh = particle_mesh.paint(finalstate.X).value

    return fpm_mesh


@pytest.fixture(scope="session")
def nbody_from_lpt2(solver, fpm_lpt2, particle_mesh, lpt_scale_factor):
    import numpy as np
    from fastpm.core import leapfrog

    if lpt_scale_factor == 0.8:
        pytest.skip("Do not run nbody simulation from scale factor 0.8")

    stages = np.linspace(lpt_scale_factor, 1.0, 10, endpoint=True)

    finalstate = solver.nbody(fpm_lpt2, leapfrog(stages))
    fpm_mesh = particle_mesh.paint(finalstate.X).value

    return fpm_mesh
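These session-scoped fixtures provide the fastpm reference half of the new test suite. A consuming test (hypothetical, not part of this diff; only the fixture names above are assumed) could look like:

import numpy as np


def test_reference_fields_are_finite(fpm_lpt1_field, fpm_lpt2_field,
                                     simulation_config):
    # Painted fastpm fields are dense numpy arrays covering the full mesh.
    mesh_shape, _ = simulation_config
    assert fpm_lpt1_field.shape == tuple(mesh_shape)
    assert fpm_lpt2_field.shape == tuple(mesh_shape)
    assert np.isfinite(fpm_lpt1_field).all()
    assert np.isfinite(fpm_lpt2_field).all()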