remove script folder

Wassim KABALAN 2024-10-30 01:58:17 +01:00
parent e9529d35f8
commit 4da4c66472
4 changed files with 0 additions and 656 deletions

View file

@@ -1,123 +0,0 @@
import os

from distributed_utils import initialize_distributed, is_on_cluster

os.environ["EQX_ON_ERROR"] = "nan"  # avoid an allgather caused by diffrax
initialize_distributed()

import jax

size = jax.device_count()

import jax.numpy as jnp
import jax_cosmo as jc
import numpy as np
from diffrax import (ConstantStepSize, Dopri5, LeapfrogMidpoint, ODETerm,
                     SaveAt, diffeqsolve)
from jax.experimental import mesh_utils
from jax.experimental.multihost_utils import process_allgather
from jax.sharding import Mesh, NamedSharding
from jax.sharding import PartitionSpec as P

from jaxpm.kernels import interpolate_power_spectrum
from jaxpm.painting import cic_paint_dx
from jaxpm.pm import linear_field, lpt, make_ode_fn

size = 256
mesh_shape = [size] * 3
box_size = [float(size)] * 3
snapshots = jnp.linspace(0.1, 1., 4)
halo_size = 32

pdims = (1, 1)
mesh = None
sharding = None
if jax.device_count() > 1:
    pdims = (2, 2)
    devices = mesh_utils.create_device_mesh(pdims)
    mesh = Mesh(devices.T, axis_names=('x', 'y'))
    sharding = NamedSharding(mesh, P('x', 'y'))
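
# With more than one device, the grid is laid out on a 2D ('x', 'y') device
# mesh; every JaxPM call below (linear_field, lpt, cic_paint_dx) receives
# this same NamedSharding, so the 256^3 field stays distributed end to end.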


@jax.jit
def run_simulation(omega_c, sigma8):
    # Create a small function to generate the matter power spectrum
    k = jnp.logspace(-4, 1, 128)
    pk = jc.power.linear_matter_power(
        jc.Planck15(Omega_c=omega_c, sigma8=sigma8), k)
    pk_fn = lambda x: interpolate_power_spectrum(x, k, pk, sharding)

    # Create initial conditions
    initial_conditions = linear_field(mesh_shape,
                                      box_size,
                                      pk_fn,
                                      sharding=sharding,
                                      seed=jax.random.PRNGKey(0))

    cosmo = jc.Planck15(Omega_c=omega_c, sigma8=sigma8)

    # Initial displacement
    dx, p, _ = lpt(cosmo,
                   initial_conditions,
                   0.1,
                   halo_size=halo_size,
                   sharding=sharding)

    # return initial_conditions, cic_paint_dx(dx,
    #                                         halo_size=halo_size,
    #                                         sharding=sharding), None, None

    # Evolve the simulation forward
    ode_fn = make_ode_fn(mesh_shape, halo_size=halo_size, sharding=sharding)
    term = ODETerm(
        lambda t, state, args: jnp.stack(ode_fn(state, t, args), axis=0))
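    # NOTE: make_ode_fn returns the (position, momentum) derivatives as a
    # sequence; the lambda above stacks them to match the stacked state
    # y0 = jnp.stack([dx, p]) that diffrax integrates.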
    solver = LeapfrogMidpoint()

    stepsize_controller = ConstantStepSize()
    res = diffeqsolve(term,
                      solver,
                      t0=0.1,
                      t1=1.,
                      dt0=0.01,
                      y0=jnp.stack([dx, p], axis=0),
                      args=cosmo,
                      saveat=SaveAt(ts=snapshots),
                      stepsize_controller=stepsize_controller)

    # Return the simulation volume at the requested snapshots
    states = res.ys
    field = cic_paint_dx(dx, halo_size=halo_size, sharding=sharding)
    final_fields = [
        cic_paint_dx(state[0], halo_size=halo_size, sharding=sharding)
        for state in states
    ]

    return initial_conditions, field, final_fields, res.stats


# Run the simulation
distributed_str = "distributed" if mesh is not None else "single device"
print(f"running {distributed_str} simulation")
init, field, final_fields, stats = run_simulation(0.32, 0.8)

# Print the statistics
print(stats)
print("done, now saving")

if is_on_cluster():
    rank = jax.process_index()
    # Save the local shard of each field, one file per process
    np.save(f'initial_conditions_{rank}.npy', init.addressable_data(0))
    np.save(f'field_{rank}.npy', field.addressable_data(0))
    if final_fields is not None:
        for i, final_field in enumerate(final_fields):
            np.save(f'final_field_{i}_{rank}.npy',
                    final_field.addressable_data(0))
else:
    # Running locally: gather the full arrays onto the host before saving
    gathered_init = process_allgather(init, tiled=True)
    gathered_field = process_allgather(field, tiled=True)
    np.save('initial_conditions.npy', gathered_init)
    np.save('field.npy', gathered_field)
    if final_fields is not None:
        for i, final_field in enumerate(final_fields):
            gathered_final_field = process_allgather(final_field, tiled=True)
            np.save(f'final_field_{i}.npy', gathered_final_field)

print("Finished!")

View file

@@ -1,144 +0,0 @@
import os
from math import prod

setup_done = False
on_cluster = False


def is_on_cluster():
    global on_cluster
    return on_cluster


def initialize_distributed():
    global setup_done
    global on_cluster
    if not setup_done:
        if "SLURM_JOB_ID" in os.environ:
            on_cluster = True
            print("Running on cluster")
            import jax
            jax.distributed.initialize()
            setup_done = True
            on_cluster = True
        else:
            print("Running locally")
            setup_done = True
            on_cluster = False
            os.environ["JAX_PLATFORM_NAME"] = "cpu"
            os.environ[
                "XLA_FLAGS"] = "--xla_force_host_platform_device_count=4"
            import jax


def compare_sharding(sharding1, sharding2):
    from jaxdecomp._src.spmd_ops import get_pdims_from_sharding
    pdims1 = get_pdims_from_sharding(sharding1)
    pdims2 = get_pdims_from_sharding(sharding2)
    # Pad both pdims to length 3 before comparing
    pdims1 = pdims1 + (1, ) * (3 - len(pdims1))
    pdims2 = pdims2 + (1, ) * (3 - len(pdims2))
    return pdims1 == pdims2


def replace_none_or_zero(value):
    # Treat a None slice bound as 0
    return 0 if value is None else value


def process_slices(slices_tuple):
    start_product = 1
    stop_product = 1
    for s in slices_tuple:
        # Multiply the start and stop values, treating None as 0
        start_product *= replace_none_or_zero(s.start)
        stop_product *= replace_none_or_zero(s.stop)
    # Return the sum of the two products as a deterministic per-block seed
    return int(start_product + stop_product)
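
# Worked example (hypothetical slices): for the local block
# (slice(0, 128), slice(128, 256), slice(0, 256)) the start product is
# 0 * 128 * 0 = 0 and the stop product is 128 * 256 * 256 = 8388608,
# so the seed is 8388608 and distinct blocks generally get distinct PRNG keys.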


def device_arange(pdims):
    import jax
    from jax import numpy as jnp
    from jax.experimental import mesh_utils
    from jax.sharding import Mesh, NamedSharding
    from jax.sharding import PartitionSpec as P

    devices = mesh_utils.create_device_mesh(pdims)
    mesh = Mesh(devices.T, axis_names=('z', 'y'))
    sharding = NamedSharding(mesh, P('z', 'y'))

    def generate_aranged(x):
        x_start = replace_none_or_zero(x[0].start)
        y_start = replace_none_or_zero(x[1].start)
        a = jnp.array([[x_start + y_start * pdims[0]]])
        print(f"index is {x} and value is {a}")
        return a

    aranged = jax.make_array_from_callback(mesh.devices.shape,
                                           sharding,
                                           data_callback=generate_aranged)
    return aranged


def create_ones_spmd_array(global_shape, pdims):
    import jax
    from jax.experimental import mesh_utils
    from jax.sharding import Mesh, NamedSharding
    from jax.sharding import PartitionSpec as P

    size = jax.device_count()
    assert (len(global_shape) == 3)
    assert (len(pdims) == 2)
    assert (
        prod(pdims) == size
    ), "The product of pdims must be equal to the number of devices"

    local_shape = (global_shape[0] // pdims[1], global_shape[1] // pdims[0],
                   global_shape[2])
    # Remap to the global array from the local slice
    devices = mesh_utils.create_device_mesh(pdims)
    mesh = Mesh(devices.T, axis_names=('z', 'y'))
    sharding = NamedSharding(mesh, P('z', 'y'))
    global_array = jax.make_array_from_callback(
        global_shape,
        sharding,
        data_callback=lambda _: jax.numpy.ones(local_shape))

    return global_array, mesh


# Helper function to create a 3D array and remap it to the global array
def create_spmd_array(global_shape, pdims):
    import jax
    from jax.experimental import mesh_utils
    from jax.sharding import Mesh, NamedSharding
    from jax.sharding import PartitionSpec as P

    size = jax.device_count()
    assert (len(global_shape) == 3)
    assert (len(pdims) == 2)
    assert (
        prod(pdims) == size
    ), "The product of pdims must be equal to the number of devices"

    local_shape = (global_shape[0] // pdims[1], global_shape[1] // pdims[0],
                   global_shape[2])
    # Remap to the global array from the local slice
    devices = mesh_utils.create_device_mesh(pdims)
    mesh = Mesh(devices.T, axis_names=('z', 'y'))
    sharding = NamedSharding(mesh, P('z', 'y'))
    global_array = jax.make_array_from_callback(
        global_shape,
        sharding,
        data_callback=lambda x: jax.random.normal(
            jax.random.PRNGKey(process_slices(x)), local_shape))

    return global_array, mesh
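
# Example usage (hypothetical, assuming 4 devices):
#   array, mesh = create_spmd_array((256, 256, 256), (2, 2))
#   array.shape                      -> (256, 256, 256), sharded over ('z', 'y')
#   array.addressable_data(0).shape  -> (128, 128, 256) on each device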

File diff suppressed because one or more lines are too long

View file

@@ -1,168 +0,0 @@
#!/bin/bash
##########################################
## SELECT EITHER tkc@a100 OR tkc@v100 ##
##########################################
#SBATCH --account tkc@a100
##########################################
#SBATCH --job-name=Particle-Mesh # job name
# A partition other than the default one can be used
# by activating one of the following 5 directives:
##########################################
## SELECT EITHER a100 or v100-32g ##
##########################################
#SBATCH -C a100
##########################################
#******************************************
##########################################
## SELECT Number of nodes and GPUs per node
## For A100 ntasks-per-node and gres=gpu should be 8
## For V100 ntasks-per-node and gres=gpu should be 4
##########################################
#SBATCH --nodes=1                # number of nodes
#SBATCH --ntasks-per-node=8      # number of MPI tasks per node (= number of GPUs per node)
#SBATCH --gres=gpu:8             # number of GPUs per node (max 8 with gpu_p2, gpu_p5)
##########################################
## The number of CPUs per task must be adapted to the partition in use. Since
## only one GPU is reserved per task (i.e. 1/4 or 1/8 of the node's GPUs
## depending on the partition), the ideal is to reserve 1/4 or 1/8 of the
## node's CPUs for each task:
##########################################
#SBATCH --cpus-per-task=8        # number of CPUs per task for gpu_p5 (1/8 of the 8-GPU node)
##########################################
# /!\ Caution: "multithread" refers to hyperthreading in Slurm terminology
#SBATCH --hint=nomultithread     # hyperthreading disabled
#SBATCH --time=04:00:00          # maximum requested run time (HH:MM:SS)
#SBATCH --output=%x_%N_a100.out  # name of the output file
#SBATCH --error=%x_%N_a100.out   # name of the error file (here merged with the output)
#SBATCH --qos=qos_gpu-dev
#SBATCH --exclusive              # dedicated resources
# Purge the modules loaded interactively and inherited by default
num_nodes=$SLURM_JOB_NUM_NODES
num_gpu_per_node=$SLURM_NTASKS_PER_NODE
OUTPUT_FOLDER_ARGS=1
# Calculate the total number of GPUs
nb_gpus=$(( num_nodes * num_gpu_per_node ))
module purge

# Uncomment the following module command if you are using the "gpu_p5" partition
# to get access to the modules compatible with that partition
if [ $num_gpu_per_node -eq 8 ]; then
    module load cpuarch/amd
    source /gpfsdswork/projects/rech/tkc/commun/venv/a100/bin/activate
else
    source /gpfsdswork/projects/rech/tkc/commun/venv/v100/bin/activate
fi

# Load the modules
module load nvidia-compilers/23.9 cuda/12.2.0 cudnn/8.9.7.29-cuda openmpi/4.1.5-cuda nccl/2.18.5-1-cuda cmake
module load nvidia-nsight-systems/2024.1.1.59

echo "The number of nodes allocated for this job is: $num_nodes"
echo "The number of GPUs allocated for this job is: $nb_gpus"

export ENABLE_PERFO_STEP=NVTX
export MPI4JAX_USE_CUDA_MPI=1

function profile_python() {
    if [ $# -lt 1 ]; then
        echo "Usage: profile_python <python_script> [arguments for the script]"
        return 1
    fi

    local script_name=$(basename "$1" .py)
    local output_dir="prof_traces/$script_name"
    local report_dir="out_prof/$gpu_name/$nb_gpus/$script_name"

    if [ $OUTPUT_FOLDER_ARGS -eq 1 ]; then
        local args=$(echo "${@:2}" | tr ' ' '_')
        # Remove the characters '/' and '-' from the folder name
        args=$(echo "$args" | tr -d '/-')
        output_dir="prof_traces/$script_name/$args"
        report_dir="out_prof/$gpu_name/$nb_gpus/$script_name/$args"
    fi

    mkdir -p "$output_dir"
    mkdir -p "$report_dir"

    srun nsys profile -t cuda,nvtx,osrt,mpi -o "$report_dir/report_rank%q{SLURM_PROCID}" python "$@" > "$output_dir/$script_name.out" 2> "$output_dir/$script_name.err" || true
}

function run_python() {
    if [ $# -lt 1 ]; then
        echo "Usage: run_python <python_script> [arguments for the script]"
        return 1
    fi

    local script_name=$(basename "$1" .py)
    local output_dir="traces/$script_name"

    if [ $OUTPUT_FOLDER_ARGS -eq 1 ]; then
        local args=$(echo "${@:2}" | tr ' ' '_')
        # Remove the characters '/' and '-' from the folder name
        args=$(echo "$args" | tr -d '/-')
        output_dir="traces/$script_name/$args"
    fi

    mkdir -p "$output_dir"

    srun python "$@" > "$output_dir/$script_name.out" 2> "$output_dir/$script_name.err" || true
}
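
# Example invocation (hypothetical arguments): both helpers take the Python
# script followed by its own flags, e.g.
#   run_python scripts/fastpm_jaxdecomp.py -m 1024 -b 1024 -p 2x4
#   profile_python scripts/fastpm_jaxdecomp.py -m 1024 -b 1024 -p 2x4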

# Echo the commands as they are executed
set -x

# For the "gpu_p5" partition, the code must be compiled with the compatible modules
# Run the code with binding via bind_gpu.sh: 1 GPU per task
declare -A pdims_table
# Define the table
pdims_table[1]="1x1"
pdims_table[4]="2x2 1x4"
pdims_table[8]="2x4 1x8"
pdims_table[16]="2x8 1x16"
pdims_table[32]="4x8 1x32"
pdims_table[64]="4x16 1x64"
pdims_table[128]="8x16 16x8 4x32 32x4 1x128 128x1 2x64 64x2"
pdims_table[160]="8x20 20x8 16x10 10x16 5x32 32x5 1x160 160x1 2x80 80x2 4x40 40x4"
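# Each entry maps a total GPU count to the "PxQ" process-grid decompositions
# to benchmark; with 8 GPUs, for example, both a 2x4 and a 1x8 grid are run.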
#mpch=(128 256 512 1024 2048 4096)
grid=(1024 2048 4096)
pdim="${pdims_table[$nb_gpus]}"
echo "pdims: $pdim"

# Check that pdims is not empty
if [ -z "$pdim" ]; then
    echo "pdims is empty"
    echo "The number of GPUs has to be 1, 4, 8, 16, 32, 64, 128 or 160"
    echo "Number of nodes selected: $num_nodes"
    echo "Number of GPUs per node: $num_gpu_per_node"
    exit 1
fi

# GPU name is a100 if num_gpu_per_node is 8, otherwise it is v100
if [ $num_gpu_per_node -eq 8 ]; then
    gpu_name="a100"
else
    gpu_name="v100"
fi

out_dir="out/$gpu_name/$nb_gpus"
echo "Output dir is: $out_dir"

for g in ${grid[@]}; do
    for p in ${pdim[@]}; do
        # halo is 1/4 of the grid size
        halo_size=$((g / 4))
        slaunch scripts/fastpm_jaxdecomp.py -m $g -b $g -p $p -hs $halo_size -ode diffrax -o $out_dir
    done
done