#!/bin/bash
#SBATCH --job-name=s2_tiny_zi199_zf0_L64_N256_Np128
#SBATCH --output=/data70/hoellinger/WIP3M/s2_tiny_zi199_zf0_L64_N256_Np128/log.log
#SBATCH --error=/data70/hoellinger/WIP3M/s2_tiny_zi199_zf0_L64_N256_Np128/err.err
#SBATCH --nodes=1                # Number of nodes (value or min-max)
#SBATCH --ntasks=128             # Total number of tasks (i.e. cores)
#SBATCH --partition=comp,pscomp  # Partition name
#SBATCH --time=24:00:00
##SBATCH --exclusive
##SBATCH --nodelist=i26          # Node name
##SBATCH --mem=64G               # Memory pool for all cores (see also --mem-per-cpu)
##SBATCH --array=0-10            # Size of the array
##SBATCH --constraint=?          # Constraint, e.g. a specific node type

# NOTE (assumption about the local setup): if `conda activate` fails in the
# non-interactive batch shell, source the conda hook first, e.g.
#   source "$(conda info --base)/etc/profile.d/conda.sh"
conda activate p3m

export OMP_NUM_THREADS=64

python "${WIP3M_ROOT_PATH}src/wip3m/convergence_custom_ts_expl_parser.py" \
    --run_id s2_tiny_zi199_zf0_L64_N256_Np128 \
    --L 64 \
    --N 256 \
    --Np 128 \
    --Npm 256 \
    --n_Tiles 32 \
    --z_i 199.0 \
    --z_f 0.0 \
    --plot_fields True \
    --scale_limiter "fac_H_custom" \
    --scaling_pmref 2.0 \
    --scaling_pm1 1.5 \
    --scaling_pm2 1.2 \
    --scaling_spm 2.0 \
    --scaling_p3m1 2.0 \
    --scaling_p3m2 1.5 \
    --scaling_p3m3 1.2

exit 0
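
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script): assuming this file is saved
# as, e.g., submit_s2_tiny.sh (hypothetical name) and WIP3M_ROOT_PATH is set
# in the submission environment, it would be submitted and monitored with
# standard Slurm commands:
#
#   sbatch submit_s2_tiny.sh
#   squeue -u $USER
#   tail -f /data70/hoellinger/WIP3M/s2_tiny_zi199_zf0_L64_N256_Np128/log.log
#
# The script name above is an assumption; the log path matches the #SBATCH
# --output directive.
# ---------------------------------------------------------------------------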