From fcfad49e3c490aa13d2922cf8dfe9f8702c3ce2a Mon Sep 17 00:00:00 2001
From: Guilhem Lavaux
Date: Thu, 25 Feb 2021 14:00:20 +0200
Subject: [PATCH 1/4] Fix the size of the array to be pushed to netcdf

---
 c_tools/mock/generateMock.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/c_tools/mock/generateMock.cpp b/c_tools/mock/generateMock.cpp
index 4c59f88..e36f507 100644
--- a/c_tools/mock/generateMock.cpp
+++ b/c_tools/mock/generateMock.cpp
@@ -455,7 +455,7 @@ void saveBox(SimuData *&boxed, const std::string& outbox, generateMock_info& arg
 
   v.putVar({0}, {size_t(boxed->NumPart)}, particle_id);
   v2.putVar({0}, {size_t(boxed->NumPart)}, expansion_fac);
-  v3.putVar({0}, {size_t(boxed->NumPart)}, snapshot_split);
+  v3.putVar({0}, {size_t(num_snapshots)}, snapshot_split);
 
   if (uniqueID != 0) {
     NcVar v4 = f.addVar("unique_ids_lsb", ncInt, NumPart_dim);
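Note on the fix above: `snapshot_split` holds one entry per input snapshot, not one per particle, so the count passed to `putVar` must be `num_snapshots` rather than `NumPart`. The sketch below is not VIDE code; it illustrates the same constraint with Python's netCDF4 bindings, and the variable and dimension names are hypothetical.

```python
# Illustration only (hypothetical names): the count of a netCDF write must
# match the length of the source array, which here is one entry per snapshot.
import numpy as np
from netCDF4 import Dataset

num_part = 1000      # assumed number of particles in the boxed sample
num_snapshots = 4    # assumed number of snapshot files that were merged

with Dataset("box_example.nc", "w") as f:
    f.createDimension("numpart_dim", num_part)
    f.createDimension("numsnap_dim", num_snapshots)
    particle_id = f.createVariable("particle_ids", "i8", ("numpart_dim",))
    snapshot_split = f.createVariable("snapshot_split", "i4", ("numsnap_dim",))

    particle_id[:] = np.arange(num_part)
    # one entry per snapshot: writing NumPart values here would overrun the array
    snapshot_split[:] = np.full(num_snapshots, num_part // num_snapshots)
```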
From 569a7e40e62e0a5466d33dee7399a3899fe89208 Mon Sep 17 00:00:00 2001
From: Guilhem Lavaux
Date: Thu, 25 Feb 2021 14:07:00 +0200
Subject: [PATCH 2/4] Add some temporary instructions for executing on simulations

---
 README.md | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/README.md b/README.md
index 904e27e..4e61e05 100644
--- a/README.md
+++ b/README.md
@@ -56,6 +56,26 @@ python3 -m void_pipeline your_config_file.py
 
 The VIDE tools are all packaged in the `vide` package.
 
+Running with simulations
+------------------------
+
+!! Temporary measure !!
+
+After building the vide package and installing it with `python3 setup.py install` (for example), the
+script that builds the pipeline for simulations can be found in `build/temp.[SOMETHING]/pipeline/prepareInputs.py`.
+Copy that script to the directory where you want to run the analysis and execute it with the parameter file as an argument.
+
+For example:
+```
+mkdir $HOME/my_vide_test
+cp build/temp.[SOMETHING]/pipeline/prepareInputs.py $HOME/my_vide_test
+cp python_tools/void_pipeline/datasets/example_simulation.py $HOME/my_vide_test
+mkdir $HOME/my_vide_test/examples
+cp examples/example_simulation_z0.0.dat $HOME/my_vide_test/examples
+cd $HOME/my_vide_test
+python3 prepareInputs.py --all --parm example_simulation.py
+python3 -m void_pipeline example_simulation/sim_ss1.0.py
+```
 
 Notes for CONDA
 ---------------

From ccc469aa6e925cab074e1a12388d4c389263e5cf Mon Sep 17 00:00:00 2001
From: Guilhem Lavaux
Date: Thu, 25 Feb 2021 14:14:33 +0200
Subject: [PATCH 3/4] Add an option to change the merging threshold

---
 python_tools/vide/backend/launchers.py            | 12 ++++++------
 python_tools/void_pipeline/__main__.py            |  4 ++--
 .../void_pipeline/datasets/example_observation.py |  5 +++++
 .../void_pipeline/datasets/example_simulation.py  |  5 +++++
 4 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/python_tools/vide/backend/launchers.py b/python_tools/vide/backend/launchers.py
index 84b0681..1d0502c 100644
--- a/python_tools/vide/backend/launchers.py
+++ b/python_tools/vide/backend/launchers.py
@@ -323,7 +323,7 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
 
 # -----------------------------------------------------------------------------
 def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
-                numZobovDivisions=None, numZobovThreads=None):
+                numZobovDivisions=None, numZobovThreads=None, mergingThreshold=0.2):
 
   sampleName = sample.fullName
 
@@ -339,10 +339,10 @@ def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
   if sample.dataType == "observation":
     maskIndex = open(zobovDir+"/mask_index.txt", "r").read()
     totalPart = open(zobovDir+"/total_particles.txt", "r").read()
-    maxDen = 0.2*float(maskIndex)/float(totalPart)
+    maxDen = mergingThreshold*float(maskIndex)/float(totalPart)
   else:
     maskIndex = -1
-    maxDen = 0.2
+    maxDen = mergingThreshold
 
   if numZobovDivisions == 1:
     print("  WARNING! You are using a single ZOBOV division with a simulation. Periodic boundaries will not be respected!")
@@ -466,7 +466,7 @@ def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
 
 # -----------------------------------------------------------------------------
 def launchPrune(sample, binPath,
                 summaryFile=None, logFile=None, zobovDir=None,
-                continueRun=None, useComoving=False):
+                continueRun=None, useComoving=False, mergingThreshold=0.2):
 
   sampleName = sample.fullName
 
@@ -477,12 +477,12 @@ def launchPrune(sample, binPath,
 
   if sample.dataType == "observation":
     mockIndex = open(zobovDir+"/mask_index.txt", "r").read()
     totalPart = open(zobovDir+"/total_particles.txt", "r").read()
-    maxDen = 0.2*float(mockIndex)/float(totalPart)
+    maxDen = mergingThreshold*float(mockIndex)/float(totalPart)
     observationLine = " --isObservation"
     #periodicLine = " --periodic=''"
   else:
     mockIndex = -1
-    maxDen = 0.2
+    maxDen = mergingThreshold
     observationLine = ""
     periodicLine = " --periodic='" + getPeriodic(sample) + "'"

diff --git a/python_tools/void_pipeline/__main__.py b/python_tools/void_pipeline/__main__.py
index af3797c..304a5db 100644
--- a/python_tools/void_pipeline/__main__.py
+++ b/python_tools/void_pipeline/__main__.py
@@ -105,7 +105,7 @@ for sample in dataSampleList:
 
     launchZobov(sample, ZOBOV_PATH, zobovDir=zobovDir, logDir=logDir,
                 continueRun=continueRun, numZobovDivisions=numZobovDivisions,
-                numZobovThreads=numZobovThreads)
+                numZobovThreads=numZobovThreads, mergingThreshold=mergingThreshold)
 
   # -------------------------------------------------------------------------
   if (startCatalogStage <= 3) and (endCatalogStage >= 3) and not sample.isCombo:
@@ -119,7 +119,7 @@ for sample in dataSampleList:
 
     launchPrune(sample, PRUNE_PATH,
                 logFile=logFile, zobovDir=zobovDir,
-                useComoving=sample.useComoving, continueRun=continueRun)
+                useComoving=sample.useComoving, continueRun=continueRun, mergingThreshold=mergingThreshold)
 
   # -------------------------------------------------------------------------
   if (startCatalogStage <= 4) and (endCatalogStage >= 4):

diff --git a/python_tools/void_pipeline/datasets/example_observation.py b/python_tools/void_pipeline/datasets/example_observation.py
index 81f2ca3..b6dec36 100644
--- a/python_tools/void_pipeline/datasets/example_observation.py
+++ b/python_tools/void_pipeline/datasets/example_observation.py
@@ -54,6 +54,11 @@ numZobovThreads = 2
 
 # optimization: number of subdivisions of the box
 numZobovDivisions = 2
 
+# Maximum density for merging voids
+# 0 (equivalent to an infinitely large value) -> merge everything (no threshold)
+# 1e-9 (or any smaller non-zero value) -> do not merge anything
+mergingThreshold = 1e-9
+
 # don't change this
 dataSampleList = []

diff --git a/python_tools/void_pipeline/datasets/example_simulation.py b/python_tools/void_pipeline/datasets/example_simulation.py
index 27ddfda..0efcf23 100644
--- a/python_tools/void_pipeline/datasets/example_simulation.py
+++ b/python_tools/void_pipeline/datasets/example_simulation.py
@@ -71,6 +71,11 @@ numZobovThreads = 2
 
 # optimization: number of subdivisions of the box
 numZobovDivisions = 2
 
+# maximum density for merging voids
+# 0 (equivalent to an infinitely large value) -> merge everything (no threshold)
+# 1e-9 (or any smaller non-zero value) -> do not merge anything
+mergingThreshold = 1e-9
+
 # prefix to give all outputs
 prefix = "sim_"
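Note on how the new `mergingThreshold` parameter flows through the pipeline, based on the diff above: it defaults to 0.2 (the previously hardcoded value), is set in the parameter file, and becomes the maximum ridge density `maxDen` handed to ZOBOV and the pruning step. The sketch below is illustrative only; `max_merge_density` is a hypothetical helper, whereas the real code computes this inline in `launchZobov` and `launchPrune`.

```python
# In the parameter file (e.g. example_simulation.py):
mergingThreshold = 1e-9   # effectively disables void merging, per the comments above

# What launchZobov/launchPrune do with it (hypothetical helper for illustration):
def max_merge_density(mergingThreshold, dataType, maskIndex=None, totalPart=None):
    if dataType == "observation":
        # observations rescale the threshold by the counts read from
        # mask_index.txt and total_particles.txt, as in launchers.py
        return mergingThreshold * float(maskIndex) / float(totalPart)
    # simulations use the threshold directly
    return mergingThreshold

print(max_merge_density(1e-9, "simulation"))              # 1e-09
print(max_merge_density(0.2, "observation", 5000, 1e6))   # 0.001
```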
From 017c0095d7e3dac609bbc2728f50dbd8cef8595c Mon Sep 17 00:00:00 2001
From: Guilhem Lavaux
Date: Thu, 25 Feb 2021 14:19:00 +0200
Subject: [PATCH 4/4] Increase maximum redshift to z=10

---
 c_tools/stacking/pruneVoids.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/c_tools/stacking/pruneVoids.cpp b/c_tools/stacking/pruneVoids.cpp
index a6ca383..af48acf 100644
--- a/c_tools/stacking/pruneVoids.cpp
+++ b/c_tools/stacking/pruneVoids.cpp
@@ -170,8 +170,8 @@ int main(int argc, char **argv) {
   double result, error;
   size_t nEval;
 
-  int iZ, numZ = 4000;
-  double maxZ = 5.0, z, *dL, *redshifts;
+  int iZ, numZ = 8000;
+  double maxZ = 10.0, z, *dL, *redshifts;
   dL = (double *) malloc(numZ * sizeof(double));
   redshifts = (double *) malloc(numZ * sizeof(double));
   for (iZ = 0; iZ < numZ; iZ++) {
@@ -181,7 +181,7 @@ int main(int argc, char **argv) {
     //printf("HERE %e %e\n", z, dL[iZ]);
     redshifts[iZ] = z;
   }
-  gsl_interp *interp = gsl_interp_alloc(gsl_interp_linear, 4000);
+  gsl_interp *interp = gsl_interp_alloc(gsl_interp_linear, numZ);
   gsl_interp_init(interp, dL, redshifts, numZ);
   gsl_interp_accel *acc = gsl_interp_accel_alloc();
 
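The patch above doubles the redshift grid (numZ = 8000, maxZ = 10) and replaces the hardcoded 4000 in `gsl_interp_alloc` with `numZ`, so the interpolator is always sized to match the table it is initialized with. Below is a rough Python analogue of the distance-to-redshift lookup, assuming a flat matter+Lambda cosmology and a uniform redshift grid purely for illustration; the exact distance definition, grid, and cosmology used by pruneVoids may differ.

```python
# Tabulate redshift against comoving distance out to maxZ = 10, then invert the
# relation by interpolation. The grid length is defined once (numZ) and reused
# for both the table and the interpolator, which is the point of replacing the
# hardcoded 4000 in the C++ code above.
import numpy as np
from scipy.integrate import cumulative_trapezoid
from scipy.interpolate import interp1d

OmegaM = 0.3            # assumed matter density, for illustration only
c_over_H0 = 2997.92458  # Hubble distance in Mpc/h

numZ, maxZ = 8000, 10.0
redshifts = np.linspace(0.0, maxZ, numZ)
integrand = 1.0 / np.sqrt(OmegaM * (1.0 + redshifts)**3 + (1.0 - OmegaM))
distances = c_over_H0 * cumulative_trapezoid(integrand, redshifts, initial=0.0)

# distance -> redshift lookup, analogous to gsl_interp_init(interp, dL, redshifts, numZ)
z_of_d = interp1d(distances, redshifts)
print(float(z_of_d(2500.0)))   # redshift at a comoving distance of ~2500 Mpc/h
```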