Mirror of https://github.com/Richard-Sti/csiborgtools.git (synced 2024-12-22 12:38:02 +00:00)
Dump radial profile information (#48)
* add radial position path
* pep8
* Add basic fit profile dumping
* pep8
* pep8
* pep8
* pep8
* pep8
* pep8
* Update TODO
* Fix parts is None bug
* Update nb
This commit is contained in:
parent 1a115f481d, commit f48eb6dcb0

29 changed files with 512 additions and 395 deletions
.gitignore (vendored, 2 changes)

@@ -1,4 +1,4 @@
-venv_galomatch/
+venv_csiborg/
 share/
 bin/
 .bashrc
@@ -15,7 +15,6 @@
 from csiborgtools import clustering, field, fits, match, read  # noqa

 # Arguments to csiborgtools.read.CSiBORGPaths.
-paths_glamdring = {
-    "srcdir": "/mnt/extraspace/hdesmond/",
-    "postdir": "/mnt/extraspace/rstiskalek/csiborg/"
-}
+paths_glamdring = {"srcdir": "/mnt/extraspace/hdesmond/",
+                   "postdir": "/mnt/extraspace/rstiskalek/csiborg/",
+                   }
@@ -28,4 +28,4 @@ try:
     from .tpcf import Mock2PCF  # noqa
 except ImportError:
-    warn("`Corrfunc` not installed. 2PCF modules will not be available (`Mock2PCF`).")  # noqa
+    warn("`Corrfunc` not installed. 2PCF modules will not be available .")  # noqa
@@ -66,7 +66,7 @@ class RVSinsphere(BaseRVS):
     def __call__(self, nsamples, random_state=42, dtype=numpy.float32):
         gen = numpy.random.default_rng(random_state)
         # Spherical
-        r = gen.random(nsamples, dtype=dtype)**(1/3) * self.R
+        r = gen.random(nsamples, dtype=dtype)**(1 / 3) * self.R
         theta = 2 * numpy.arcsin(gen.random(nsamples, dtype=dtype))
         phi = 2 * numpy.pi * gen.random(nsamples, dtype=dtype)
         # Cartesian
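Aside on the sampling above: for points distributed uniformly in a ball of radius :math:`R`, the radial CDF is :math:`P(<r) = (r/R)^3`, so inverse-transform sampling gives :math:`r = R\,u^{1/3}` with :math:`u \sim \mathrm{U}(0, 1)`, which is what the `gen.random(...)**(1 / 3) * self.R` line implements; the whitespace change around `1 / 3` is purely cosmetic.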
@@ -105,7 +105,7 @@ class DensityField:
     @box.setter
     def box(self, box):
         try:
             assert box._name == "box_units"
             self._box = box
         except AttributeError as err:
             raise TypeError from err
@@ -40,7 +40,7 @@ class BaseStructure(ABC):

     @particles.setter
     def particles(self, particles):
-        pars = ["x", "y", "z", "vx", "vy", "vz", "M"]
+        pars = ["x", "y", "z", "M"]
         assert all(p in particles.dtype.names for p in pars)
         self._particles = particles
@@ -204,11 +204,12 @@ class BaseStructure(ABC):
             return numpy.nan
         mass = self.enclosed_mass(radius)
         V = numpy.sqrt(self.box.box_G * mass / radius)
-        return numpy.linalg.norm(self.angular_momentum(radius)) / (
-            numpy.sqrt(2) * mass * V * radius
-        )
+        out = numpy.linalg.norm(self.angular_momentum(radius))
+        out /= numpy.sqrt(2) * mass * V * radius
+        return out

-    def spherical_overdensity_mass(self, delta_mult, npart_min=10, kind="crit"):
+    def spherical_overdensity_mass(self, delta_mult, npart_min=10,
+                                   kind="crit"):
         r"""
         Calculate spherical overdensity mass and radius. The mass is defined as
         the enclosed mass within an outermost radius where the mean enclosed
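For context, the quantity assembled in `out` above is the Bullock spin parameter, :math:`\lambda' = |\mathbf{J}| / (\sqrt{2}\, M V R)` with circular velocity :math:`V = \sqrt{G M / R}` (Bullock et al. 2001); the rewrite only splits the expression across statements to satisfy the line-length limit.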
@@ -364,9 +364,7 @@ class NFWPosterior(NFWProfile):
         if not (numpy.all(numpy.isfinite(bounds)) and bounds[0] < bounds[1]):
             return numpy.nan, numpy.nan

-        res = minimize_scalar(
-            loss, bounds=(numpy.log10(rmin), numpy.log10(rmax)), method="bounded"
-        )
+        res = minimize_scalar(loss, bounds=bounds, method="bounded")
         # Check whether the fit converged to radius sufficienly far from `rmax`
         # and that its a success. Otherwise return NaNs.
         if numpy.log10(rmax) - res.x < eps:
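For reference, a minimal self-contained sketch of the bounded scalar minimisation pattern used above; the quadratic `loss` and the bounds here are illustrative stand-ins, not the repository's own posterior:

    import numpy
    from scipy.optimize import minimize_scalar

    def loss(log_r):
        # Hypothetical loss with its minimum at log_r = 1.
        return (log_r - 1.0) ** 2

    bounds = (numpy.log10(0.1), numpy.log10(100.0))
    res = minimize_scalar(loss, bounds=bounds, method="bounded")
    print(res.success, res.x)  # True, x close to 1.0

`res.x` is then compared against the upper bound, as in the convergence check that follows the call.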
@@ -105,13 +105,13 @@ class RealisationsMatcher:
         """
         return self._overlapper

-    def cross(
-        self, cat0, catx, halos0_archive, halosx_archive, delta_bckg, verbose=True
-    ):
+    def cross(self, cat0, catx, halos0_archive, halosx_archive, delta_bckg,
+              verbose=True):
         r"""
         Find all neighbours whose CM separation is less than `nmult` times the
-        sum of their initial Lagrangian patch sizes and calculate their overlap.
-        Enforces that the neighbours' are similar in mass up to `dlogmass` dex.
+        sum of their initial Lagrangian patch sizes and calculate their
+        overlap. Enforces that the neighbours' are similar in mass up to
+        `dlogmass` dex.

         Parameters
         ----------
@@ -121,12 +121,12 @@ class RealisationsMatcher:
             Halo catalogue of the cross simulation.
         halos0_archive : `NpzFile` object
             Archive of halos' particles of the reference simulation, keys must
-            include `x`, `y`, `z` and `M`. The positions must already be converted
-            to cell numbers.
+            include `x`, `y`, `z` and `M`. The positions must already be
+            converted to cell numbers.
         halosx_archive : `NpzFile` object
             Archive of halos' particles of the cross simulation, keys must
-            include `x`, `y`, `z` and `M`. The positions must already be converted
-            to cell numbers.
+            include `x`, `y`, `z` and `M`. The positions must already be
+            converted to cell numbers.
         delta_bckg : 3-dimensional array
             Summed background density field of the reference and cross
             simulations calculated with particles assigned to halos at the
@@ -138,25 +138,23 @@ class RealisationsMatcher:
         Returns
         -------
         match_indxs : 1-dimensional array of arrays
-            The outer array corresponds to halos in the reference catalogue, the
-            inner array corresponds to the array positions of matches in the cross
-            catalogue.
+            The outer array corresponds to halos in the reference catalogue,
+            the inner array corresponds to the array positions of matches in
+            the cross catalogue.
         overlaps : 1-dimensional array of arrays
-            Overlaps with the cross catalogue. Follows similar pattern as `match_indxs`.
+            Overlaps with the cross catalogue. Follows similar pattern as
+            `match_indxs`.
         """
         # We begin by querying the kNN for the nearest neighbours of each halo
         # in the reference simulation from the cross simulation in the initial
         # snapshot.
-        verbose and print("{}: querying the KNN.".format(datetime.now()), flush=True)
+        if verbose:
+            now = datetime.now()
+            print(f"{now}: querying the KNN.", flush=True)
         match_indxs = radius_neighbours(
-            catx.knn(select_initial=True),
-            cat0.positions(in_initial=True),
-            radiusX=cat0["lagpatch"],
-            radiusKNN=catx["lagpatch"],
-            nmult=self.nmult,
-            enforce_int32=True,
-            verbose=verbose,
-        )
+            catx.knn(select_initial=True), cat0.positions(in_initial=True),
+            radiusX=cat0["lagpatch"], radiusKNN=catx["lagpatch"],
+            nmult=self.nmult, enforce_int32=True, verbose=verbose)
         # We next remove neighbours whose mass is too large/small.
         if self.dlogmass is not None:
             for i, indx in enumerate(match_indxs):
@@ -165,34 +163,35 @@ class RealisationsMatcher:
                 aratio = numpy.abs(numpy.log10(catx[p][indx] / cat0[p][i]))
                 match_indxs[i] = match_indxs[i][aratio < self.dlogmass]

-        # We will make a dictionary to keep in memory the halos' particles from the
-        # cross simulations so that they are not loaded in several times and we only
-        # convert their positions to cells once. Possibly make an option to not do
-        # this to lower memory requirements?
+        # We will make a dictionary to keep in memory the halos' particles from
+        # the cross simulations so that they are not loaded in several times
+        # and we only convert their positions to cells once. Possibly make an
+        # option to not do this to lower memory requirements?
         cross_halos = {}
         cross_lims = {}
         cross = [numpy.asanyarray([], dtype=numpy.float32)] * match_indxs.size
-        for i, k0 in enumerate(tqdm(cat0["index"]) if verbose else cat0["index"]):
+        indxs = cat0["index"]
+        for i, k0 in enumerate(tqdm(indxs) if verbose else indxs):
             # If we have no matches continue to the next halo.
             matches = match_indxs[i]
             if matches.size == 0:
                 continue
-            # Next, we find this halo's particles, total mass and minimum/maximum cells
-            # and convert positions to cells.
+            # Next, we find this halo's particles, total mass, minimum and
+            # maximum cells and convert positions to cells.
             halo0 = halos0_archive[str(k0)]
             mass0 = numpy.sum(halo0["M"])
-            mins0, maxs0 = get_halolims(
-                halo0, ncells=self.overlapper.inv_clength, nshift=self.overlapper.nshift
-            )
+            mins0, maxs0 = get_halolims(halo0,
+                                        ncells=self.overlapper.inv_clength,
+                                        nshift=self.overlapper.nshift)
             for p in ("x", "y", "z"):
                 halo0[p] = self.overlapper.pos2cell(halo0[p])
-            # We now loop over matches of this halo and calculate their overlap,
-            # storing them in `_cross`.
+            # We now loop over matches of this halo and calculate their
+            # overlap, storing them in `_cross`.
             _cross = numpy.full(matches.size, numpy.nan, dtype=numpy.float32)
             for j, kf in enumerate(catx["index"][matches]):
-                # Attempt to load this cross halo from memory, if it fails get it from
-                # from the halo archive (and similarly for the limits) and convert the
-                # particle positions to cells.
+                # Attempt to load this cross halo from memory, if it fails get
+                # it from from the halo archive (and similarly for the limits)
+                # and convert the particle positions to cells.
                 try:
                     halox = cross_halos[kf]
                     minsx, maxsx = cross_lims[kf]
@@ -208,17 +207,9 @@ class RealisationsMatcher:
                     cross_halos[kf] = halox
                     cross_lims[kf] = (minsx, maxsx)
                 massx = numpy.sum(halox["M"])
-                _cross[j] = self.overlapper(
-                    halo0,
-                    halox,
-                    delta_bckg,
-                    mins0,
-                    maxs0,
-                    minsx,
-                    maxsx,
-                    mass1=mass0,
-                    mass2=massx,
-                )
+                _cross[j] = self.overlapper(halo0, halox, delta_bckg, mins0,
+                                            maxs0, minsx, maxsx, mass1=mass0,
+                                            mass2=massx)
             cross[i] = _cross

         # We remove all matches that have zero overlap to save space.
@@ -233,17 +224,8 @@ class RealisationsMatcher:
         cross = numpy.asanyarray(cross, dtype=object)
         return match_indxs, cross

-    def smoothed_cross(
-        self,
-        cat0,
-        catx,
-        halos0_archive,
-        halosx_archive,
-        delta_bckg,
-        match_indxs,
-        smooth_kwargs,
-        verbose=True,
-    ):
+    def smoothed_cross(self, cat0, catx, halos0_archive, halosx_archive,
+                       delta_bckg, match_indxs, smooth_kwargs, verbose=True):
         r"""
         Calculate the smoothed overlaps for pair previously identified via
         `self.cross(...)` to have a non-zero overlap.
@@ -256,12 +238,12 @@ class RealisationsMatcher:
             Halo catalogue of the cross simulation.
         halos0_archive : `NpzFile` object
             Archive of halos' particles of the reference simulation, keys must
-            include `x`, `y`, `z` and `M`. The positions must already be converted
-            to cell numbers.
+            include `x`, `y`, `z` and `M`. The positions must already be
+            converted to cell numbers.
         halosx_archive : `NpzFile` object
             Archive of halos' particles of the cross simulation, keys must
-            include `x`, `y`, `z` and `M`. The positions must already be converted
-            to cell numbers.
+            include `x`, `y`, `z` and `M`. The positions must already be
+            converted to cell numbers.
         delta_bckg : 3-dimensional array
             Smoothed summed background density field of the reference and cross
             simulations calculated with particles assigned to halos at the
@@ -287,40 +269,32 @@ class RealisationsMatcher:
         cross_lims = {}
         cross = [numpy.asanyarray([], dtype=numpy.float32)] * match_indxs.size

-        for i, k0 in enumerate(tqdm(cat0["index"]) if verbose else cat0["index"]):
+        indxs = cat0["index"]
+        for i, k0 in enumerate(tqdm(indxs) if verbose else indxs):
             halo0 = halos0_archive[str(k0)]
-            mins0, maxs0 = get_halolims(
-                halo0, ncells=self.overlapper.inv_clength, nshift=self.overlapper.nshift
-            )
+            mins0, maxs0 = get_halolims(halo0,
+                                        ncells=self.overlapper.inv_clength,
+                                        nshift=self.overlapper.nshift)

             # Now loop over the matches and calculate the smoothed overlap.
             _cross = numpy.full(match_indxs[i].size, numpy.nan, numpy.float32)
             for j, kf in enumerate(catx["index"][match_indxs[i]]):
-                # Attempt to load this cross halo from memory, if it fails get it from
-                # from the halo archive (and similarly for the limits).
+                # Attempt to load this cross halo from memory, if it fails get
+                # it from from the halo archive (and similarly for the limits).
                 try:
                     halox = cross_halos[kf]
                     minsx, maxsx = cross_lims[kf]
                 except KeyError:
                     halox = halosx_archive[str(kf)]
                     minsx, maxsx = get_halolims(
-                        halox,
-                        ncells=self.overlapper.inv_clength,
-                        nshift=self.overlapper.nshift,
-                    )
+                        halox, ncells=self.overlapper.inv_clength,
+                        nshift=self.overlapper.nshift)
                     cross_halos[kf] = halox
                     cross_lims[kf] = (minsx, maxsx)

-                _cross[j] = self.overlapper(
-                    halo0,
-                    halox,
-                    delta_bckg,
-                    mins0,
-                    maxs0,
-                    minsx,
-                    maxsx,
-                    smooth_kwargs=smooth_kwargs,
-                )
+                _cross[j] = self.overlapper(halo0, halox, delta_bckg, mins0,
+                                            maxs0, minsx, maxsx,
+                                            smooth_kwargs=smooth_kwargs)
             cross[i] = _cross

         return numpy.asanyarray(cross, dtype=object)
@@ -398,9 +372,9 @@ class ParticleOverlap:

     def make_bckg_delta(self, halo_archive, delta=None, verbose=False):
         """
-        Calculate a NGP density field of particles belonging to halos within the
-        central :math:`1/2^3` high-resolution region of the simulation. Smoothing
-        must be applied separately.
+        Calculate a NGP density field of particles belonging to halos within
+        the central :math:`1/2^3` high-resolution region of the simulation.
+        Smoothing must be applied separately.

         Parameters
         ----------
@@ -417,16 +391,16 @@ class ParticleOverlap:
         -------
         delta : 3-dimensional array
         """
-        # We obtain the minimum/maximum cell IDs and number of cells along each dim.
+        # We obtain the minimum/maximum cell IDs and number of cells
         cellmin = self.inv_clength // 4  # The minimum cell ID
         cellmax = 3 * self.inv_clength // 4  # The maximum cell ID
         ncells = cellmax - cellmin
-        # We then pre-allocate the density field or check it is of the right shape
-        # if already given.
+        # We then pre-allocate the density field/check it is of the right shape
         if delta is None:
             delta = numpy.zeros((ncells,) * 3, dtype=numpy.float32)
         else:
-            assert (delta.shape == (ncells,) * 3) & (delta.dtype == numpy.float32)
+            assert ((delta.shape == (ncells,) * 3)
+                    & (delta.dtype == numpy.float32))

         # We now loop one-by-one over the halos fill the density field.
         files = halo_archive.files
|
||||||
mass = parts["M"]
|
mass = parts["M"]
|
||||||
|
|
||||||
# We mask out particles outside the cubical high-resolution region
|
# We mask out particles outside the cubical high-resolution region
|
||||||
mask = (
|
mask = ((cellmin <= cells[0])
|
||||||
(cellmin <= cells[0])
|
& (cells[0] < cellmax)
|
||||||
& (cells[0] < cellmax)
|
& (cellmin <= cells[1])
|
||||||
& (cellmin <= cells[1])
|
& (cells[1] < cellmax)
|
||||||
& (cells[1] < cellmax)
|
& (cellmin <= cells[2])
|
||||||
& (cellmin <= cells[2])
|
& (cells[2] < cellmax))
|
||||||
& (cells[2] < cellmax)
|
|
||||||
)
|
|
||||||
cells = [c[mask] for c in cells]
|
cells = [c[mask] for c in cells]
|
||||||
mass = mass[mask]
|
mass = mass[mask]
|
||||||
fill_delta(delta, *cells, *(cellmin,) * 3, mass)
|
fill_delta(delta, *cells, *(cellmin,) * 3, mass)
|
||||||
|
|
||||||
return delta
|
return delta
|
||||||
|
|
||||||
def make_delta(self, clump, mins=None, maxs=None, subbox=False, smooth_kwargs=None):
|
def make_delta(self, clump, mins=None, maxs=None, subbox=False,
|
||||||
|
smooth_kwargs=None):
|
||||||
"""
|
"""
|
||||||
Calculate a NGP density field of a halo on a cubic grid. Optionally can
|
Calculate a NGP density field of a halo on a cubic grid. Optionally can
|
||||||
be smoothed with a Gaussian kernel.
|
be smoothed with a Gaussian kernel.
|
||||||
|
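A minimal sketch of the nearest-grid-point (NGP) deposit that `fill_delta` performs after the masking above, assuming integer cell indices are already available; this is a stand-in in plain numpy, not the repository's own routine. `numpy.add.at` is used because it accumulates correctly when several particles land in the same cell:

    import numpy

    def ngp_deposit(delta, ix, iy, iz, mass, cellmin):
        # Shift indices to the subgrid origin and accumulate particle masses.
        numpy.add.at(delta, (ix - cellmin, iy - cellmin, iz - cellmin), mass)

    delta = numpy.zeros((4, 4, 4), dtype=numpy.float32)
    ix = numpy.array([5, 5, 6])
    iy = numpy.array([5, 5, 7])
    iz = numpy.array([5, 5, 8])
    mass = numpy.array([1.0, 2.0, 0.5], dtype=numpy.float32)
    ngp_deposit(delta, ix, iy, iz, mass, cellmin=5)  # delta[0, 0, 0] == 3.0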
@@ -505,16 +478,8 @@ class ParticleOverlap:
             gaussian_filter(delta, output=delta, **smooth_kwargs)
         return delta

-    def make_deltas(
-        self,
-        clump1,
-        clump2,
-        mins1=None,
-        maxs1=None,
-        mins2=None,
-        maxs2=None,
-        smooth_kwargs=None,
-    ):
+    def make_deltas(self, clump1, clump2, mins1=None, maxs1=None, mins2=None,
+                    maxs2=None, smooth_kwargs=None):
         """
         Calculate a NGP density fields of two halos on a grid that encloses
         them both. Optionally can be smoothed with a Gaussian kernel.
@@ -596,19 +561,9 @@ class ParticleOverlap:
             gaussian_filter(delta2, output=delta2, **smooth_kwargs)
         return delta1, delta2, cellmins, nonzero

-    def __call__(
-        self,
-        clump1,
-        clump2,
-        delta_bckg,
-        mins1=None,
-        maxs1=None,
-        mins2=None,
-        maxs2=None,
-        mass1=None,
-        mass2=None,
-        smooth_kwargs=None,
-    ):
+    def __call__(self, clump1, clump2, delta_bckg, mins1=None, maxs1=None,
+                 mins2=None, maxs2=None, mass1=None, mass2=None,
+                 smooth_kwargs=None):
         """
         Calculate overlap between `clump1` and `clump2`. See
         `calculate_overlap(...)` for further information. Be careful so that
@@ -647,8 +602,8 @@ class ParticleOverlap:
         overlap : float
         """
         delta1, delta2, cellmins, nonzero = self.make_deltas(
-            clump1, clump2, mins1, maxs1, mins2, maxs2, smooth_kwargs=smooth_kwargs
-        )
+            clump1, clump2, mins1, maxs1, mins2, maxs2,
+            smooth_kwargs=smooth_kwargs)

         if smooth_kwargs is not None:
             return calculate_overlap(delta1, delta2, cellmins, delta_bckg)
@@ -656,8 +611,7 @@ class ParticleOverlap:
         mass1 = numpy.sum(clump1["M"]) if mass1 is None else mass1
         mass2 = numpy.sum(clump2["M"]) if mass2 is None else mass2
         return calculate_overlap_indxs(
-            delta1, delta2, cellmins, delta_bckg, nonzero, mass1, mass2
-        )
+            delta1, delta2, cellmins, delta_bckg, nonzero, mass1, mass2)


 ###############################################################################
@@ -963,8 +917,8 @@ def radius_neighbours(

     for i in trange(nsamples) if verbose else range(nsamples):
         dist, indx = knn.radius_neighbors(
-            X[i, :].reshape(-1, 3), radiusX[i] + patchknn_max, sort_results=True
-        )
+            X[i, :].reshape(-1, 3), radiusX[i] + patchknn_max,
+            sort_results=True)
         # Note that `dist` and `indx` are wrapped in 1-element arrays
         # so we take the first item where appropriate
        mask = (dist[0] / (radiusX[i] + radiusKNN[indx[0]])) < nmult
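A self-contained sketch of the `radius_neighbors` query pattern above; as the comment notes, scikit-learn returns one array per query point, hence the `[0]` unwrapping in the loop (toy data, not the pipeline's):

    import numpy
    from sklearn.neighbors import NearestNeighbors

    points = numpy.random.default_rng(42).random((100, 3))
    knn = NearestNeighbors().fit(points)

    query = points[0].reshape(-1, 3)  # a single query point
    dist, indx = knn.radius_neighbors(query, radius=0.2, sort_results=True)
    print(dist[0], indx[0])           # 1-element wrappers unwrapped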
@@ -41,9 +41,9 @@ def concatenate_parts(list_parts, include_velocities=False):
     else:
         posdtype = numpy.float32

-    # We pre-allocate an empty array. By default, we include just particle positions,
-    # which may be specified by cell IDs if integers, and masses. Additionally also
-    # outputs velocities.
+    # We pre-allocate an empty array. By default, we include just particle
+    # positions, which may be specified by cell IDs if integers, and masses.
+    # Additionally also outputs velocities.
     if include_velocities:
         dtype = {
             "names": ["x", "y", "z", "vx", "vy", "vz", "M"],
@@ -24,27 +24,12 @@ from .readsim import ParticleReader

 # Map of unit conversions
 CONV_NAME = {
-    "length": [
-        "x",
-        "y",
-        "z",
-        "peak_x",
-        "peak_y",
-        "peak_z",
-        "Rs",
-        "rmin",
-        "rmax",
-        "r200c",
-        "r500c",
-        "r200m",
-        "x0",
-        "y0",
-        "z0",
-        "lagpatch",
-    ],
-    "mass": ["mass_cl", "totpartmass", "m200c", "m500c", "mass_mmain", "M", "m200m"],
-    "density": ["rho0"],
-}
+    "length": ["x", "y", "z", "peak_x", "peak_y", "peak_z", "Rs", "rmin",
+               "rmax", "r200c", "r500c", "r200m", "x0", "y0", "z0",
+               "lagpatch"],
+    "mass": ["mass_cl", "totpartmass", "m200c", "m500c", "mass_mmain", "M",
+             "m200m"],
+    "density": ["rho0"]}


 class BoxUnits:
@@ -367,7 +352,8 @@ class BoxUnits:
         density : float
             Density in :math:`M_\odot / \mathrm{pc}^3`.
         """
-        return density * self._unit_d / self._Msuncgs * (units.Mpc.to(units.cm)) ** 3
+        return (density * self._unit_d
+                / self._Msuncgs * (units.Mpc.to(units.cm)) ** 3)

     def dens2box(self, density):
         r"""
@@ -384,7 +370,8 @@ class BoxUnits:
         density : float
             Density in box units.
         """
-        return density / self._unit_d * self._Msuncgs / (units.Mpc.to(units.cm)) ** 3
+        return (density / self._unit_d * self._Msuncgs
+                / (units.Mpc.to(units.cm)) ** 3)

     def convert_from_boxunits(self, data, names):
         r"""
@@ -412,13 +399,10 @@ class BoxUnits:
             Input array with converted columns.
         """
         names = [names] if isinstance(names, str) else names
-
-        # Shortcut for the transform functions
-        transforms = {
-            "length": self.box2mpc,
-            "mass": self.box2solarmass,
-            "density": self.box2dens,
-        }
+        transforms = {"length": self.box2mpc,
+                      "mass": self.box2solarmass,
+                      "density": self.box2dens,
+                      }

         for name in names:
             # Check that the name is even in the array
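The `transforms` mapping above dispatches each requested column to a converter according to the `CONV_NAME` groups defined earlier in the file. A minimal sketch of that dispatch pattern, with stand-in converters rather than the class's real methods:

    CONV_NAME = {"length": ["x", "y", "z"], "mass": ["M"]}
    transforms = {"length": lambda x: x * 677.7,  # stand-in box -> Mpc
                  "mass": lambda x: x * 1e15,     # stand-in box -> Msun
                  }

    def convert(data, names):
        for name in names:
            # Find which conversion group this column belongs to.
            kind = next(k for k, cols in CONV_NAME.items() if name in cols)
            data[name] = transforms[kind](data[name])
        return data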
@@ -21,7 +21,8 @@ from sklearn.neighbors import NearestNeighbors
 from .box_units import BoxUnits
 from .paths import CSiBORGPaths
 from .readsim import ParticleReader
-from .utils import add_columns, cartesian_to_radec, flip_cols, radec_to_cartesian
+from .utils import (add_columns, cartesian_to_radec, flip_cols,
+                    radec_to_cartesian)


 class BaseCatalogue(ABC):
@@ -249,7 +250,8 @@ class BaseCatalogue(ABC):
         knn.fit(pos)
         # Convert angular radius to cosine difference.
         metric_maxdist = 1 - numpy.cos(numpy.deg2rad(ang_radius))
-        dist, ind = knn.radius_neighbors(X, radius=metric_maxdist, sort_results=True)
+        dist, ind = knn.radius_neighbors(X, radius=metric_maxdist,
+                                         sort_results=True)
         # And the cosine difference to angular distance.
         for i in range(X.shape[0]):
             dist[i] = numpy.rad2deg(numpy.arccos(1 - dist[i]))
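The conversion above works because, for unit vectors separated by an angle :math:`\theta`, :math:`\|\hat{u} - \hat{v}\|^2 = 2(1 - \cos\theta)`, so :math:`1 - \cos\theta` increases monotonically with angular separation; the search radius can therefore be expressed in this metric and the returned distances inverted back to degrees, as done two lines later.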
@@ -302,15 +304,8 @@ class ClumpsCatalogue(BaseCatalogue):
     transformations.
     """

-    def __init__(
-        self,
-        nsim,
-        paths,
-        maxdist=155.5 / 0.705,
-        minmass=("mass_cl", 1e12),
-        load_fitted=True,
-        rawdata=False,
-    ):
+    def __init__(self, nsim, paths, maxdist=155.5 / 0.705,
+                 minmass=("mass_cl", 1e12), load_fitted=True, rawdata=False):
         self.nsim = nsim
         self.paths = paths
         # Read in the clumps from the final snapshot
@@ -333,27 +328,12 @@ class ClumpsCatalogue(BaseCatalogue):
             flip_cols(self._data, "x", "z")
             for p in ("x", "y", "z"):
                 self._data[p] -= 0.5
-            self._data = self.box.convert_from_boxunits(
-                self._data,
-                [
-                    "x",
-                    "y",
-                    "z",
-                    "mass_cl",
-                    "totpartmass",
-                    "rho0",
-                    "r200c",
-                    "r500c",
-                    "m200c",
-                    "m500c",
-                    "r200m",
-                    "m200m",
-                ],
-            )
+            names = ["x", "y", "z", "mass_cl", "totpartmass", "rho0", "r200c",
+                     "r500c", "m200c", "m500c", "r200m", "m200m"]
+            self._data = self.box.convert_from_boxunits(self._data, names)
             if maxdist is not None:
-                dist = numpy.sqrt(
-                    self._data["x"] ** 2 + self._data["y"] ** 2 + self._data["z"] ** 2
-                )
+                dist = numpy.sqrt(self._data["x"]**2 + self._data["y"]**2
+                                  + self._data["z"]**2)
                 self._data = self._data[dist < maxdist]
             if minmass is not None:
                 self._data = self._data[self._data[minmass[0]] > minmass[1]]
@@ -397,16 +377,8 @@ class HaloCatalogue(BaseCatalogue):
     transformations.
     """

-    def __init__(
-        self,
-        nsim,
-        paths,
-        maxdist=155.5 / 0.705,
-        minmass=("M", 1e12),
-        load_fitted=True,
-        load_initial=False,
-        rawdata=False,
-    ):
+    def __init__(self, nsim, paths, maxdist=155.5 / 0.705, minmass=("M", 1e12),
+                 load_fitted=True, load_initial=False, rawdata=False):
         self.nsim = nsim
         self.paths = paths
         # Read in the mmain catalogue of summed substructure
@@ -426,28 +398,13 @@ class HaloCatalogue(BaseCatalogue):
             flip_cols(self._data, "x", "z")
             for p in ("x", "y", "z"):
                 self._data[p] -= 0.5
-            self._data = self.box.convert_from_boxunits(
-                self._data,
-                [
-                    "x",
-                    "y",
-                    "z",
-                    "M",
-                    "totpartmass",
-                    "rho0",
-                    "r200c",
-                    "r500c",
-                    "m200c",
-                    "m500c",
-                    "r200m",
-                    "m200m",
-                ],
-            )
+            names = ["x", "y", "z", "M", "totpartmass", "rho0", "r200c",
+                     "r500c", "m200c", "m500c", "r200m", "m200m"]
+            self._data = self.box.convert_from_boxunits(self._data, names)

             if maxdist is not None:
-                dist = numpy.sqrt(
-                    self._data["x"] ** 2 + self._data["y"] ** 2 + self._data["z"] ** 2
-                )
+                dist = numpy.sqrt(self._data["x"]**2 + self._data["y"]**2
+                                  + self._data["z"]**2)
                 self._data = self._data[dist < maxdist]
             if minmass is not None:
                 self._data = self._data[self._data[minmass[0]] > minmass[1]]
@@ -163,7 +163,7 @@ class TwoMPPGroups(TextSurvey):
         # Convert galactic coordinates to RA, dec
         glon = data[:, 1]
         glat = data[:, 2]
-        coords = SkyCoord(l=glon*units.degree, b=glat*units.degree,
+        coords = SkyCoord(l=glon * units.degree, b=glat * units.degree,
                           frame='galactic')
         coords = coords.transform_to("icrs")
         data["RA"] = coords.ra
@@ -82,7 +82,7 @@ class PairOverlap:
             "match_indxs": match_indxs,
             "ngp_overlap": ngp_overlap,
             "smoothed_overlap": smoothed_overlap,
         }

         self._make_refmask(min_mass, max_dist)
@@ -97,7 +97,7 @@ class CSiBORGPaths:
         fpath = join(self.postdir, "temp")
         if not isdir(fpath):
             mkdir(fpath)
-            warn("Created directory `{}`.".format(fpath), UserWarning, stacklevel=1)
+            warn(f"Created directory `{fpath}`.", UserWarning, stacklevel=1)
         return fpath

     def mmain_path(self, nsnap, nsim):
@@ -118,10 +118,9 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "mmain")
         if not isdir(fdir):
             mkdir(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
-        return join(
-            fdir, "mmain_{}_{}.npz".format(str(nsim).zfill(5), str(nsnap).zfill(5))
-        )
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
+        return join(fdir,
+                    f"mmain_{str(nsim).zfill(5)}_{str(nsnap).zfill(5)}.npz")

     def initmatch_path(self, nsim, kind):
         """
@@ -143,8 +142,8 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "initmatch")
         if not isdir(fdir):
             mkdir(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
-        return join(fdir, "{}_{}.npy".format(kind, str(nsim).zfill(5)))
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
+        return join(fdir, f"{kind}_{str(nsim).zfill(5)}.npy")

     def split_path(self, nsnap, nsim):
         """
@@ -164,10 +163,9 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "split")
         if not isdir(fdir):
             mkdir(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
         return join(
-            fdir, "clumps_{}_{}.npz".format(str(nsim).zfill(5), str(nsnap).zfill(5))
-        )
+            fdir, f"clumps_{str(nsim).zfill(5)}_{str(nsnap).zfill(5)}.npz")

     def get_ics(self, tonew):
         """
@@ -256,7 +254,7 @@ class CSiBORGPaths:
         """
         tonew = nsnap == 1
         simpath = self.ic_path(nsim, tonew=tonew)
-        return join(simpath, "output_{}".format(str(nsnap).zfill(5)))
+        return join(simpath, f"output_{str(nsnap).zfill(5)}")

     def structfit_path(self, nsnap, nsim, kind):
         """
@@ -279,9 +277,8 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "structfit")
         if not isdir(fdir):
             mkdir(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
-        fname = "{}_out_{}_{}.npy".format(kind, str(nsim).zfill(5), str(nsnap).zfill(5))
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
+        fname = f"{kind}_out_{str(nsim).zfill(5)}_{str(nsnap).zfill(5)}.npy"
         return join(fdir, fname)

     def overlap_path(self, nsim0, nsimx):
@@ -302,8 +299,31 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "overlap")
         if not isdir(fdir):
             mkdir(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
-        fname = "ovelrap_{}_{}.npz".format(str(nsim0).zfill(5), str(nsimx).zfill(5))
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
+        fname = f"overlap_{str(nsim0).zfill(5)}_{str(nsimx).zfill(5)}.npz"
+        return join(fdir, fname)
+
+    def radpos_path(self, nsnap, nsim):
+        """
+        Path to the files containing radial positions of halo particles (with
+        summed substructure).
+
+        Parameters
+        ----------
+        nsnap : int
+            Snapshot index.
+        nsim : int
+            IC realisation index.
+
+        Returns
+        -------
+        path : str
+        """
+        fdir = join(self.postdir, "radpos")
+        if not isdir(fdir):
+            mkdir(fdir)
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
+        fname = f"radpos_{str(nsim).zfill(5)}_{str(nsnap).zfill(5)}.npz"
         return join(fdir, fname)

     def knnauto_path(self, run, nsim=None):
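A sketch of how the new `radpos_path` would presumably be used to dump and reload the radial-position archives; the per-halo keys and arrays below are illustrative assumptions, not the dumped format itself:

    import numpy

    # `paths` as constructed elsewhere, e.g.
    # paths = csiborgtools.read.CSiBORGPaths(**csiborgtools.paths_glamdring)
    profiles = {4: numpy.array([0.1, 0.4, 0.9]),  # hypothetical halo ID -> radii
                7: numpy.array([0.2, 0.3])}

    fout = paths.radpos_path(nsnap=950, nsim=7444)  # .../radpos/radpos_07444_00950.npz
    numpy.savez(fout, **{str(k): v for k, v in profiles.items()})

    archive = numpy.load(fout)
    print(archive["4"])  # radial positions of halo 4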
@@ -325,9 +345,9 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "knn", "auto")
         if not isdir(fdir):
             makedirs(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
         if nsim is not None:
-            return join(fdir, "knncdf_{}_{}.p".format(str(nsim).zfill(5), run))
+            return join(fdir, f"knncdf_{str(nsim).zfill(5)}_{run}.p")

         files = glob(join(fdir, "knncdf*"))
         run = "__" + run
@@ -352,12 +372,12 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "knn", "cross")
         if not isdir(fdir):
             makedirs(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
         if nsims is not None:
             assert isinstance(nsims, (list, tuple)) and len(nsims) == 2
             nsim0 = str(nsims[0]).zfill(5)
             nsimx = str(nsims[1]).zfill(5)
-            return join(fdir, "knncdf_{}_{}__{}.p".format(nsim0, nsimx, run))
+            return join(fdir, f"knncdf_{nsim0}_{nsimx}__{run}.p")

         files = glob(join(fdir, "knncdf*"))
         run = "__" + run
@@ -382,9 +402,9 @@ class CSiBORGPaths:
         fdir = join(self.postdir, "tpcf", "auto")
         if not isdir(fdir):
             makedirs(fdir)
-            warn("Created directory `{}`.".format(fdir), UserWarning, stacklevel=1)
+            warn(f"Created directory `{fdir}`.", UserWarning, stacklevel=1)
         if nsim is not None:
-            return join(fdir, "tpcf{}_{}.p".format(str(nsim).zfill(5), run))
+            return join(fdir, f"tpcf{str(nsim).zfill(5)}_{run}.p")

         files = glob(join(fdir, "tpcf*"))
         run = "__" + run
@@ -345,7 +345,7 @@ class ParticleReader:
                          .format(fname))
         data = numpy.genfromtxt(fname)
         # How the data is stored in the clump file.
         clump_cols = {"index": (0, numpy.int32),
                       "level": (1, numpy.int32),
                       "parent": (2, numpy.int32),
                       "ncell": (3, numpy.float32),
@@ -101,7 +101,8 @@ def cols_to_structured(N, cols):
     if not isinstance(cols, list) and all(isinstance(c, tuple) for c in cols):
         raise TypeError("`cols` must be a list of tuples.")

-    dtype = {"names": [col[0] for col in cols], "formats": [col[1] for col in cols]}
+    dtype = {"names": [col[0] for col in cols],
+             "formats": [col[1] for col in cols]}
     return numpy.full(N, numpy.nan, dtype=dtype)
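For context, a quick usage sketch of `cols_to_structured` as reconstructed above; it builds a NaN-initialised structured array (float columns shown, since NaN is the fill value):

    import numpy

    cols = [("r200c", numpy.float32), ("m200c", numpy.float32)]
    dtype = {"names": [col[0] for col in cols],
             "formats": [col[1] for col in cols]}
    out = numpy.full(5, numpy.nan, dtype=dtype)
    out["m200c"][0] = 1.4e14  # fill in fitted values column by column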
@@ -236,9 +237,7 @@ def array_to_structured(arr, cols):
     """
     cols = [cols] if isinstance(cols, str) else cols
     if arr.ndim != 2 and arr.shape[1] != len(cols):
-        raise TypeError(
-            "`arr` must be a 2-dimensional array of " "shape `(n_samples, n_cols)`."
-        )
+        raise TypeError("`arr` must be a 2D array `(n_samples, n_cols)`.")

     dtype = {"names": cols, "formats": [arr.dtype] * len(cols)}
     out = numpy.full(arr.shape[0], numpy.nan, dtype=dtype)
notebooks/fits.ipynb (new file, 93 lines)

@@ -0,0 +1,93 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "5a38ed25",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2023-04-12T14:25:46.519408Z",
+     "start_time": "2023-04-12T14:25:03.003304Z"
+    },
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "import sys\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "sys.path.append(\"../\")\n",
+    "import csiborgtools\n",
+    "\n",
+    "%matplotlib widget \n",
+    "%load_ext autoreload\n",
+    "%autoreload 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "312c96c9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "paths = csiborgtools.read.CSiBORGPaths(**csiborgtools.paths_glamdring)\n",
+    "nsim0 = 7444\n",
+    "nsimx = 7444 + 24\n",
+    "nsnap0 = max(paths.get_snapshots(nsim0))\n",
+    "nsnapx = max(paths.get_snapshots(nsimx))\n",
+    "overlapper = csiborgtools.match.ParticleOverlap()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "b097637b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "halo0_archive = np.load(paths.split_path(nsnap0, nsim0))\n",
+    "halox_archive = np.load(paths.split_path(nsnapx, nsimx))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "c3cf4c91",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "100%|██████████| 204873/204873 [16:48<00:00, 203.15it/s]\n",
+      "100%|██████████| 204960/204960 [16:42<00:00, 204.55it/s]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# delta_bckg = overlapper.make_bckg_delta(halo0_archive, verbose=True)\n",
+    "# delta_bckg = overlapper.make_bckg_delta(halox_archive, delta=delta_bckg, verbose=True)\n",
+    "# np.save(\"./bckg_{}_{}.npy\".format(nsim0, nsimx), delta_bckg)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f2986dd2",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv_galomatch",
+   "language": "python",
+   "name": "venv_galomatch"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
@@ -63,7 +63,7 @@ fout = join(dumpdir, "crosspk",

 jobs = csiborgtools.utils.split_jobs(nsims, nproc)[rank]
 for n in jobs:
-    print("Rank {}@{}: saving {}th delta.".format(rank, datetime.now(), n))
+    print(f"Rank {rank} at {datetime.now()}: saving {n}th delta.", flush=True)
     nsim = ics[n]
     particles = reader.read_particle(max(paths.get_snapshots(nsim)), nsim,
                                      ["x", "y", "z", "M"], verbose=False)
@@ -155,4 +155,4 @@ if rank == 0:
         remove(ftemp.format(ic, "delta") + ".npy")
         remove(ftemp.format(ic, "lengths") + ".p")

     print("All finished!")
@@ -23,7 +23,7 @@ import numpy
 import yaml
 from mpi4py import MPI
 from sklearn.neighbors import NearestNeighbors
-from TaskmasterMPI import master_process, worker_process
+from taskmaster import master_process, worker_process

 try:
     import csiborgtools
@@ -98,7 +98,7 @@ def do_auto(run, cat, ic):
     """Calculate the kNN-CDF single catalgoue autocorrelation."""
     _config = config.get(run, None)
     if _config is None:
-        warn("No configuration for run {}.".format(run), UserWarning, stacklevel=1)
+        warn(f"No configuration for run {run}.", UserWarning, stacklevel=1)
         return

     rvs_gen = csiborgtools.clustering.RVSinsphere(Rmax)
@@ -106,16 +106,10 @@ def do_auto(run, cat, ic):
     knn = NearestNeighbors()
     knn.fit(pos)
     rs, cdf = knncdf(
-        knn,
-        rvs_gen=rvs_gen,
-        nneighbours=config["nneighbours"],
-        rmin=config["rmin"],
-        rmax=config["rmax"],
-        nsamples=int(config["nsamples"]),
-        neval=int(config["neval"]),
-        batch_size=int(config["batch_size"]),
-        random_state=config["seed"],
-    )
+        knn, rvs_gen=rvs_gen, nneighbours=config["nneighbours"],
+        rmin=config["rmin"], rmax=config["rmax"],
+        nsamples=int(config["nsamples"]), neval=int(config["neval"]),
+        batch_size=int(config["batch_size"]), random_state=config["seed"])

     joblib.dump(
         {"rs": rs, "cdf": cdf, "ndensity": pos.shape[0] / totvol},
@@ -127,7 +121,7 @@ def do_cross_rand(run, cat, ic):
     """Calculate the kNN-CDF cross catalogue random correlation."""
     _config = config.get(run, None)
     if _config is None:
-        warn("No configuration for run {}.".format(run), UserWarning, stacklevel=1)
+        warn(f"No configuration for run {run}.", UserWarning, stacklevel=1)
         return

     rvs_gen = csiborgtools.clustering.RVSinsphere(Rmax)
@@ -140,16 +134,10 @@ def do_cross_rand(run, cat, ic):
     knn2.fit(pos2)

     rs, cdf0, cdf1, joint_cdf = knncdf.joint(
-        knn1,
-        knn2,
-        rvs_gen=rvs_gen,
-        nneighbours=int(config["nneighbours"]),
-        rmin=config["rmin"],
-        rmax=config["rmax"],
-        nsamples=int(config["nsamples"]),
-        neval=int(config["neval"]),
-        batch_size=int(config["batch_size"]),
-        random_state=config["seed"],
+        knn1, knn2, rvs_gen=rvs_gen, nneighbours=int(config["nneighbours"]),
+        rmin=config["rmin"], rmax=config["rmax"],
+        nsamples=int(config["nsamples"]), neval=int(config["neval"]),
+        batch_size=int(config["batch_size"]), random_state=config["seed"],
     )
     corr = knncdf.joint_to_corr(cdf0, cdf1, joint_cdf)
     joblib.dump({"rs": rs, "corr": corr}, paths.knnauto_path(run, ic))
@@ -23,7 +23,7 @@ import numpy
 import yaml
 from mpi4py import MPI
 from sklearn.neighbors import NearestNeighbors
-from TaskmasterMPI import master_process, worker_process
+from taskmaster import master_process, worker_process

 try:
     import csiborgtools
@@ -22,7 +22,7 @@ import joblib
 import numpy
 import yaml
 from mpi4py import MPI
-from TaskmasterMPI import master_process, worker_process
+from taskmaster import master_process, worker_process

 try:
     import csiborgtools
@@ -124,4 +124,4 @@ if rank == 0:

     print("Saving results to `{}`.".format(fperm), flush=True)
     with open(fperm, "wb") as f:
         numpy.save(f, out)
@@ -77,9 +77,12 @@ def fit_clump(particles, clump_info, box):
     for i, v in enumerate(["vx", "vy", "vz"]):
         out[v] = numpy.average(obj.vel[:, i], weights=obj["M"])
     # Overdensity masses
-    out["r200c"], out["m200c"] = obj.spherical_overdensity_mass(200, kind="crit")
-    out["r500c"], out["m500c"] = obj.spherical_overdensity_mass(500, kind="crit")
-    out["r200m"], out["m200m"] = obj.spherical_overdensity_mass(200, kind="matter")
+    out["r200c"], out["m200c"] = obj.spherical_overdensity_mass(200,
+                                                                kind="crit")
+    out["r500c"], out["m500c"] = obj.spherical_overdensity_mass(500,
+                                                                kind="crit")
+    out["r200m"], out["m200m"] = obj.spherical_overdensity_mass(200,
+                                                                kind="matter")
     # NFW fit
     if out["npart"] > 10 and numpy.isfinite(out["r200c"]):
         Rs, rho0 = nfwpost.fit(obj)
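The three calls above follow the standard spherical-overdensity definition :math:`M_\Delta = \frac{4}{3} \pi \Delta \rho_{\rm ref} R_\Delta^3`, with :math:`\rho_{\rm ref}` the critical density for `kind="crit"` (yielding :math:`M_{200c}`, :math:`M_{500c}`) and the mean matter density for `kind="matter"` (yielding :math:`M_{200m}`).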
@@ -108,8 +111,8 @@ def load_parent_particles(clumpid, particle_archive, clumps_cat):
     Load a parent halo's particles.
     """
     indxs = clumps_cat["index"][clumps_cat["parent"] == clumpid]
-    # We first load the particles of each clump belonging to this parent and then
-    # concatenate them for further analysis.
+    # We first load the particles of each clump belonging to this parent
+    # and then concatenate them for further analysis.
     clumps = []
     for ind in indxs:
         parts = load_clump_particles(ind, particle_archive)
@@ -118,24 +121,23 @@ def load_parent_particles(clumpid, particle_archive, clumps_cat):

     if len(clumps) == 0:
         return None
-    return csiborgtools.match.concatenate_parts(clumps, include_velocities=True)
+    return csiborgtools.match.concatenate_parts(clumps,
+                                                include_velocities=True)


 # We now start looping over all simulations
 for i, nsim in enumerate(paths.get_ics(tonew=False)):
     if rank == 0:
-        print(
-            "{}: calculating {}th simulation `{}`.".format(datetime.now(), i, nsim),
-            flush=True,
-        )
+        print(f"{datetime.now()}: calculating {i}th simulation `{nsim}`.",
+              flush=True)
     nsnap = max(paths.get_snapshots(nsim))
     box = csiborgtools.read.BoxUnits(nsnap, nsim, paths)

     # Archive of clumps, keywords are their clump IDs
     particle_archive = numpy.load(paths.split_path(nsnap, nsim))
-    clumps_cat = csiborgtools.read.ClumpsCatalogue(
-        nsim, paths, maxdist=None, minmass=None, rawdata=True, load_fitted=False
-    )
+    clumps_cat = csiborgtools.read.ClumpsCatalogue(nsim, paths, maxdist=None,
+                                                   minmass=None, rawdata=True,
+                                                   load_fitted=False)
     # We check whether we fit halos or clumps, will be indexing over different
     # iterators.
     if args.kind == "halos":
@@ -171,25 +173,23 @@ for i, nsim in enumerate(paths.get_ics(tonew=False)):
 
     fout = ftemp.format(str(nsim).zfill(5), str(nsnap).zfill(5), rank)
     if nproc == 0:
-        print(
-            "{}: rank {} saving to `{}`.".format(datetime.now(), rank, fout), flush=True
-        )
+        print(f"{datetime.now()}: rank {rank} saving to `{fout}`.", flush=True)
     numpy.save(fout, out)
     # We saved this CPU's results in a temporary file. Wait now for the other
     # CPUs and then collect results from the 0th rank and save them.
     comm.Barrier()
 
     if rank == 0:
-        print(
-            "{}: collecting results for simulation `{}`.".format(datetime.now(), nsim),
-            flush=True,
-        )
+        print(f"{datetime.now()}: collecting results for simulation `{nsim}`.",
+              flush=True)
         # We write to the output array. Load data from each CPU and append to
         # the output array.
         out = csiborgtools.read.cols_to_structured(ntasks, cols_collect)
-        clumpid2outpos = {indx: i for i, indx in enumerate(clumps_cat["index"])}
+        clumpid2outpos = {indx: i
+                          for i, indx in enumerate(clumps_cat["index"])}
         for i in range(nproc):
-            inp = numpy.load(ftemp.format(str(nsim).zfill(5), str(nsnap).zfill(5), i))
+            inp = numpy.load(ftemp.format(str(nsim).zfill(5),
+                                          str(nsnap).zfill(5), i))
             for j, clumpid in enumerate(inp["index"]):
                 k = clumpid2outpos[clumpid]
                 for key in inp.dtype.names:
@@ -201,7 +201,7 @@ for i, nsim in enumerate(paths.get_ics(tonew=False)):
         out = out[ismain]
 
     fout = paths.structfit_path(nsnap, nsim, args.kind)
-    print("Saving to `{}`.".format(fout), flush=True)
+    print(f"Saving to `{fout}`.", flush=True)
     numpy.save(fout, out)
 
     # We now wait before moving on to another simulation.
140  scripts/fit_profiles.py  Normal file
@@ -0,0 +1,140 @@
+# Copyright (C) 2023 Richard Stiskalek
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+"""
+A script to calculate the particles' separation from the CM and save it.
+Currently MPI is not supported.
+"""
+from argparse import ArgumentParser
+from datetime import datetime
+from gc import collect
+
+import numpy
+from mpi4py import MPI
+from tqdm import trange
+
+try:
+    import csiborgtools
+except ModuleNotFoundError:
+    import sys
+
+    sys.path.append("../")
+    import csiborgtools
+
+parser = ArgumentParser()
+parser.add_argument("--ics", type=int, nargs="+", default=None,
+                    help="IC realisations. If `-1` processes all simulations.")
+args = parser.parse_args()
+
+# Get MPI things
+comm = MPI.COMM_WORLD
+rank = comm.Get_rank()
+nproc = comm.Get_size()
+
+if nproc > 1:
+    raise NotImplementedError("MPI is not implemented yet.")
+
+paths = csiborgtools.read.CSiBORGPaths(**csiborgtools.paths_glamdring)
+partreader = csiborgtools.read.ParticleReader(paths)
+cols_collect = [("r", numpy.float32), ("M", numpy.float32)]
+if args.ics is None or -1 in args.ics:
+    nsims = paths.get_ics(tonew=False)
+else:
+    nsims = args.ics
+
+
+def load_clump_particles(clumpid, particle_archive):
+    """
+    Load a clump's particles from the particle archive. If it is not there,
+    i.e. the clump has no associated particles, return `None`.
+    """
+    try:
+        part = particle_archive[str(clumpid)]
+    except KeyError:
+        part = None
+    return part
+
+
+def load_parent_particles(clumpid, particle_archive, clumps_cat):
+    """
+    Load a parent halo's particles.
+    """
+    indxs = clumps_cat["index"][clumps_cat["parent"] == clumpid]
+    # We first load the particles of each clump belonging to this
+    # parent and then concatenate them for further analysis.
+    clumps = []
+    for ind in indxs:
+        parts = load_clump_particles(ind, particle_archive)
+        if parts is not None:
+            clumps.append(parts)
+
+    if len(clumps) == 0:
+        return None
+    return csiborgtools.match.concatenate_parts(clumps)
+
+
+# We loop over simulations. Here we may later optionally add MPI.
+for i, nsim in enumerate(nsims):
+    if rank == 0:
+        now = datetime.now()
+        print(f"{now}: calculating {i}th simulation `{nsim}`.", flush=True)
+    nsnap = max(paths.get_snapshots(nsim))
+    box = csiborgtools.read.BoxUnits(nsnap, nsim, paths)
+
+    # Archive of clumps, keywords are their clump IDs
+    particle_archive = numpy.load(paths.split_path(nsnap, nsim))
+    clumps_cat = csiborgtools.read.ClumpsCatalogue(nsim, paths, maxdist=None,
+                                                   minmass=None, rawdata=True,
+                                                   load_fitted=False)
+    ismain = clumps_cat.ismain
+    ntasks = len(clumps_cat)
+
+    # We loop over halos and add their particle positions to this dictionary,
+    # which we will later save as an archive.
+    out = {}
+    for j in trange(ntasks) if nproc == 1 else range(ntasks):
+        # If we are fitting halos and this clump is not a main, then continue.
+        if not ismain[j]:
+            continue
+
+        clumpid = clumps_cat["index"][j]
+
+        parts = load_parent_particles(clumpid, particle_archive, clumps_cat)
+        # If we have no particles, then do not save anything.
+        if parts is None:
+            continue
+        obj = csiborgtools.fits.Clump(parts, clumps_cat[j], box)
+        r200m, m200m = obj.spherical_overdensity_mass(200, npart_min=10,
+                                                      kind="matter")
+        r = obj.r()
+        mask = r <= r200m
+
+        _out = csiborgtools.read.cols_to_structured(numpy.sum(mask),
+                                                    cols_collect)
+
+        _out["r"] = r[mask]
+        _out["M"] = obj["M"][mask]
+
+        out[str(clumpid)] = _out
+
+    # Finished, so we save everything.
+    fout = paths.radpos_path(nsnap, nsim)
+    now = datetime.now()
+    print(f"{now}: saving radial profiles for simulation {nsim} to `{fout}`",
+          flush=True)
+    numpy.savez(fout, **out)
+
+    # Clean up the memory just to be sure.
+    del out
+    collect()
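
To illustrate what the dumped archive supports, a hedged sketch of reading it back and binning a spherically averaged density profile; the file name is hypothetical and the keys follow the `numpy.savez` call above:

import numpy

arch = numpy.load("radpos_00950_07444.npz")       # hypothetical output file
for hid in arch.files:                            # one structured array per halo
    prof = arch[hid]
    bins = numpy.linspace(0, prof["r"].max(), 20)
    mass, _ = numpy.histogram(prof["r"], bins=bins, weights=prof["M"])
    vol = 4 / 3 * numpy.pi * (bins[1:]**3 - bins[:-1]**3)
    rho = mass / vol                              # density in each radial shell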
@@ -33,66 +33,55 @@ parser.add_argument("--nsim0", type=int)
 parser.add_argument("--nsimx", type=int)
 parser.add_argument("--nmult", type=float)
 parser.add_argument("--sigma", type=float)
-parser.add_argument("--verbose", type=lambda x: bool(strtobool(x)), default=False)
+parser.add_argument("--verbose", type=lambda x: bool(strtobool(x)),
+                    default=False)
 args = parser.parse_args()
 paths = csiborgtools.read.CSiBORGPaths(**csiborgtools.paths_glamdring)
 smooth_kwargs = {"sigma": args.sigma, "mode": "constant", "cval": 0.0}
 overlapper = csiborgtools.match.ParticleOverlap()
 matcher = csiborgtools.match.RealisationsMatcher()
 
-# Load the raw catalogues (i.e. no selection) including the initial CM positions
-# and the particle archives.
-cat0 = csiborgtools.read.HaloCatalogue(
-    args.nsim0, paths, load_initial=True, rawdata=True
-)
-catx = csiborgtools.read.HaloCatalogue(
-    args.nsimx, paths, load_initial=True, rawdata=True
-)
+# Load the raw catalogues (i.e. no selection) including the initial CM
+# positions and the particle archives.
+cat0 = csiborgtools.read.HaloCatalogue(args.nsim0, paths, load_initial=True,
+                                       rawdata=True)
+catx = csiborgtools.read.HaloCatalogue(args.nsimx, paths, load_initial=True,
+                                       rawdata=True)
 halos0_archive = paths.initmatch_path(args.nsim0, "particles")
 halosx_archive = paths.initmatch_path(args.nsimx, "particles")
 
-# We generate the background density fields. Loads halos's particles one by one
+# We generate the background density fields. Loads halos' particles one by one
 # from the archive, concatenates them and calculates the NGP density field.
-args.verbose and print(
-    "{}: generating the background density fields.".format(datetime.now()), flush=True
-)
+if args.verbose:
+    print(f"{datetime.now()}: generating the background density fields.",
+          flush=True)
 delta_bckg = overlapper.make_bckg_delta(halos0_archive, verbose=args.verbose)
-delta_bckg = overlapper.make_bckg_delta(
-    halosx_archive, delta=delta_bckg, verbose=args.verbose
-)
+delta_bckg = overlapper.make_bckg_delta(halosx_archive, delta=delta_bckg,
+                                        verbose=args.verbose)
 
 # We calculate the overlap between the NGP fields.
-args.verbose and print(
-    "{}: crossing the simulations.".format(datetime.now()), flush=True
-)
-match_indxs, ngp_overlap = matcher.cross(
-    cat0, catx, halos0_archive, halosx_archive, delta_bckg
-)
+if args.verbose:
+    print(f"{datetime.now()}: crossing the simulations.", flush=True)
+match_indxs, ngp_overlap = matcher.cross(cat0, catx, halos0_archive,
+                                         halosx_archive, delta_bckg)
 
-# We now smoothen up the background density field for the smoothed overlap calculation.
-args.verbose and print(
-    "{}: smoothing the background field.".format(datetime.now()), flush=True
-)
+# We now smooth the background density field for the smoothed overlap
+# calculation.
+if args.verbose:
+    print(f"{datetime.now()}: smoothing the background field.", flush=True)
 gaussian_filter(delta_bckg, output=delta_bckg, **smooth_kwargs)
 
 # We calculate the smoothed overlap for the pairs whose NGP overlap is > 0.
-args.verbose and print(
-    "{}: calculating smoothed overlaps.".format(datetime.now()), flush=True
-)
-smoothed_overlap = matcher.smoothed_cross(
-    cat0, catx, halos0_archive, halosx_archive, delta_bckg, match_indxs, smooth_kwargs
-)
+if args.verbose:
+    print(f"{datetime.now()}: calculating smoothed overlaps.", flush=True)
+smoothed_overlap = matcher.smoothed_cross(cat0, catx, halos0_archive,
+                                          halosx_archive, delta_bckg,
+                                          match_indxs, smooth_kwargs)
 
 # We save the results at long last.
 fout = paths.overlap_path(args.nsim0, args.nsimx)
-args.verbose and print(
-    "{}: saving results to `{}`.".format(datetime.now(), fout), flush=True
-)
-numpy.savez(
-    fout,
-    match_indxs=match_indxs,
-    ngp_overlap=ngp_overlap,
-    smoothed_overlap=smoothed_overlap,
-    sigma=args.sigma,
-)
-print("{}: all finished.".format(datetime.now()), flush=True)
+if args.verbose:
+    print(f"{datetime.now()}: saving results to `{fout}`.", flush=True)
+numpy.savez(fout, match_indxs=match_indxs, ngp_overlap=ngp_overlap,
+            smoothed_overlap=smoothed_overlap, sigma=args.sigma)
+print(f"{datetime.now()}: all finished.", flush=True)
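
Reading the saved overlaps back is then a matter of indexing the `.npz` file by the keys passed to `numpy.savez` above; the file name is a placeholder for whatever `paths.overlap_path` returns:

import numpy

data = numpy.load("overlap_07444_07468.npz", allow_pickle=True)
match_indxs = data["match_indxs"]            # matched halo indices per halo
ngp_overlap = data["ngp_overlap"]            # raw NGP-field overlaps
smoothed_overlap = data["smoothed_overlap"]
# `allow_pickle=True` guards against ragged, object-dtype per-halo arrays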
@@ -13,8 +13,9 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 """
-Script to calculate the particle centre of mass and Lagrangian patch size in the initial
-snapshot. Optinally dumps the particle files, however this requires a lot of memory.
+Script to calculate the particle centre of mass and Lagrangian patch size in
+the initial snapshot. Optionally dumps the particle files; however, this
+requires a lot of memory.
 """
 from argparse import ArgumentParser
 from datetime import datetime
@@ -54,15 +55,14 @@ ftemp = join(paths.temp_dumpdir, "initmatch_{}_{}_{}.npy")
 # initial snapshot and dumping them.
 for i, nsim in enumerate(paths.get_ics(tonew=True)):
     if rank == 0:
-        print("{}: reading simulation {}.".format(datetime.now(), nsim), flush=True)
+        print(f"{datetime.now()}: reading simulation {nsim}.", flush=True)
     nsnap = max(paths.get_snapshots(nsim))
 
     # We first load particles in the initial and final snapshots and sort them
     # by their particle IDs so that we can match them by array position.
     # `clump_ids` are the clump IDs of particles.
-    part0 = partreader.read_particle(
-        1, nsim, ["x", "y", "z", "M", "ID"], verbose=verbose
-    )
+    part0 = partreader.read_particle(1, nsim, ["x", "y", "z", "M", "ID"],
+                                     verbose=verbose)
     part0 = part0[numpy.argsort(part0["ID"])]
 
     pid = partreader.read_particle(nsnap, nsim, ["ID"], verbose=verbose)["ID"]
@@ -85,17 +85,14 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
     # size and optionally the initial snapshot particles belonging to this
     # parent halo. Dumping the particles will take the majority of the time.
     if rank == 0:
-        print(
-            "{}: calculating {}th simulation {}.".format(datetime.now(), i, nsim),
-            flush=True,
-        )
+        print(f"{datetime.now()}: calculating {i}th simulation {nsim}.",
+              flush=True)
     # We load up the clump catalogue which contains information about the
     # ultimate parent halos of each clump. We will loop only over the clump
     # IDs of ultimate parent halos and add their substructure particles and at
     # the end save these.
-    cat = csiborgtools.read.ClumpsCatalogue(
-        nsim, paths, load_fitted=False, rawdata=True
-    )
+    cat = csiborgtools.read.ClumpsCatalogue(nsim, paths, load_fitted=False,
+                                            rawdata=True)
     parent_ids = cat["index"][cat.ismain][:500]
     jobs = csiborgtools.fits.split_jobs(parent_ids.size, nproc)[rank]
     for i in tqdm(jobs) if verbose else jobs:
@@ -106,7 +103,8 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
         mmain_particles = part0[mmain_mask]
 
         raddist, cmpos = csiborgtools.match.dist_centmass(mmain_particles)
-        patchsize = csiborgtools.match.dist_percentile(raddist, [99], distmax=0.075)
+        patchsize = csiborgtools.match.dist_percentile(raddist, [99],
+                                                       distmax=0.075)
         with open(ftemp.format(nsim, clid, "fit"), "wb") as f:
             numpy.savez(f, cmpos=cmpos, patchsize=patchsize)
 
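
The patch-size estimate above reduces to two small steps, sketched here as standalone stand-ins rather than the csiborgtools implementations: the distance of every particle from the mass-weighted centre, and a clipped upper percentile of those distances:

import numpy

def dist_centmass(parts):
    pos = numpy.vstack([parts["x"], parts["y"], parts["z"]]).T
    cm = numpy.average(pos, axis=0, weights=parts["M"])  # centre of mass
    return numpy.linalg.norm(pos - cm, axis=1), cm

def dist_percentile(raddist, qs, distmax=0.075):
    # Upper percentile of radial distances, capped at `distmax` (box units)
    return numpy.clip(numpy.percentile(raddist, qs), None, distmax)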
@@ -118,15 +116,13 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
     del part0, clump_ids
     collect()
 
-    # We now wait for all processes and then use the 0th process to collect the results.
-    # We first collect just the Lagrangian patch size information.
+    # We now wait for all processes and then use the 0th process to collect
+    # the results. We first collect just the Lagrangian patch size information.
     comm.Barrier()
     if rank == 0:
-        print("{}: collecting fits...".format(datetime.now()), flush=True)
-        dtype = {
-            "names": ["index", "x", "y", "z", "lagpatch"],
-            "formats": [numpy.int32] + [numpy.float32] * 4,
-        }
+        print(f"{datetime.now()}: collecting fits...", flush=True)
+        dtype = {"names": ["index", "x", "y", "z", "lagpatch"],
+                 "formats": [numpy.int32] + [numpy.float32] * 4}
         out = numpy.full(parent_ids.size, numpy.nan, dtype=dtype)
         for i, clid in enumerate(parent_ids):
             fpath = ftemp.format(nsim, clid, "fit")
@@ -140,14 +136,15 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
             remove(fpath)
 
         fout = paths.initmatch_path(nsim, "fit")
-        print("{}: dumping fits to .. `{}`.".format(datetime.now(), fout), flush=True)
+        print(f"{datetime.now()}: dumping fits to `{fout}`.", flush=True)
         with open(fout, "wb") as f:
            numpy.save(f, out)
 
-    # We now optionally collect the individual clumps and store them in an archive,
-    # which has the benefit of being a single file that can be easily read in.
+    # We now optionally collect the individual clumps and store them in an
+    # archive, which has the benefit of being a single file that can be
+    # easily read in.
     if args.dump:
-        print("{}: collecting particles...".format(datetime.now()), flush=True)
+        print(f"{datetime.now()}: collecting particles...", flush=True)
         out = {}
         for clid in parent_ids:
             fpath = ftemp.format(nsim, clid, "particles")
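
Since the fits are written with `numpy.save`, reading them back is direct; the path below is a placeholder for `paths.initmatch_path(nsim, "fit")`:

import numpy

fout = "initmatch_07444_fit.npy"   # placeholder path
with open(fout, "rb") as f:
    fits = numpy.load(f)
print(fits["lagpatch"])            # one Lagrangian patch size per parent halo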
@@ -155,10 +152,8 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
                 out.update({str(clid): numpy.load(f)})
 
         fout = paths.initmatch_path(nsim, "particles")
-        print(
-            "{}: dumping particles to .. `{}`.".format(datetime.now(), fout),
-            flush=True,
-        )
+        print(f"{datetime.now()}: dumping particles to `{fout}`.",
+              flush=True)
         with open(fout, "wb") as f:
             numpy.savez(f, **out)
 
@@ -19,7 +19,7 @@ from datetime import datetime
 
 import numpy
 from mpi4py import MPI
-from TaskmasterMPI import master_process, worker_process
+from taskmaster import master_process, worker_process
 
 try:
     import csiborgtools
@@ -58,7 +58,7 @@ if nproc > 1:
 else:
     tasks = paths.get_ics(tonew=False)
     for task in tasks:
-        print("{}: completing task `{}`.".format(datetime.now(), task))
+        print(f"{datetime.now()}: completing task `{task}`.", flush=True)
         do_mmain(task)
 
 comm.Barrier()
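
For context, the dynamic master/worker dispatch that `taskmaster` provides can be approximated by a static round-robin split in plain mpi4py; this substitute is for illustration only and is not the library's scheduler:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, nproc = comm.Get_rank(), comm.Get_size()


def do_task(task):                   # placeholder for e.g. `do_mmain` above
    print(f"rank {rank} ran task {task}", flush=True)


tasks = list(range(16))              # stand-in for paths.get_ics(tonew=False)
for task in tasks[rank::nproc]:      # each rank takes every `nproc`-th task
    do_task(task)
comm.Barrier()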
@@ -24,7 +24,7 @@ from os.path import join
 
 import numpy
 from mpi4py import MPI
-from TaskmasterMPI import master_process, worker_process
+from taskmaster import master_process, worker_process
 from tqdm import tqdm
 
 try:
@@ -30,12 +30,12 @@ dumpdir = "/mnt/extraspace/rstiskalek/csiborg/"
 
 
 # Some chosen clusters
-_coma = {"RA": (12 + 59/60 + 48.7 / 60**2) * 15,
+_coma = {"RA": (12 + 59 / 60 + 48.7 / 60**2) * 15,
          "DEC": 27 + 58 / 60 + 50 / 60**2,
          "COMDIST": 102.975}
 
 _virgo = {"RA": (12 + 27 / 60) * 15,
-          "DEC": 12 + 43/60,
+          "DEC": 12 + 43 / 60,
           "COMDIST": 16.5}
 
 specific_clusters = {"Coma": _coma, "Virgo": _virgo}
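
As a quick sanity check of the convention (RA and DEC stored in degrees, distance comoving), the cluster positions convert to Cartesian coordinates as follows; the numerical values repeat the `_coma` entry above, rounded:

import numpy

_coma = {"RA": 194.953, "DEC": 27.981, "COMDIST": 102.975}  # as defined above
ra, dec = numpy.deg2rad(_coma["RA"]), numpy.deg2rad(_coma["DEC"])
d = _coma["COMDIST"]                      # comoving distance
x = d * numpy.cos(dec) * numpy.cos(ra)
y = d * numpy.cos(dec) * numpy.sin(ra)
z = d * numpy.sin(dec)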