Dump radial profile information (#48)

* add radial position path

* pep8

* Add basic fit profile dumping

* pep8

* pep8

* pep8

* pep8

* pep8

* pep8

* Update TODO

* Fix parts is None bug

* Update nb
Richard Stiskalek 2023-04-27 01:18:30 +02:00 committed by GitHub
parent 1a115f481d
commit f48eb6dcb0
29 changed files with 512 additions and 395 deletions


@@ -63,7 +63,7 @@ fout = join(dumpdir, "crosspk",
jobs = csiborgtools.utils.split_jobs(nsims, nproc)[rank]
for n in jobs:
print("Rank {}@{}: saving {}th delta.".format(rank, datetime.now(), n))
print(f"Rank {rank} at {datetime.now()}: saving {n}th delta.", flush=True)
nsim = ics[n]
particles = reader.read_particle(max(paths.get_snapshots(nsim)), nsim,
["x", "y", "z", "M"], verbose=False)
@@ -155,4 +155,4 @@ if rank == 0:
remove(ftemp.format(ic, "delta") + ".npy")
remove(ftemp.format(ic, "lengths") + ".p")
print("All finished!")
print("All finished!")


@@ -23,7 +23,7 @@ import numpy
import yaml
from mpi4py import MPI
from sklearn.neighbors import NearestNeighbors
from TaskmasterMPI import master_process, worker_process
from taskmaster import master_process, worker_process
try:
import csiborgtools
@@ -98,7 +98,7 @@ def do_auto(run, cat, ic):
"""Calculate the kNN-CDF single catalgoue autocorrelation."""
_config = config.get(run, None)
if _config is None:
warn("No configuration for run {}.".format(run), UserWarning, stacklevel=1)
warn(f"No configuration for run {run}.", UserWarning, stacklevel=1)
return
rvs_gen = csiborgtools.clustering.RVSinsphere(Rmax)
@@ -106,16 +106,10 @@ def do_auto(run, cat, ic):
knn = NearestNeighbors()
knn.fit(pos)
rs, cdf = knncdf(
knn,
rvs_gen=rvs_gen,
nneighbours=config["nneighbours"],
rmin=config["rmin"],
rmax=config["rmax"],
nsamples=int(config["nsamples"]),
neval=int(config["neval"]),
batch_size=int(config["batch_size"]),
random_state=config["seed"],
)
knn, rvs_gen=rvs_gen, nneighbours=config["nneighbours"],
rmin=config["rmin"], rmax=config["rmax"],
nsamples=int(config["nsamples"]), neval=int(config["neval"]),
batch_size=int(config["batch_size"]), random_state=config["seed"])
joblib.dump(
{"rs": rs, "cdf": cdf, "ndensity": pos.shape[0] / totvol},
@@ -127,7 +121,7 @@ def do_cross_rand(run, cat, ic):
"""Calculate the kNN-CDF cross catalogue random correlation."""
_config = config.get(run, None)
if _config is None:
warn("No configuration for run {}.".format(run), UserWarning, stacklevel=1)
warn(f"No configuration for run {run}.", UserWarning, stacklevel=1)
return
rvs_gen = csiborgtools.clustering.RVSinsphere(Rmax)
@@ -140,16 +134,10 @@ def do_cross_rand(run, cat, ic):
knn2.fit(pos2)
rs, cdf0, cdf1, joint_cdf = knncdf.joint(
knn1,
knn2,
rvs_gen=rvs_gen,
nneighbours=int(config["nneighbours"]),
rmin=config["rmin"],
rmax=config["rmax"],
nsamples=int(config["nsamples"]),
neval=int(config["neval"]),
batch_size=int(config["batch_size"]),
random_state=config["seed"],
knn1, knn2, rvs_gen=rvs_gen, nneighbours=int(config["nneighbours"]),
rmin=config["rmin"], rmax=config["rmax"],
nsamples=int(config["nsamples"]), neval=int(config["neval"]),
batch_size=int(config["batch_size"]), random_state=config["seed"],
)
corr = knncdf.joint_to_corr(cdf0, cdf1, joint_cdf)
joblib.dump({"rs": rs, "corr": corr}, paths.knnauto_path(run, ic))


@@ -23,7 +23,7 @@ import numpy
import yaml
from mpi4py import MPI
from sklearn.neighbors import NearestNeighbors
from TaskmasterMPI import master_process, worker_process
from taskmaster import master_process, worker_process
try:
import csiborgtools


@@ -22,7 +22,7 @@ import joblib
import numpy
import yaml
from mpi4py import MPI
from TaskmasterMPI import master_process, worker_process
from taskmaster import master_process, worker_process
try:
import csiborgtools
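
The `taskmaster` package renamed in these imports implements the usual MPI master/worker pattern: rank 0 hands out one task per request while the remaining ranks loop until they receive a stop sentinel. A bare-bones mpi4py sketch of the same idea (taskmaster's actual `master_process`/`worker_process` signatures are not shown in this diff, so the ones below are illustrative):

from mpi4py import MPI

def master_process(tasks, comm):
    """Rank 0: serve one task per request, then one stop sentinel per worker."""
    status = MPI.Status()
    for task in list(tasks) + [None] * (comm.Get_size() - 1):
        comm.recv(source=MPI.ANY_SOURCE, tag=1, status=status)
        comm.send(task, dest=status.Get_source(), tag=2)

def worker_process(do_task, comm):
    """Other ranks: request tasks until the `None` sentinel arrives."""
    while True:
        comm.send(None, dest=0, tag=1)  # signal readiness
        task = comm.recv(source=0, tag=2)
        if task is None:
            break
        do_task(task)

# Typical wiring, mirroring the scripts in this commit:
# comm = MPI.COMM_WORLD
# if comm.Get_rank() == 0:
#     master_process(tasks, comm)
# else:
#     worker_process(do_task, comm)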


@@ -124,4 +124,4 @@ if rank == 0:
print("Saving results to `{}`.".format(fperm), flush=True)
with open(fperm, "wb") as f:
numpy.save(f, out)
numpy.save(f, out)


@@ -77,9 +77,12 @@ def fit_clump(particles, clump_info, box):
for i, v in enumerate(["vx", "vy", "vz"]):
out[v] = numpy.average(obj.vel[:, i], weights=obj["M"])
# Overdensity masses
out["r200c"], out["m200c"] = obj.spherical_overdensity_mass(200, kind="crit")
out["r500c"], out["m500c"] = obj.spherical_overdensity_mass(500, kind="crit")
out["r200m"], out["m200m"] = obj.spherical_overdensity_mass(200, kind="matter")
out["r200c"], out["m200c"] = obj.spherical_overdensity_mass(200,
kind="crit")
out["r500c"], out["m500c"] = obj.spherical_overdensity_mass(500,
kind="crit")
out["r200m"], out["m200m"] = obj.spherical_overdensity_mass(200,
kind="matter")
# NFW fit
if out["npart"] > 10 and numpy.isfinite(out["r200c"]):
Rs, rho0 = nfwpost.fit(obj)
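
`spherical_overdensity_mass(delta, kind)` returns the radius within which the mean enclosed density equals `delta` times a reference density (critical or mean matter), together with the enclosed mass. A rough standalone sketch of that calculation, assuming particle radii `r`, masses `m` and a reference density `rho_ref` in consistent units (the real method also takes `npart_min` and may interpolate between particles):

import numpy

def spherical_overdensity_mass(r, m, delta, rho_ref):
    """Return (r_delta, m_delta) where the mean enclosed density crosses
    delta * rho_ref; (nan, nan) if it never does."""
    order = numpy.argsort(r)
    r, cmass = r[order], numpy.cumsum(m[order])
    # Mean density enclosed within each particle's radius.
    rho_enc = cmass / (4 / 3 * numpy.pi * r**3)
    crossing = numpy.where(rho_enc >= delta * rho_ref)[0]
    if crossing.size == 0:
        return numpy.nan, numpy.nan
    k = crossing[-1]  # outermost radius still above the threshold
    return r[k], cmass[k]

The `nfwpost.fit` call above then presumably tunes the scale radius `Rs` and normalisation `rho0` of the NFW profile rho(r) = rho0 / [(r / Rs) * (1 + r / Rs)**2] to the particles inside `r200c`.
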
@@ -108,8 +111,8 @@ def load_parent_particles(clumpid, particle_archive, clumps_cat):
Load a parent halo's particles.
"""
indxs = clumps_cat["index"][clumps_cat["parent"] == clumpid]
# We first load the particles of each clump belonging to this parent and then
# concatenate them for further analysis.
# We first load the particles of each clump belonging to this parent
# and then concatenate them for further analysis.
clumps = []
for ind in indxs:
parts = load_clump_particles(ind, particle_archive)
@@ -118,24 +121,23 @@ def load_parent_particles(clumpid, particle_archive, clumps_cat):
if len(clumps) == 0:
return None
return csiborgtools.match.concatenate_parts(clumps, include_velocities=True)
return csiborgtools.match.concatenate_parts(clumps,
include_velocities=True)
# We now start looping over all simulations
for i, nsim in enumerate(paths.get_ics(tonew=False)):
if rank == 0:
print(
"{}: calculating {}th simulation `{}`.".format(datetime.now(), i, nsim),
flush=True,
)
print(f"{datetime.now()}: calculating {i}th simulation `{nsim}`.",
flush=True)
nsnap = max(paths.get_snapshots(nsim))
box = csiborgtools.read.BoxUnits(nsnap, nsim, paths)
# Archive of clumps, keywords are their clump IDs
particle_archive = numpy.load(paths.split_path(nsnap, nsim))
clumps_cat = csiborgtools.read.ClumpsCatalogue(
nsim, paths, maxdist=None, minmass=None, rawdata=True, load_fitted=False
)
clumps_cat = csiborgtools.read.ClumpsCatalogue(nsim, paths, maxdist=None,
minmass=None, rawdata=True,
load_fitted=False)
# We check whether we are fitting halos or clumps, since we will be
# indexing over different iterators.
if args.kind == "halos":
@@ -171,25 +173,23 @@ for i, nsim in enumerate(paths.get_ics(tonew=False)):
fout = ftemp.format(str(nsim).zfill(5), str(nsnap).zfill(5), rank)
if nproc == 0:
print(
"{}: rank {} saving to `{}`.".format(datetime.now(), rank, fout), flush=True
)
print(f"{datetime.now()}: rank {rank} saving to `{fout}`.", flush=True)
numpy.save(fout, out)
# We saved this CPU's results in a temporary file. Wait now for the other
# CPUs, then collect the results on the 0th rank and save them.
comm.Barrier()
if rank == 0:
print(
"{}: collecting results for simulation `{}`.".format(datetime.now(), nsim),
flush=True,
)
print(f"{datetime.now()}: collecting results for simulation `{nsim}`.",
flush=True)
# We write to the output array. Load data from each CPU and append to
# the output array.
out = csiborgtools.read.cols_to_structured(ntasks, cols_collect)
clumpid2outpos = {indx: i for i, indx in enumerate(clumps_cat["index"])}
clumpid2outpos = {indx: i
for i, indx in enumerate(clumps_cat["index"])}
for i in range(nproc):
inp = numpy.load(ftemp.format(str(nsim).zfill(5), str(nsnap).zfill(5), i))
inp = numpy.load(ftemp.format(str(nsim).zfill(5),
str(nsnap).zfill(5), i))
for j, clumpid in enumerate(inp["index"]):
k = clumpid2outpos[clumpid]
for key in inp.dtype.names:
@@ -201,7 +201,7 @@ for i, nsim in enumerate(paths.get_ics(tonew=False)):
out = out[ismain]
fout = paths.structfit_path(nsnap, nsim, args.kind)
print("Saving to `{}`.".format(fout), flush=True)
print(f"Saving to `{fout}`.", flush=True)
numpy.save(fout, out)
# We now wait before moving on to another simulation.

scripts/fit_profiles.py (new file, 140 lines)

@@ -0,0 +1,140 @@
# Copyright (C) 2023 Richard Stiskalek
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A script to calculate each particle's separation from its halo's CM and save it.
Currently MPI is not supported.
"""
from argparse import ArgumentParser
from datetime import datetime
from gc import collect
import numpy
from mpi4py import MPI
from tqdm import trange
try:
import csiborgtools
except ModuleNotFoundError:
import sys
sys.path.append("../")
import csiborgtools
parser = ArgumentParser()
parser.add_argument("--ics", type=int, nargs="+", default=None,
help="IC realisatiosn. If `-1` processes all simulations.")
args = parser.parse_args()
# Get MPI things
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nproc = comm.Get_size()
if nproc > 1:
raise NotImplementedError("MPI is not implemented yet.")
paths = csiborgtools.read.CSiBORGPaths(**csiborgtools.paths_glamdring)
partreader = csiborgtools.read.ParticleReader(paths)
cols_collect = [("r", numpy.float32), ("M", numpy.float32)]
if args.ics is None or args.ics[0] == -1:
nsims = paths.get_ics(tonew=False)
else:
nsims = args.ics
def load_clump_particles(clumpid, particle_archive):
"""
Load a clump's particles from the particle archive. If it is not there, i.e.
the clump has no associated particles, return `None`.
"""
try:
part = particle_archive[str(clumpid)]
except KeyError:
part = None
return part
def load_parent_particles(clumpid, particle_archive, clumps_cat):
"""
Load a parent halo's particles.
"""
indxs = clumps_cat["index"][clumps_cat["parent"] == clumpid]
# We first load the particles of each clump belonging to this
# parent and then concatenate them for further analysis.
clumps = []
for ind in indxs:
parts = load_clump_particles(ind, particle_archive)
if parts is not None:
clumps.append(parts)
if len(clumps) == 0:
return None
return csiborgtools.match.concatenate_parts(clumps)
# We loop over simulations. MPI support can optionally be added here later.
for i, nsim in enumerate(nsims):
if rank == 0:
now = datetime.now()
print(f"{now}: calculating {i}th simulation `{nsim}`.", flush=True)
nsnap = max(paths.get_snapshots(nsim))
box = csiborgtools.read.BoxUnits(nsnap, nsim, paths)
# Archive of clumps, keywords are their clump IDs
particle_archive = numpy.load(paths.split_path(nsnap, nsim))
clumps_cat = csiborgtools.read.ClumpsCatalogue(nsim, paths, maxdist=None,
minmass=None, rawdata=True,
load_fitted=False)
ismain = clumps_cat.ismain
ntasks = len(clumps_cat)
# We loop over halos and add their particle positions to this dictionary,
# which we will later save as an archive.
out = {}
for j in trange(ntasks) if nproc == 1 else range(ntasks):
# If this clump is not a main halo, then continue.
if not ismain[j]:
continue
clumpid = clumps_cat["index"][j]
parts = load_parent_particles(clumpid, particle_archive, clumps_cat)
# If we have no particles, then do not save anything.
if parts is None:
continue
obj = csiborgtools.fits.Clump(parts, clumps_cat[j], box)
r200m, m200m = obj.spherical_overdensity_mass(200, npart_min=10,
kind="matter")
r = obj.r()
mask = r <= r200m
_out = csiborgtools.read.cols_to_structured(numpy.sum(mask),
cols_collect)
_out["r"] = r[mask]
_out["M"] = obj["M"][mask]
out[str(clumpid)] = _out
# Finished, so we save everything.
fout = paths.radpos_path(nsnap, nsim)
now = datetime.now()
print(f"{now}: saving radial profiles for simulation {nsim} to `{fout}`",
flush=True)
numpy.savez(fout, **out)
# Clean up the memory just to be sure.
del out
collect()
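
The archive written via `paths.radpos_path` is a plain `.npz` keyed by clump ID, each entry a structured array of (`r`, `M`) pairs for particles inside `r200m`. A hedged example of reading one entry back into a cumulative mass profile (the file name and clump ID below are placeholders):

import numpy

arch = numpy.load("radpos_00950_07444.npz")  # placeholder file name
prof = arch["123"]                           # placeholder clump ID key
order = numpy.argsort(prof["r"])
r = prof["r"][order]
m_enclosed = numpy.cumsum(prof["M"][order])
# m_enclosed[i] is the mass within radius r[i] of the halo centre.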


@@ -33,66 +33,55 @@ parser.add_argument("--nsim0", type=int)
parser.add_argument("--nsimx", type=int)
parser.add_argument("--nmult", type=float)
parser.add_argument("--sigma", type=float)
parser.add_argument("--verbose", type=lambda x: bool(strtobool(x)), default=False)
parser.add_argument("--verbose", type=lambda x: bool(strtobool(x)),
default=False)
args = parser.parse_args()
paths = csiborgtools.read.CSiBORGPaths(**csiborgtools.paths_glamdring)
smooth_kwargs = {"sigma": args.sigma, "mode": "constant", "cval": 0.0}
overlapper = csiborgtools.match.ParticleOverlap()
matcher = csiborgtools.match.RealisationsMatcher()
# Load the raw catalogues (i.e. no selection) including the initial CM positions
# and the particle archives.
cat0 = csiborgtools.read.HaloCatalogue(
args.nsim0, paths, load_initial=True, rawdata=True
)
catx = csiborgtools.read.HaloCatalogue(
args.nsimx, paths, load_initial=True, rawdata=True
)
# Load the raw catalogues (i.e. no selection) including the initial CM
# positions and the particle archives.
cat0 = csiborgtools.read.HaloCatalogue(args.nsim0, paths, load_initial=True,
rawdata=True)
catx = csiborgtools.read.HaloCatalogue(args.nsimx, paths, load_initial=True,
rawdata=True)
halos0_archive = paths.initmatch_path(args.nsim0, "particles")
halosx_archive = paths.initmatch_path(args.nsimx, "particles")
# We generate the background density fields. Loads each halo's particles one by one
# from the archive, concatenates them and calculates the NGP density field.
args.verbose and print(
"{}: generating the background density fields.".format(datetime.now()), flush=True
)
if args.verbose:
print(f"{datetime.now()}: generating the background density fields.",
flush=True)
delta_bckg = overlapper.make_bckg_delta(halos0_archive, verbose=args.verbose)
delta_bckg = overlapper.make_bckg_delta(
halosx_archive, delta=delta_bckg, verbose=args.verbose
)
delta_bckg = overlapper.make_bckg_delta(halosx_archive, delta=delta_bckg,
verbose=args.verbose)
# We calculate the overlap between the NGP fields.
args.verbose and print(
"{}: crossing the simulations.".format(datetime.now()), flush=True
)
match_indxs, ngp_overlap = matcher.cross(
cat0, catx, halos0_archive, halosx_archive, delta_bckg
)
if args.verbose:
print(f"{datetime.now()}: crossing the simulations.", flush=True)
match_indxs, ngp_overlap = matcher.cross(cat0, catx, halos0_archive,
halosx_archive, delta_bckg)
# We now smooth the background density field for the smoothed overlap calculation.
args.verbose and print(
"{}: smoothing the background field.".format(datetime.now()), flush=True
)
# We now smooth the background density field for the smoothed overlap
# calculation.
if args.verbose:
print(f"{datetime.now()}: smoothing the background field.", flush=True)
gaussian_filter(delta_bckg, output=delta_bckg, **smooth_kwargs)
# We calculate the smoothed overlap for the pairs whose NGP overlap is > 0.
args.verbose and print(
"{}: calculating smoothed overlaps.".format(datetime.now()), flush=True
)
smoothed_overlap = matcher.smoothed_cross(
cat0, catx, halos0_archive, halosx_archive, delta_bckg, match_indxs, smooth_kwargs
)
if args.verbose:
print(f"{datetime.now()}: calculating smoothed overlaps.", flush=True)
smoothed_overlap = matcher.smoothed_cross(cat0, catx, halos0_archive,
halosx_archive, delta_bckg,
match_indxs, smooth_kwargs)
# We save the results at long last.
fout = paths.overlap_path(args.nsim0, args.nsimx)
args.verbose and print(
"{}: saving results to `{}`.".format(datetime.now(), fout), flush=True
)
numpy.savez(
fout,
match_indxs=match_indxs,
ngp_overlap=ngp_overlap,
smoothed_overlap=smoothed_overlap,
sigma=args.sigma,
)
print("{}: all finished.".format(datetime.now()), flush=True)
if args.verbose:
print(f"{datetime.now()}: saving results to `{fout}`.", flush=True)
numpy.savez(fout, match_indxs=match_indxs, ngp_overlap=ngp_overlap,
smoothed_overlap=smoothed_overlap, sigma=args.sigma)
print(f"{datetime.now()}: all finished.", flush=True)


@@ -13,8 +13,9 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Script to calculate the particle centre of mass and Lagrangian patch size in the initial
snapshot. Optionally dumps the particle files; however, this requires a lot of memory.
Script to calculate the particle centre of mass and Lagrangian patch size in
the initial snapshot. Optionally dumps the particle files; however, this requires
a lot of memory.
"""
from argparse import ArgumentParser
from datetime import datetime
@@ -54,15 +55,14 @@ ftemp = join(paths.temp_dumpdir, "initmatch_{}_{}_{}.npy")
# initial snapshot and dumping them.
for i, nsim in enumerate(paths.get_ics(tonew=True)):
if rank == 0:
print("{}: reading simulation {}.".format(datetime.now(), nsim), flush=True)
print(f"{datetime.now()}: reading simulation {nsim}.", flush=True)
nsnap = max(paths.get_snapshots(nsim))
# We first load particles in the initial and final snapshots and sort them
# by their particle IDs so that we can match them by array position.
# `clump_ids` are the clump IDs of particles.
part0 = partreader.read_particle(
1, nsim, ["x", "y", "z", "M", "ID"], verbose=verbose
)
part0 = partreader.read_particle(1, nsim, ["x", "y", "z", "M", "ID"],
verbose=verbose)
part0 = part0[numpy.argsort(part0["ID"])]
pid = partreader.read_particle(nsnap, nsim, ["ID"], verbose=verbose)["ID"]
@@ -85,17 +85,14 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
# size and optionally the initial snapshot particles belonging to this
# parent halo. Dumping the particles will take the majority of the time.
if rank == 0:
print(
"{}: calculating {}th simulation {}.".format(datetime.now(), i, nsim),
flush=True,
)
print(f"{datetime.now()}: calculating {i}th simulation {nsim}.",
flush=True)
# We load up the clump catalogue which contains information about the
# ultimate parent halos of each clump. We will loop only over the clump
# IDs of ultimate parent halos, add their substructure particles and save
# these at the end.
cat = csiborgtools.read.ClumpsCatalogue(
nsim, paths, load_fitted=False, rawdata=True
)
cat = csiborgtools.read.ClumpsCatalogue(nsim, paths, load_fitted=False,
rawdata=True)
parent_ids = cat["index"][cat.ismain][:500]
jobs = csiborgtools.fits.split_jobs(parent_ids.size, nproc)[rank]
for i in tqdm(jobs) if verbose else jobs:
@@ -106,7 +103,8 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
mmain_particles = part0[mmain_mask]
raddist, cmpos = csiborgtools.match.dist_centmass(mmain_particles)
patchsize = csiborgtools.match.dist_percentile(raddist, [99], distmax=0.075)
patchsize = csiborgtools.match.dist_percentile(raddist, [99],
distmax=0.075)
with open(ftemp.format(nsim, clid, "fit"), "wb") as f:
numpy.savez(f, cmpos=cmpos, patchsize=patchsize)
@@ -118,15 +116,13 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
del part0, clump_ids
collect()
# We now wait for all processes and then use the 0th process to collect the results.
# We first collect just the Lagrangian patch size information.
# We now wait for all processes and then use the 0th process to collect
# the results. We first collect just the Lagrangian patch size information.
comm.Barrier()
if rank == 0:
print("{}: collecting fits...".format(datetime.now()), flush=True)
dtype = {
"names": ["index", "x", "y", "z", "lagpatch"],
"formats": [numpy.int32] + [numpy.float32] * 4,
}
print(f"{datetime.now()}: collecting fits...", flush=True)
dtype = {"names": ["index", "x", "y", "z", "lagpatch"],
"formats": [numpy.int32] + [numpy.float32] * 4}
out = numpy.full(parent_ids.size, numpy.nan, dtype=dtype)
for i, clid in enumerate(parent_ids):
fpath = ftemp.format(nsim, clid, "fit")
@@ -140,14 +136,15 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
remove(fpath)
fout = paths.initmatch_path(nsim, "fit")
print("{}: dumping fits to .. `{}`.".format(datetime.now(), fout), flush=True)
print(f"{datetime.now()}: dumping fits to .. `{fout}`.", flush=True)
with open(fout, "wb") as f:
numpy.save(f, out)
# We now optionally collect the individual clumps and store them in an archive,
# which has the benefit of being a single file that can be easily read in.
# We now optionally collect the individual clumps and store them in an
# archive, which has the benefit of being a single file that can be
# easily read in.
if args.dump:
print("{}: collecting particles...".format(datetime.now()), flush=True)
print(f"{datetime.now()}: collecting particles...", flush=True)
out = {}
for clid in parent_ids:
fpath = ftemp.format(nsim, clid, "particles")
@@ -155,10 +152,8 @@ for i, nsim in enumerate(paths.get_ics(tonew=True)):
out.update({str(clid): numpy.load(f)})
fout = paths.initmatch_path(nsim, "particles")
print(
"{}: dumping particles to .. `{}`.".format(datetime.now(), fout),
flush=True,
)
print(f"{datetime.now()}: dumping particles to .. `{fout}`.",
flush=True)
with open(fout, "wb") as f:
numpy.savez(f, **out)
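
`dist_centmass` and `dist_percentile` reduce each proto-halo to a mass-weighted centre of mass and a Lagrangian patch radius, here the 99th percentile of particle distances from the CM capped at `distmax`. A standalone sketch under the assumption that `parts` is a structured array with fields `x`, `y`, `z`, `M` (the real implementations live in `csiborgtools.match`):

import numpy

def dist_centmass(parts):
    """Particle distances from the mass-weighted centre of mass."""
    pos = numpy.vstack([parts["x"], parts["y"], parts["z"]]).T
    cm = numpy.average(pos, axis=0, weights=parts["M"])
    return numpy.linalg.norm(pos - cm, axis=1), cm

def dist_percentile(raddist, qs, distmax=0.075):
    """Percentile radii, capped at `distmax` to guard against outliers."""
    return numpy.minimum(numpy.percentile(raddist, qs), distmax)

# As in the loop above:
# raddist, cmpos = dist_centmass(mmain_particles)
# patchsize = dist_percentile(raddist, [99], distmax=0.075)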


@@ -19,7 +19,7 @@ from datetime import datetime
import numpy
from mpi4py import MPI
from TaskmasterMPI import master_process, worker_process
from taskmaster import master_process, worker_process
try:
import csiborgtools
@@ -58,7 +58,7 @@ if nproc > 1:
else:
tasks = paths.get_ics(tonew=False)
for task in tasks:
print("{}: completing task `{}`.".format(datetime.now(), task))
print(f"{datetime.now()}: completing task `{task}`.", flush=True)
do_mmain(task)
comm.Barrier()
comm.Barrier()


@@ -24,7 +24,7 @@ from os.path import join
import numpy
from mpi4py import MPI
from TaskmasterMPI import master_process, worker_process
from taskmaster import master_process, worker_process
from tqdm import tqdm
try:


@@ -30,12 +30,12 @@ dumpdir = "/mnt/extraspace/rstiskalek/csiborg/"
# Some chosen clusters
_coma = {"RA": (12 + 59/60 + 48.7 / 60**2) * 15,
_coma = {"RA": (12 + 59 / 60 + 48.7 / 60**2) * 15,
"DEC": 27 + 58 / 60 + 50 / 60**2,
"COMDIST": 102.975}
_virgo = {"RA": (12 + 27 / 60) * 15,
"DEC": 12 + 43/60,
"DEC": 12 + 43 / 60,
"COMDIST": 16.5}
specific_clusters = {"Coma": _coma, "Virgo": _virgo}