mirror of
https://bitbucket.org/cosmicvoids/vide_public.git
synced 2025-07-04 07:11:12 +00:00
Merged in python3 (pull request #5)
Port to python3, large code cleanup

* Fixed command line for cosmotool
* Fix path
* Dump command line in log file
* Fix important typo
* Modify paths for example
* Fix path again
* Use an explicit constructor
* Change file() to open() (python 2 -> 3)
* python3 fix for xrange in periodic_kdtree.py
* Fixed index for Np, numPart, numZones, numZonesTot, partID, zoneID in catalogUtil.py
This commit is contained in:
parent 8249256508
commit affb56ff48

392 changed files with 4092 additions and 260938 deletions
@@ -1,417 +1,436 @@
import numpy as np
import readsnap
import readsubf
import sys
import time
import random

###############################################################################
#this function returns an array containing the positions of the galaxies (kpc/h)
#in the catalogue according to the fiducial density, M1 and alpha
#CDM halos with masses within [min_mass,max_mass] are populated
#with galaxies. The IDs and positions of the CDM particles belonging to the
#different groups are read from the snapshots
#If one needs to create many catalogues, this function is not appropriate,
#since it wastes a lot of time reading the snapshots and sorting the IDs
#min_mass and max_mass are in units of Msun/h, not 1e10 Msun/h
#mass_criteria: definition of the halo virial radius -- 't200' 'm200' 'c200'
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
def hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,
        fiducial_density,M1,alpha,mass_criteria,verbose=False):

    thres=1e-3  #controls the max relative error to accept a galaxy density

    #read the header and obtain the boxsize
    head=readsnap.snapshot_header(snapshot_fname)
    BoxSize=head.boxsize  #BoxSize in kpc/h

    #read positions and IDs of DM particles: sort the IDs array
    DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)  #kpc/h
    DM_ids=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1
    sorted_ids=DM_ids.argsort(axis=0)
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]

    #read the IDs of the particles belonging to the CDM halos
    halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
                               long_ids=True,read_all=True)
    IDs=halos_ID.SubIDs-1
    del halos_ID

    #read CDM halos information
    halos=readsubf.subfind_catalog(groups_fname,groups_number,
                                   group_veldisp=True,masstab=True,
                                   long_ids=True,swap=False)
    if mass_criteria=='t200':
        halos_mass=halos.group_m_tophat200*1e10  #masses in Msun/h
        halos_radius=halos.group_r_tophat200     #radius in kpc/h
    elif mass_criteria=='m200':
        halos_mass=halos.group_m_mean200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_mean200       #radius in kpc/h
    elif mass_criteria=='c200':
        halos_mass=halos.group_m_crit200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_crit200       #radius in kpc/h
    else:
        print 'bad mass_criteria'
        sys.exit()
    halos_pos=halos.group_pos  #positions in kpc/h
    halos_len=halos.group_len
    halos_offset=halos.group_offset
    halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
    del halos

    if verbose:
        print ' '
        print 'total halos found=',halos_pos.shape[0]
        print 'halos number density=',len(halos_pos)/(BoxSize*1e-3)**3

    #keep only the halos in the given mass range
    halo_mass=halos_mass[halos_indexes]
    halo_pos=halos_pos[halos_indexes]
    halo_radius=halos_radius[halos_indexes]
    halo_len=halos_len[halos_indexes]
    halo_offset=halos_offset[halos_indexes]
    del halos_indexes

    ##### COMPUTE Mmin GIVEN M1 & alpha #####
    i=0; max_iterations=20  #maximum number of iterations
    Mmin1=min_mass; Mmin2=max_mass
    while (i<max_iterations):
        Mmin=0.5*(Mmin1+Mmin2)  #estimate of the HOD parameter Mmin

        total_galaxies=0
        inside=np.where(halo_mass>Mmin)[0]  #take all halos with M>Mmin
        mass=halo_mass[inside]  #only halos with M>Mmin host centrals/satellites

        total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
        mean_density=total_galaxies*1.0/(BoxSize*1e-3)**3  #galaxies/(Mpc/h)^3

        if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
            i=max_iterations
        elif (mean_density>fiducial_density):
            Mmin1=Mmin
        else:
            Mmin2=Mmin
        i+=1

    if verbose:
        print ' '
        print 'Mmin=',Mmin
        print 'average number of galaxies=',total_galaxies
        print 'average galaxy density=',mean_density
    #########################################

    #keep just halos with M>Mmin; the rest do not host central/satellite galaxies
    inside=np.where(halo_mass>Mmin)[0]
    halo_mass=halo_mass[inside]
    halo_pos=halo_pos[inside]
    halo_radius=halo_radius[inside]
    halo_len=halo_len[inside]
    halo_offset=halo_offset[inside]
    del inside

    #compute the number of satellites in each halo using the Poisson distribution
    N_mean_sat=(halo_mass/M1)**alpha  #mean number of satellites
    N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
    for i in range(len(N_sat)):
        N_sat[i]=np.random.poisson(N_mean_sat[i])
    N_tot=np.sum(N_sat)+len(halo_mass)  #total number of galaxies in the catalogue

    if verbose:
        print ' '
        print np.min(halo_mass),'< M_halo <',np.max(halo_mass)
        print 'total number of galaxies=',N_tot
        print 'galaxy number density=',N_tot/(BoxSize*1e-3)**3

    #place satellites following the distribution of dark matter in groups
    if verbose:
        print ' '
        print 'Creating mock catalogue ...',

    pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
    #index: variable that goes through the halos (there may be several galaxies in a halo)
    #i: variable that goes through all (central/satellite) galaxies
    #count: number of galaxies that lie beyond their host halo's virial radius
    index=0; count=0; i=0
    while (index<halo_mass.shape[0]):

        position=halo_pos[index]   #position of the DM halo
        radius=halo_radius[index]  #radius of the DM halo

        #save the position of the central galaxy
        pos_galaxies[i]=position; i+=1

        #if the halo contains satellites, save their positions
        Nsat=N_sat[index]
        if Nsat>0:
            offset=halo_offset[index]
            length=halo_len[index]
            idss=sorted_ids[IDs[offset:offset+length]]

            #compute the distances to the halo center, keeping those with R<Rvir
            pos=DM_pos[idss]  #positions of the particles belonging to the halo
            posc=pos-position

            #this is to populate correctly halos close to the box boundaries
            if np.any((position+radius>BoxSize) + (position-radius<0.0)):

                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize

            radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
            inside=np.where(radii<radius)[0]
            selected=random.sample(inside,Nsat)
            pos=pos[selected]

            #additional, non-essential check; can be commented out
            posc=pos-position
            if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize
            r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
            if r_max>radius:  #check that no particles beyond Rvir were selected
                print position
                print radius
                print pos
                count+=1

            for j in range(Nsat):
                pos_galaxies[i]=pos[j]; i+=1
        index+=1

    if verbose:
        print 'done'
    #some final checks
    if i!=N_tot:
        print 'some galaxies missing:'
        print 'registered',i,'galaxies out of',N_tot
    if count>0:
        print 'error:',count,'particles beyond the virial radius selected'

    return pos_galaxies
###############################################################################


#This function is equal to the one above, except that the snapshot reading,
#halo reading and ID sorting are not performed here. It is best suited when many
#galaxy catalogues need to be created: for example, when iterating over M1 and
#alpha trying to find the combination that best reproduces the measured wp(rp)
#VARIABLES:
#DM_pos: array containing the positions of the CDM particles
#sorted_ids: array containing the positions of the IDs in the snapshots.
#sorted_ids[N] gives the position where the particle whose ID is N is located
#IDs: IDs array as read from the subfind ID file
#halo_mass: array containing the masses of the CDM halos in the mass interval
#halo_pos: array containing the positions of the CDM halos in the mass interval
#halo_radius: array containing the radii of the CDM halos in the mass interval
#halo_len: array containing the lengths of the CDM halos in the mass interval
#halo_offset: array containing the offsets of the CDM halos in the mass interval
#BoxSize: size of the simulation box, in Mpc/h
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
def hod_fast(DM_pos,sorted_ids,IDs,halo_mass,halo_pos,halo_radius,halo_len,
             halo_offset,BoxSize,min_mass,max_mass,fiducial_density,
             M1,alpha,seed,verbose=False):

    problematic_cases=0  #number of problematic cases (e.g. halos with Rvir=0.0)
    thres=1e-3  #controls the max relative error to accept a galaxy density

    ##### COMPUTE Mmin GIVEN M1 & alpha #####
    i=0; max_iterations=20  #maximum number of iterations
    Mmin1=min_mass; Mmin2=max_mass
    while (i<max_iterations):
        Mmin=0.5*(Mmin1+Mmin2)  #estimate of the HOD parameter Mmin

        total_galaxies=0
        inside=np.where(halo_mass>Mmin)[0]
        mass=halo_mass[inside]  #only halos with M>Mmin host centrals/satellites

        total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
        mean_density=total_galaxies*1.0/BoxSize**3

        if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
            i=max_iterations
        elif (mean_density>fiducial_density):
            Mmin1=Mmin
        else:
            Mmin2=Mmin
        i+=1

    if verbose:
        print ' '
        print 'Mmin=',Mmin
        print 'average number of galaxies=',total_galaxies
        print 'average galaxy density=',mean_density
    #########################################

    #keep just halos with M>Mmin; the rest do not host central/satellite galaxies
    inside=np.where(halo_mass>Mmin)[0]
    halo_mass=halo_mass[inside]
    halo_pos=halo_pos[inside]
    halo_radius=halo_radius[inside]
    halo_len=halo_len[inside]
    halo_offset=halo_offset[inside]
    del inside

    #compute the number of satellites in each halo using the Poisson distribution
    np.random.seed(seed)  #this is just to check convergence on w_p(r_p)
    N_mean_sat=(halo_mass/M1)**alpha  #mean number of satellites
    N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
    for i in range(len(N_sat)):
        N_sat[i]=np.random.poisson(N_mean_sat[i])
    N_tot=np.sum(N_sat)+len(halo_mass)  #total number of galaxies in the catalogue

    if verbose:
        print ' '
        print np.min(halo_mass),'< M_halo <',np.max(halo_mass)
        print 'total number of galaxies=',N_tot
        print 'galaxy number density=',N_tot/BoxSize**3

    #place satellites following the distribution of dark matter in groups
    if verbose:
        print ' '
        print 'Creating mock catalogue ...',

    pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
    #index: variable that goes through the halos (there may be several galaxies in a halo)
    #i: variable that goes through the galaxies
    #count: number of galaxies that lie beyond their host halo's virial radius
    random.seed(seed)  #this is just to check convergence on w_p(r_p)
    index=0; count=0; i=0
    while (index<halo_mass.size):

        position=halo_pos[index]   #position of the DM halo
        radius=halo_radius[index]  #radius of the DM halo

        #save the position of the central galaxy
        pos_galaxies[i]=position; i+=1

        #if the halo contains satellites, save their positions
        Nsat=N_sat[index]
        if Nsat>0:
            offset=halo_offset[index]
            length=halo_len[index]
            idss=sorted_ids[IDs[offset:offset+length]]

            #compute the radii of those particles and keep those with R<Rvir
            pos=DM_pos[idss]
            posc=pos-position

            #this is to populate correctly halos close to the box boundaries
            if np.any((position+radius>BoxSize) + (position-radius<0.0)):

                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize

            radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
            inside=np.where(radii<radius)[0]
            if len(inside)<Nsat:
                problematic_cases+=1
                print 'problematic case',len(inside),Nsat
            else:
                selected=random.sample(inside,Nsat)
                pos=pos[selected]

                #additional, non-essential check; can be commented out
                #posc=pos-position
                #if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
                #    inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                #    posc[inside,0]-=BoxSize
                #    inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                #    posc[inside,0]+=BoxSize

                #    inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                #    posc[inside,1]-=BoxSize
                #    inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                #    posc[inside,1]+=BoxSize

                #    inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                #    posc[inside,2]-=BoxSize
                #    inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                #    posc[inside,2]+=BoxSize
                #r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
                #if r_max>radius:  #check that no particles beyond Rvir were selected
                #    print position
                #    print radius
                #    print pos
                #    count+=1

            for j in range(Nsat):
                pos_galaxies[i]=pos[j]; i+=1
        index+=1

    if verbose:
        print 'done'
    #some final checks
    if i!=N_tot:
        print 'some galaxies missing:'
        print 'registered',i,'galaxies out of',N_tot
    if count>0:
        print 'error:',count,'particles beyond the virial radius selected'

    return pos_galaxies
###############################################################################


##### example of use #####
"""
snapshot_fname='/data1/villa/b500p512nu0.6z99np1024tree/snapdir_017/snap_017'
groups_fname='/home/villa/data1/b500p512nu0.6z99np1024tree'
groups_number=17

### HALO CATALOGUE PARAMETERS ###
mass_criteria='t200'
min_mass=2e12  #Msun/h
max_mass=2e15  #Msun/h

### HOD PARAMETERS ###
fiducial_density=0.00111  #mean number density for galaxies with Mr<-21
M1=8e13
alpha=1.4

pos=hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,fiducial_density,M1,alpha,mass_criteria,verbose=True)

print pos
"""

#+
# VIDE -- Void IDentification and Examination -- ./python_tools/fit_hod/HOD_library.py
# Copyright (C) 2010-2014 Guilhem Lavaux
# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#+
import numpy as np
import readsnap
import readsubf
import sys
import time
import random

###############################################################################
#this function returns an array containing the positions of the galaxies (kpc/h)
#in the catalogue according to the fiducial density, M1 and alpha
#CDM halos with masses within [min_mass,max_mass] are populated
#with galaxies. The IDs and positions of the CDM particles belonging to the
#different groups are read from the snapshots
#If one needs to create many catalogues, this function is not appropriate,
#since it wastes a lot of time reading the snapshots and sorting the IDs
#min_mass and max_mass are in units of Msun/h, not 1e10 Msun/h
#mass_criteria: definition of the halo virial radius -- 't200' 'm200' 'c200'
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
def hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,
        fiducial_density,M1,alpha,mass_criteria,verbose=False):

    thres=1e-3  #controls the max relative error to accept a galaxy density

    #read the header and obtain the boxsize
    head=readsnap.snapshot_header(snapshot_fname)
    BoxSize=head.boxsize  #BoxSize in kpc/h

    #read positions and IDs of DM particles: sort the IDs array
    DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)  #kpc/h
    DM_ids=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1
    sorted_ids=DM_ids.argsort(axis=0)
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]
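    #Editorial sketch of that identity on toy data (hypothetical values):
    #  ids=np.array([2,0,1])       #a permutation of 0..N-1, like DM_ids
    #  order=ids.argsort(axis=0)   #-> array([1, 2, 0])
    #  ids[order[0]]==0; ids[order[1]]==1; ids[order[2]]==2
    #so sorted_ids[N] is exactly the storage index of the particle whose ID is N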

    #read the IDs of the particles belonging to the CDM halos
    halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
                               long_ids=True,read_all=True)
    IDs=halos_ID.SubIDs-1
    del halos_ID

    #read CDM halos information
    halos=readsubf.subfind_catalog(groups_fname,groups_number,
                                   group_veldisp=True,masstab=True,
                                   long_ids=True,swap=False)
    if mass_criteria=='t200':
        halos_mass=halos.group_m_tophat200*1e10  #masses in Msun/h
        halos_radius=halos.group_r_tophat200     #radius in kpc/h
    elif mass_criteria=='m200':
        halos_mass=halos.group_m_mean200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_mean200       #radius in kpc/h
    elif mass_criteria=='c200':
        halos_mass=halos.group_m_crit200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_crit200       #radius in kpc/h
    else:
        print('bad mass_criteria')
        sys.exit()
    halos_pos=halos.group_pos  #positions in kpc/h
    halos_len=halos.group_len
    halos_offset=halos.group_offset
    halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
    del halos

    if verbose:
        print(' ')
        print('total halos found=',halos_pos.shape[0])
        print('halos number density=',len(halos_pos)/(BoxSize*1e-3)**3)

    #keep only the halos in the given mass range
    halo_mass=halos_mass[halos_indexes]
    halo_pos=halos_pos[halos_indexes]
    halo_radius=halos_radius[halos_indexes]
    halo_len=halos_len[halos_indexes]
    halo_offset=halos_offset[halos_indexes]
    del halos_indexes

    ##### COMPUTE Mmin GIVEN M1 & alpha #####
    i=0; max_iterations=20  #maximum number of iterations
    Mmin1=min_mass; Mmin2=max_mass
    while (i<max_iterations):
        Mmin=0.5*(Mmin1+Mmin2)  #estimate of the HOD parameter Mmin

        total_galaxies=0
        inside=np.where(halo_mass>Mmin)[0]  #take all halos with M>Mmin
        mass=halo_mass[inside]  #only halos with M>Mmin host centrals/satellites

        total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
        mean_density=total_galaxies*1.0/(BoxSize*1e-3)**3  #galaxies/(Mpc/h)^3

        if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
            i=max_iterations
        elif (mean_density>fiducial_density):
            Mmin1=Mmin
        else:
            Mmin2=Mmin
        i+=1

    if verbose:
        print(' ')
        print('Mmin=',Mmin)
        print('average number of galaxies=',total_galaxies)
        print('average galaxy density=',mean_density)
    #########################################
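    #Editorial note: the loop above is a plain bisection on Mmin. It relies on the
    #mean density n(Mmin)=[N_halos(>Mmin)+sum_{M>Mmin}(M/M1)**alpha]/V decreasing
    #monotonically with Mmin, so the bracket [Mmin1,Mmin2] always contains the
    #target; 20 halvings shrink the initial bracket [min_mass,max_mass] by 2**20.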

    #keep just halos with M>Mmin; the rest do not host central/satellite galaxies
    inside=np.where(halo_mass>Mmin)[0]
    halo_mass=halo_mass[inside]
    halo_pos=halo_pos[inside]
    halo_radius=halo_radius[inside]
    halo_len=halo_len[inside]
    halo_offset=halo_offset[inside]
    del inside

    #compute the number of satellites in each halo using the Poisson distribution
    N_mean_sat=(halo_mass/M1)**alpha  #mean number of satellites
    N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
    for i in range(len(N_sat)):
        N_sat[i]=np.random.poisson(N_mean_sat[i])
    N_tot=np.sum(N_sat)+len(halo_mass)  #total number of galaxies in the catalogue
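    #Editorial note: an equivalent vectorized draw (np.random.poisson accepts an
    #array of means) would be, as a sketch:
    #  N_sat=np.random.poisson(N_mean_sat).astype(np.int32)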

    if verbose:
        print(' ')
        print(np.min(halo_mass),'< M_halo <',np.max(halo_mass))
        print('total number of galaxies=',N_tot)
        print('galaxy number density=',N_tot/(BoxSize*1e-3)**3)

    #place satellites following the distribution of dark matter in groups
    if verbose:
        print(' ')
        print('Creating mock catalogue ...', end=' ')

    pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
    #index: variable that goes through the halos (there may be several galaxies in a halo)
    #i: variable that goes through all (central/satellite) galaxies
    #count: number of galaxies that lie beyond their host halo's virial radius
    index=0; count=0; i=0
    while (index<halo_mass.shape[0]):

        position=halo_pos[index]   #position of the DM halo
        radius=halo_radius[index]  #radius of the DM halo

        #save the position of the central galaxy
        pos_galaxies[i]=position; i+=1

        #if the halo contains satellites, save their positions
        Nsat=N_sat[index]
        if Nsat>0:
            offset=halo_offset[index]
            length=halo_len[index]
            idss=sorted_ids[IDs[offset:offset+length]]

            #compute the distances to the halo center, keeping those with R<Rvir
            pos=DM_pos[idss]  #positions of the particles belonging to the halo
            posc=pos-position

            #this is to populate correctly halos close to the box boundaries
            if np.any((position+radius>BoxSize) + (position-radius<0.0)):

                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize
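                #Editorial note: the six blocks above apply the minimum-image wrap
                #axis by axis; an equivalent one-liner, assuming all offsets lie
                #within one box length, would be:
                #  posc-=BoxSize*np.round(posc/BoxSize)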

            radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
            inside=np.where(radii<radius)[0]
            selected=random.sample(list(inside),Nsat)  #list(): random.sample rejects numpy arrays in python3
            pos=pos[selected]

            #additional, non-essential check; can be commented out
            posc=pos-position
            if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize
            r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
            if r_max>radius:  #check that no particles beyond Rvir were selected
                print(position)
                print(radius)
                print(pos)
                count+=1

            for j in range(Nsat):
                pos_galaxies[i]=pos[j]; i+=1
        index+=1

    if verbose:
        print('done')
    #some final checks
    if i!=N_tot:
        print('some galaxies missing:')
        print('registered',i,'galaxies out of',N_tot)
    if count>0:
        print('error:',count,'particles beyond the virial radius selected')

    return pos_galaxies
###############################################################################


#This function is equal to the one above, except that the snapshot reading,
#halo reading and ID sorting are not performed here. It is best suited when many
#galaxy catalogues need to be created: for example, when iterating over M1 and
#alpha trying to find the combination that best reproduces the measured wp(rp)
#VARIABLES:
#DM_pos: array containing the positions of the CDM particles
#sorted_ids: array containing the positions of the IDs in the snapshots.
#sorted_ids[N] gives the position where the particle whose ID is N is located
#IDs: IDs array as read from the subfind ID file
#halo_mass: array containing the masses of the CDM halos in the mass interval
#halo_pos: array containing the positions of the CDM halos in the mass interval
#halo_radius: array containing the radii of the CDM halos in the mass interval
#halo_len: array containing the lengths of the CDM halos in the mass interval
#halo_offset: array containing the offsets of the CDM halos in the mass interval
#BoxSize: size of the simulation box, in Mpc/h
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
def hod_fast(DM_pos,sorted_ids,IDs,halo_mass,halo_pos,halo_radius,halo_len,
             halo_offset,BoxSize,min_mass,max_mass,fiducial_density,
             M1,alpha,seed,verbose=False):

    problematic_cases=0  #number of problematic cases (e.g. halos with Rvir=0.0)
    thres=1e-3  #controls the max relative error to accept a galaxy density

    ##### COMPUTE Mmin GIVEN M1 & alpha #####
    i=0; max_iterations=20  #maximum number of iterations
    Mmin1=min_mass; Mmin2=max_mass
    while (i<max_iterations):
        Mmin=0.5*(Mmin1+Mmin2)  #estimate of the HOD parameter Mmin

        total_galaxies=0
        inside=np.where(halo_mass>Mmin)[0]
        mass=halo_mass[inside]  #only halos with M>Mmin host centrals/satellites

        total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
        mean_density=total_galaxies*1.0/BoxSize**3

        if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
            i=max_iterations
        elif (mean_density>fiducial_density):
            Mmin1=Mmin
        else:
            Mmin2=Mmin
        i+=1

    if verbose:
        print(' ')
        print('Mmin=',Mmin)
        print('average number of galaxies=',total_galaxies)
        print('average galaxy density=',mean_density)
    #########################################

    #keep just halos with M>Mmin; the rest do not host central/satellite galaxies
    inside=np.where(halo_mass>Mmin)[0]
    halo_mass=halo_mass[inside]
    halo_pos=halo_pos[inside]
    halo_radius=halo_radius[inside]
    halo_len=halo_len[inside]
    halo_offset=halo_offset[inside]
    del inside

    #compute the number of satellites in each halo using the Poisson distribution
    np.random.seed(seed)  #this is just to check convergence on w_p(r_p)
    N_mean_sat=(halo_mass/M1)**alpha  #mean number of satellites
    N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
    for i in range(len(N_sat)):
        N_sat[i]=np.random.poisson(N_mean_sat[i])
    N_tot=np.sum(N_sat)+len(halo_mass)  #total number of galaxies in the catalogue

    if verbose:
        print(' ')
        print(np.min(halo_mass),'< M_halo <',np.max(halo_mass))
        print('total number of galaxies=',N_tot)
        print('galaxy number density=',N_tot/BoxSize**3)

    #place satellites following the distribution of dark matter in groups
    if verbose:
        print(' ')
        print('Creating mock catalogue ...', end=' ')

    pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
    #index: variable that goes through the halos (there may be several galaxies in a halo)
    #i: variable that goes through the galaxies
    #count: number of galaxies that lie beyond their host halo's virial radius
    random.seed(seed)  #this is just to check convergence on w_p(r_p)
    index=0; count=0; i=0
    while (index<halo_mass.size):

        position=halo_pos[index]   #position of the DM halo
        radius=halo_radius[index]  #radius of the DM halo

        #save the position of the central galaxy
        pos_galaxies[i]=position; i+=1

        #if the halo contains satellites, save their positions
        Nsat=N_sat[index]
        if Nsat>0:
            offset=halo_offset[index]
            length=halo_len[index]
            idss=sorted_ids[IDs[offset:offset+length]]

            #compute the radii of those particles and keep those with R<Rvir
            pos=DM_pos[idss]
            posc=pos-position

            #this is to populate correctly halos close to the box boundaries
            if np.any((position+radius>BoxSize) + (position-radius<0.0)):

                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize

            radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
            inside=np.where(radii<radius)[0]
            if len(inside)<Nsat:
                problematic_cases+=1
                print('problematic case',len(inside),Nsat)
            else:
                selected=random.sample(list(inside),Nsat)  #list(): random.sample rejects numpy arrays in python3
                pos=pos[selected]

                #additional, non-essential check; can be commented out
                #posc=pos-position
                #if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
                #    inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                #    posc[inside,0]-=BoxSize
                #    inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                #    posc[inside,0]+=BoxSize

                #    inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                #    posc[inside,1]-=BoxSize
                #    inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                #    posc[inside,1]+=BoxSize

                #    inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                #    posc[inside,2]-=BoxSize
                #    inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                #    posc[inside,2]+=BoxSize
                #r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
                #if r_max>radius:  #check that no particles beyond Rvir were selected
                #    print(position)
                #    print(radius)
                #    print(pos)
                #    count+=1

            for j in range(Nsat):
                pos_galaxies[i]=pos[j]; i+=1
        index+=1

    if verbose:
        print('done')
    #some final checks
    if i!=N_tot:
        print('some galaxies missing:')
        print('registered',i,'galaxies out of',N_tot)
    if count>0:
        print('error:',count,'particles beyond the virial radius selected')

    return pos_galaxies
###############################################################################


##### example of use #####
"""
snapshot_fname='/data1/villa/b500p512nu0.6z99np1024tree/snapdir_017/snap_017'
groups_fname='/home/villa/data1/b500p512nu0.6z99np1024tree'
groups_number=17

### HALO CATALOGUE PARAMETERS ###
mass_criteria='t200'
min_mass=2e12  #Msun/h
max_mass=2e15  #Msun/h

### HOD PARAMETERS ###
fiducial_density=0.00111  #mean number density for galaxies with Mr<-21
M1=8e13
alpha=1.4

pos=hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,fiducial_density,M1,alpha,mass_criteria,verbose=True)

print(pos)
"""

@@ -1,276 +1,295 @@
#LATEST MODIFICATION: 10/11/2013
#This code computes the X^2 for a set of different HOD parameters

#to always generate the same results for a particular value of M1 & alpha,
#edit the HOD_library.py code and comment out the lines with the seeds

#the range over which M1 and alpha are varied has to be specified
#below, not in the INPUT

#Be careful with the IDs. In Gadget the IDs start from 1 whereas when we sort
#them the first one will be 0, for instance:
#import numpy as np
#a=np.array([1,2,8,5,4,9,6,3,7])
#b=a.argsort(axis=0)
#b
#array([0, 1, 7, 4, 3, 6, 8, 2, 5])
#i.e. b[1] will return 1, whereas it should be 0

from mpi4py import MPI
import numpy as np
import scipy.integrate as si
import snap_chooser as SC
import readsnap
import readsubf
import HOD_library as HOD
import correlation_function_library as CF
import sys
import os
import random

#function used to compute wp(rp): d(wp) / dr = 2r*xi(r) / sqrt(r^2-rp^2)
def deriv(y,x,r,xi,rp):
    value=2.0*x*np.interp(x,r,xi)/np.sqrt(x**2-rp**2)
    return np.array([value])


###### MPI DEFINITIONS ######
comm=MPI.COMM_WORLD
nprocs=comm.Get_size()
myrank=comm.Get_rank()

########################### INPUT ###############################
if len(sys.argv)>1:
    sa=sys.argv

    snapshot_fname=sa[1]; groups_fname=sa[2]; groups_number=sa[3]

    mass_criteria=sa[4]; min_mass=float(sa[5]); max_mass=float(sa[6])

    fiducial_density=float(sa[7])
    M1_min=float(sa[8]); M1_max=float(sa[9]); M1_bins=int(sa[10]);
    alpha_min=float(sa[11]); alpha_max=float(sa[12]); alpha_bins=int(sa[13])

    random_file=sa[14]

    BoxSize=float(sa[15])
    Rmin=float(sa[16]); Rmax=float(sa[17]); bins=int(sa[18])

    DD_name=sa[19]; RR_name=sa[20]; DR_name=sa[21]
    DD_action=sa[22]; RR_action=sa[23]; DR_action=sa[24]

    wp_file=sa[25]; results_file=sa[26]

else:
    #### SNAPSHOTS TO SELECT GALAXIES WITHIN CDM HALOS ####
    snapshot_fname='../../snapdir_003/snap_003'
    groups_fname='../../'
    groups_number=3

    #### HALO CATALOGUE PARAMETERS ####
    mass_criteria='m200'  #'t200' 'm200' or 'c200'
    min_mass=3e10  #Msun/h
    max_mass=2e15  #Msun/h

    ### HOD PARAMETERS ###
    fiducial_density=0.00111  #mean number density for galaxies with Mr<-21
    #M1_min=6.0e13; M1_max=1.0e14; M1_bins=20
    #alpha_min=1.05; alpha_max=1.60; alpha_bins=20

    M1_min=6.9e+13; M1_max=6.9e+13; M1_bins=100
    alpha_min=1.20; alpha_max=1.20; alpha_bins=100

    #### RANDOM CATALOG ####
    random_file='/home/villa/disksom2/Correlation_function/Random_catalogue/random_catalogue_4e5.dat'

    #### PARAMETERS ####
    BoxSize=500.0  #Mpc/h
    Rmin=0.1  #Mpc/h
    Rmax=75.0  #Mpc/h
    bins=60

    #### PARTIAL RESULTS NAMES ####
    DD_name='DD.dat'  #name of the file containing the DD results
    RR_name='../RR_0.1_75_60_4e5.dat'  #name of the file containing the RR results
    DR_name='DR.dat'  #name of the file containing the DR results

    #### ACTIONS ####
    DD_action='compute'  #'compute' or 'read' (from DD_name file)
    RR_action='read'     #'compute' or 'read' (from RR_name file)
    DR_action='compute'  #'compute' or 'read' (from DR_name file)

    #### wp FILE ####
    wp_file='../w_p_21.dat'
    wp_covariance_file='../wp_covar_21.0.dat'

    #### OUTPUT ####
    results_file='borrar.dat'
######################################################

if myrank==0:

    #read positions and IDs of DM particles: sort the IDs array
    DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)
    #IDs should go from 0 to N-1, instead of from 1 to N
    DM_ids=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1
    if np.min(DM_ids)!=0 or np.max(DM_ids)!=(len(DM_pos)-1):
        print 'Error!!!!'
        print 'IDs should go from 0 to N-1'
        print len(DM_ids),np.min(DM_ids),np.max(DM_ids)
    sorted_ids=DM_ids.argsort(axis=0)
    del DM_ids
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]

    #read the IDs of the particles belonging to the CDM halos
    #again the IDs should go from 0 to N-1
    halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
                               long_ids=True,read_all=True)
    IDs=halos_ID.SubIDs-1
    del halos_ID

    print 'subhalos IDs=',np.min(IDs),np.max(IDs)

    #read CDM halos information
    halos=readsubf.subfind_catalog(groups_fname,groups_number,
                                   group_veldisp=True,masstab=True,
                                   long_ids=True,swap=False)
    if mass_criteria=='t200':
        halos_mass=halos.group_m_tophat200*1e10  #masses in Msun/h
        halos_radius=halos.group_r_tophat200     #radius in kpc/h
    elif mass_criteria=='m200':
        halos_mass=halos.group_m_mean200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_mean200       #radius in kpc/h
    elif mass_criteria=='c200':
        halos_mass=halos.group_m_crit200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_crit200       #radius in kpc/h
    else:
        print 'bad mass_criteria'
        sys.exit()
    halos_pos=halos.group_pos
    halos_len=halos.group_len
    halos_offset=halos.group_offset
    halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
    del halos

    print ' '
    print 'total halos found=',len(halos_pos)
    print 'halos number density=',len(halos_pos)/BoxSize**3

    #keep only the halos in the given mass range
    halo_mass=halos_mass[halos_indexes]
    halo_pos=halos_pos[halos_indexes]
    halo_radius=halos_radius[halos_indexes]
    halo_len=halos_len[halos_indexes]
    halo_offset=halos_offset[halos_indexes]
    del halos_indexes

    if np.any(halo_len==[]):
        print 'something bad'

    #read the random catalogue (new version)
    dt=np.dtype((np.float32,3))
    pos_r=np.fromfile(random_file,dtype=dt)*BoxSize  #Mpc/h

    #read the wp file
    f=open(wp_file,'r'); wp=[]
    for line in f.readlines():
        a=line.split()
        wp.append([float(a[0]),float(a[1]),float(a[2])])
    f.close(); wp=np.array(wp)

    #read the covariance matrix file
    f=open(wp_covariance_file,'r')
    Cov=[]
    for line in f.readlines():
        a=line.split()
        for value in a:
            Cov.append(float(value))
    f.close(); Cov=np.array(Cov)
    if len(Cov)!=len(wp)**2:
        print 'problem with the number of points in the covariance file'
        sys.exit()
    Cov=np.reshape(Cov,(len(wp),len(wp)))
    Cov=np.matrix(Cov)

for g in range(100):

    ##### MASTER #####
    if myrank==0:

        #set here the range of M1, alpha to vary
        #print 'M1='; M1=float(raw_input())
        #print 'alpha='; alpha=float(raw_input())

        #M1=1.0e14+0.4e14*np.random.random()
        #alpha=1.10+0.3*np.random.random()
        #seed=np.random.randint(0,3000,1)[0]

        M1=1.15e14
        alpha=1.27
        seed=955

        #create the galaxy catalogue through the HOD parameters
        pos_g=HOD.hod_fast(DM_pos,sorted_ids,IDs,halo_mass,halo_pos,
                           halo_radius,halo_len,halo_offset,BoxSize,
                           min_mass,max_mass,fiducial_density,M1,
                           alpha,seed,verbose=True)/1e3

        #compute the 2pt correlation function
        r,xi_r,error_xi=CF.TPCF(pos_g,pos_r,BoxSize,DD_action,
                                RR_action,DR_action,DD_name,RR_name,
                                DR_name,bins,Rmin,Rmax)

        f=open('correlation_function.dat','w')
        for i in range(len(r)):
            f.write(str(r[i])+' '+str(xi_r[i])+' '+str(error_xi[i])+'\n')
        f.close()

        r_max=np.max(r)
        h=1e-13  #discontinuity at r=rp; we integrate from r=rp+h to r_max
        yinit=np.array([0.0])

        f=open('projected_correlation_function.dat','w')
        wp_HOD=[]
        for rp in wp[:,0]:
            x=np.array([rp+h,r_max])
            y=si.odeint(deriv,yinit,x,args=(r,xi_r,rp),mxstep=100000)
            wp_HOD.append(y[1][0])
            f.write(str(rp)+' '+str(y[1][0])+'\n')
        wp_HOD=np.array(wp_HOD)
        f.close()

        print 'M1=',M1
        print 'alpha=',alpha

        chi2_bins=(wp_HOD-wp[:,1])**2/wp[:,2]**2

        for min_bin in [2]:
            for max_bin in [12]:
                elements=np.arange(min_bin,max_bin)

                #X^2 without the covariance matrix
                chi2_nocov=np.sum(chi2_bins[elements])

                #X^2 with the covariance matrix
                wp_aux=wp[elements,1]; wp_HOD_aux=wp_HOD[elements]
                Cov_aux=Cov[elements,:][:,elements]
                diff=np.matrix(wp_HOD_aux-wp_aux)
                chi2=diff*Cov_aux.I*diff.T

                print 'X2('+str(min_bin)+'-'+str(max_bin)+')=',chi2_nocov,chi2
                g=open(results_file,'a')
                g.write(str(M1)+' '+str(alpha)+' '+str(seed)+' '+str(chi2)+'\n')
                g.close()


    ##### SLAVES #####
    else:
        pos_g=None; pos_r=None
        CF.TPCF(pos_g,pos_r,BoxSize,DD_action,RR_action,DR_action,
                DD_name,RR_name,DR_name,bins,Rmin,Rmax)

#+
# VIDE -- Void IDentification and Examination -- ./python_tools/fit_hod/HOD_parameters.py
# Copyright (C) 2010-2014 Guilhem Lavaux
# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#+
#LATEST MODIFICATION: 10/11/2013
#This code computes the X^2 for a set of different HOD parameters

#to always generate the same results for a particular value of M1 & alpha,
#edit the HOD_library.py code and comment out the lines with the seeds

#the range over which M1 and alpha are varied has to be specified
#below, not in the INPUT

#Be careful with the IDs. In Gadget the IDs start from 1 whereas when we sort
#them the first one will be 0, for instance:
#import numpy as np
#a=np.array([1,2,8,5,4,9,6,3,7])
#b=a.argsort(axis=0)
#b
#array([0, 1, 7, 4, 3, 6, 8, 2, 5])
#i.e. b[1] will return 1, whereas it should be 0

from mpi4py import MPI
import numpy as np
import scipy.integrate as si
import snap_chooser as SC
import readsnap
import readsubf
import HOD_library as HOD
import correlation_function_library as CF
import sys
import os
import random

#function used to compute wp(rp): d(wp) / dr = 2r*xi(r) / sqrt(r^2-rp^2)
def deriv(y,x,r,xi,rp):
    value=2.0*x*np.interp(x,r,xi)/np.sqrt(x**2-rp**2)
    return np.array([value])
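
#Editorial note: deriv() is the integrand of the projected correlation function,
#  wp(rp) = 2 * integral from rp to r_max of r*xi(r)/sqrt(r^2-rp^2) dr,
#recast as an ODE in r so that scipy's odeint can accumulate the integral from
#r=rp+h (h avoids the integrable singularity at r=rp) up to r_max; see the
#odeint call in the master loop below.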


###### MPI DEFINITIONS ######
comm=MPI.COMM_WORLD
nprocs=comm.Get_size()
myrank=comm.Get_rank()

########################### INPUT ###############################
if len(sys.argv)>1:
    sa=sys.argv

    snapshot_fname=sa[1]; groups_fname=sa[2]; groups_number=sa[3]

    mass_criteria=sa[4]; min_mass=float(sa[5]); max_mass=float(sa[6])

    fiducial_density=float(sa[7])
    M1_min=float(sa[8]); M1_max=float(sa[9]); M1_bins=int(sa[10]);
    alpha_min=float(sa[11]); alpha_max=float(sa[12]); alpha_bins=int(sa[13])

    random_file=sa[14]

    BoxSize=float(sa[15])
    Rmin=float(sa[16]); Rmax=float(sa[17]); bins=int(sa[18])

    DD_name=sa[19]; RR_name=sa[20]; DR_name=sa[21]
    DD_action=sa[22]; RR_action=sa[23]; DR_action=sa[24]

    wp_file=sa[25]; results_file=sa[26]

else:
    #### SNAPSHOTS TO SELECT GALAXIES WITHIN CDM HALOS ####
    snapshot_fname='../../snapdir_003/snap_003'
    groups_fname='../../'
    groups_number=3

    #### HALO CATALOGUE PARAMETERS ####
    mass_criteria='m200'  #'t200' 'm200' or 'c200'
    min_mass=3e10  #Msun/h
    max_mass=2e15  #Msun/h

    ### HOD PARAMETERS ###
    fiducial_density=0.00111  #mean number density for galaxies with Mr<-21
    #M1_min=6.0e13; M1_max=1.0e14; M1_bins=20
    #alpha_min=1.05; alpha_max=1.60; alpha_bins=20

    M1_min=6.9e+13; M1_max=6.9e+13; M1_bins=100
    alpha_min=1.20; alpha_max=1.20; alpha_bins=100

    #### RANDOM CATALOG ####
    random_file='/home/villa/disksom2/Correlation_function/Random_catalogue/random_catalogue_4e5.dat'

    #### PARAMETERS ####
    BoxSize=500.0  #Mpc/h
    Rmin=0.1  #Mpc/h
    Rmax=75.0  #Mpc/h
    bins=60

    #### PARTIAL RESULTS NAMES ####
    DD_name='DD.dat'  #name of the file containing the DD results
    RR_name='../RR_0.1_75_60_4e5.dat'  #name of the file containing the RR results
    DR_name='DR.dat'  #name of the file containing the DR results

    #### ACTIONS ####
    DD_action='compute'  #'compute' or 'read' (from DD_name file)
    RR_action='read'     #'compute' or 'read' (from RR_name file)
    DR_action='compute'  #'compute' or 'read' (from DR_name file)

    #### wp FILE ####
    wp_file='../w_p_21.dat'
    wp_covariance_file='../wp_covar_21.0.dat'

    #### OUTPUT ####
    results_file='borrar.dat'
######################################################

if myrank==0:

    #read positions and IDs of DM particles: sort the IDs array
    DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)
    #IDs should go from 0 to N-1, instead of from 1 to N
    DM_ids=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1
    if np.min(DM_ids)!=0 or np.max(DM_ids)!=(len(DM_pos)-1):
        print('Error!!!!')
        print('IDs should go from 0 to N-1')
        print(len(DM_ids),np.min(DM_ids),np.max(DM_ids))
    sorted_ids=DM_ids.argsort(axis=0)
    del DM_ids
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]

    #read the IDs of the particles belonging to the CDM halos
    #again the IDs should go from 0 to N-1
    halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
                               long_ids=True,read_all=True)
    IDs=halos_ID.SubIDs-1
    del halos_ID

    print('subhalos IDs=',np.min(IDs),np.max(IDs))

    #read CDM halos information
    halos=readsubf.subfind_catalog(groups_fname,groups_number,
                                   group_veldisp=True,masstab=True,
                                   long_ids=True,swap=False)
    if mass_criteria=='t200':
        halos_mass=halos.group_m_tophat200*1e10  #masses in Msun/h
        halos_radius=halos.group_r_tophat200     #radius in kpc/h
    elif mass_criteria=='m200':
        halos_mass=halos.group_m_mean200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_mean200       #radius in kpc/h
    elif mass_criteria=='c200':
        halos_mass=halos.group_m_crit200*1e10    #masses in Msun/h
        halos_radius=halos.group_r_crit200       #radius in kpc/h
    else:
        print('bad mass_criteria')
        sys.exit()
    halos_pos=halos.group_pos
    halos_len=halos.group_len
    halos_offset=halos.group_offset
    halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
    del halos

    print(' ')
    print('total halos found=',len(halos_pos))
    print('halos number density=',len(halos_pos)/BoxSize**3)

    #keep only the halos in the given mass range
    halo_mass=halos_mass[halos_indexes]
    halo_pos=halos_pos[halos_indexes]
    halo_radius=halos_radius[halos_indexes]
    halo_len=halos_len[halos_indexes]
    halo_offset=halos_offset[halos_indexes]
    del halos_indexes

    if np.any(halo_len==[]):
        print('something bad')

    #read the random catalogue (new version)
    dt=np.dtype((np.float32,3))
    pos_r=np.fromfile(random_file,dtype=dt)*BoxSize  #Mpc/h

    #read the wp file
    f=open(wp_file,'r'); wp=[]
    for line in f.readlines():
        a=line.split()
        wp.append([float(a[0]),float(a[1]),float(a[2])])
    f.close(); wp=np.array(wp)

    #read the covariance matrix file
    f=open(wp_covariance_file,'r')
    Cov=[]
    for line in f.readlines():
        a=line.split()
        for value in a:
            Cov.append(float(value))
    f.close(); Cov=np.array(Cov)
    if len(Cov)!=len(wp)**2:
        print('problem with the number of points in the covariance file')
        sys.exit()
    Cov=np.reshape(Cov,(len(wp),len(wp)))
    Cov=np.matrix(Cov)

for g in range(100):

    ##### MASTER #####
    if myrank==0:

        #set here the range of M1, alpha to vary
        #print 'M1='; M1=float(raw_input())
        #print 'alpha='; alpha=float(raw_input())

        #M1=1.0e14+0.4e14*np.random.random()
        #alpha=1.10+0.3*np.random.random()
        #seed=np.random.randint(0,3000,1)[0]

        M1=1.15e14
        alpha=1.27
        seed=955

        #create the galaxy catalogue through the HOD parameters
        pos_g=HOD.hod_fast(DM_pos,sorted_ids,IDs,halo_mass,halo_pos,
                           halo_radius,halo_len,halo_offset,BoxSize,
                           min_mass,max_mass,fiducial_density,M1,
                           alpha,seed,verbose=True)/1e3

        #compute the 2pt correlation function
        r,xi_r,error_xi=CF.TPCF(pos_g,pos_r,BoxSize,DD_action,
                                RR_action,DR_action,DD_name,RR_name,
                                DR_name,bins,Rmin,Rmax)

        f=open('correlation_function.dat','w')
        for i in range(len(r)):
            f.write(str(r[i])+' '+str(xi_r[i])+' '+str(error_xi[i])+'\n')
        f.close()

        r_max=np.max(r)
        h=1e-13  #discontinuity at r=rp; we integrate from r=rp+h to r_max
        yinit=np.array([0.0])

        f=open('projected_correlation_function.dat','w')
        wp_HOD=[]
        for rp in wp[:,0]:
            x=np.array([rp+h,r_max])
            y=si.odeint(deriv,yinit,x,args=(r,xi_r,rp),mxstep=100000)
            wp_HOD.append(y[1][0])
            f.write(str(rp)+' '+str(y[1][0])+'\n')
        wp_HOD=np.array(wp_HOD)
        f.close()

        print('M1=',M1)
        print('alpha=',alpha)

        chi2_bins=(wp_HOD-wp[:,1])**2/wp[:,2]**2

        for min_bin in [2]:
            for max_bin in [12]:
                elements=np.arange(min_bin,max_bin)

                #X^2 without the covariance matrix
                chi2_nocov=np.sum(chi2_bins[elements])

                #X^2 with the covariance matrix
                wp_aux=wp[elements,1]; wp_HOD_aux=wp_HOD[elements]
                Cov_aux=Cov[elements,:][:,elements]
                diff=np.matrix(wp_HOD_aux-wp_aux)
                chi2=diff*Cov_aux.I*diff.T
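                #Editorial note: this is the generalized chi^2,
                #  chi2 = (wp_HOD-wp_data)^T C^-1 (wp_HOD-wp_data),
                #restricted to the bins [min_bin,max_bin); with np.matrix,
                #diff*Cov_aux.I*diff.T is a 1x1 matrix holding that value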

                print('X2('+str(min_bin)+'-'+str(max_bin)+')=',chi2_nocov,chi2)
                g=open(results_file,'a')
                g.write(str(M1)+' '+str(alpha)+' '+str(seed)+' '+str(chi2)+'\n')
                g.close()


    ##### SLAVES #####
    else:
        pos_g=None; pos_r=None
        CF.TPCF(pos_g,pos_r,BoxSize,DD_action,RR_action,DR_action,
                DD_name,RR_name,DR_name,bins,Rmin,Rmax)

File diff suppressed because it is too large
@@ -1,431 +1,450 @@
# routines for reading headers and data blocks from Gadget snapshot files
# usage e.g.:
#
# import readsnap as rs
# header = rs.snapshot_header("snap_063.0") # reads snapshot header
# print header.massarr
# mass = rs.read_block("snap_063","MASS",parttype=5) # reads mass for particles of type 5; using block names should work for both format 1 and 2 snapshots
# print "mass for", mass.size, "particles read"
# print mass[0:10]
#
# before using read_block, make sure that the description (and order, if using format 1 snapshot files) of the data blocks
# is correct for your configuration of Gadget
#
# for multiple-file snapshots give e.g. the filename "snap_063" rather than "snap_063.0" to read_block
# for snapshot_header the file number should be included, e.g. "snap_063.0", as the headers of the files differ
#
# the returned data block is ordered by particle species even when read from a multiple-file snapshot

import numpy as np
import os
import sys
import math

# ----- class for snapshot header -----

class snapshot_header:
    def __init__(self, filename):

        if os.path.exists(filename):
            curfilename = filename
        elif os.path.exists(filename+".0"):
            curfilename = filename+".0"
        else:
            print "file not found:", filename
            sys.exit()

        self.filename = filename
        f = open(curfilename,'rb')
        blocksize = np.fromfile(f,dtype=np.int32,count=1)
        if blocksize[0] == 8:
            swap = 0
            format = 2
        elif blocksize[0] == 256:
            swap = 0
            format = 1
        else:
            blocksize.byteswap(True)
            if blocksize[0] == 8:
                swap = 1
                format = 2
            elif blocksize[0] == 256:
                swap = 1
                format = 1
            else:
                print "incorrect file format encountered when reading header of", filename
                sys.exit()

        self.format = format
        self.swap = swap

        if format==2:
            f.seek(16, os.SEEK_CUR)

        self.npart = np.fromfile(f,dtype=np.int32,count=6)
        self.massarr = np.fromfile(f,dtype=np.float64,count=6)
        self.time = (np.fromfile(f,dtype=np.float64,count=1))[0]
        self.redshift = (np.fromfile(f,dtype=np.float64,count=1))[0]
        self.sfr = (np.fromfile(f,dtype=np.int32,count=1))[0]
        self.feedback = (np.fromfile(f,dtype=np.int32,count=1))[0]
        self.nall = np.fromfile(f,dtype=np.int32,count=6)
        self.cooling = (np.fromfile(f,dtype=np.int32,count=1))[0]
        self.filenum = (np.fromfile(f,dtype=np.int32,count=1))[0]
        self.boxsize = (np.fromfile(f,dtype=np.float64,count=1))[0]
        self.omega_m = (np.fromfile(f,dtype=np.float64,count=1))[0]
        self.omega_l = (np.fromfile(f,dtype=np.float64,count=1))[0]
        self.hubble = (np.fromfile(f,dtype=np.float64,count=1))[0]

        if swap:
            self.npart.byteswap(True)
            self.massarr.byteswap(True)
            self.time = self.time.byteswap()
            self.redshift = self.redshift.byteswap()
            self.sfr = self.sfr.byteswap()
            self.feedback = self.feedback.byteswap()
            self.nall.byteswap(True)
            self.cooling = self.cooling.byteswap()
            self.filenum = self.filenum.byteswap()
            self.boxsize = self.boxsize.byteswap()
            self.omega_m = self.omega_m.byteswap()
            self.omega_l = self.omega_l.byteswap()
            self.hubble = self.hubble.byteswap()

        f.close()

# ----- find offset and size of data block -----

def find_block(filename, format, swap, block, block_num, only_list_blocks=False):
    if (not os.path.exists(filename)):
        print "file not found:", filename
        sys.exit()

    f = open(filename,'rb')
    f.seek(0, os.SEEK_END)
    filesize = f.tell()
    f.seek(0, os.SEEK_SET)

    found = False
    curblock_num = 1
    while ((not found) and (f.tell()<filesize)):
        if format==2:
            f.seek(4, os.SEEK_CUR)
            curblock = f.read(4)
            if (block == curblock):
                found = True
            f.seek(8, os.SEEK_CUR)
        else:
            if curblock_num==block_num:
                found = True

        curblocksize = (np.fromfile(f,dtype=np.uint32,count=1))[0]
        if swap:
            curblocksize = curblocksize.byteswap()

        # - print some debug info about found data blocks -
        #if format==2:
        #    print curblock, curblock_num, curblocksize
        #else:
        #    print curblock_num, curblocksize

        if only_list_blocks:
            if format==2:
                print curblock_num,curblock,f.tell(),curblocksize
            else:
                print curblock_num,f.tell(),curblocksize
            found = False

        if found:
            blocksize = curblocksize
            offset = f.tell()
        else:
            f.seek(curblocksize, os.SEEK_CUR)
            blocksize_check = (np.fromfile(f,dtype=np.uint32,count=1))[0]
            if swap: blocksize_check = blocksize_check.byteswap()
            if (curblocksize != blocksize_check):
                print "something wrong"
                sys.exit()
            curblock_num += 1
    f.close()

    if ((not found) and (not only_list_blocks)):
        print "Error: block not found"
        sys.exit()

    if (not only_list_blocks):
        return offset,blocksize

# ----- read data block -----
#for snapshots with a very large number of particles set nall manually,
#for instance nall=np.array([0,2048**3,0,0,0,0])
def read_block(filename, block, parttype=-1, physical_velocities=True, arepo=0, no_masses=False, verbose=False, nall=[0,0,0,0,0,0]):
|
||||
if (verbose):
|
||||
print "reading block", block
|
||||
|
||||
blockadd=0
|
||||
blocksub=0
|
||||
|
||||
if arepo==0:
|
||||
if (verbose):
|
||||
print "Gadget format"
|
||||
blockadd=0
|
||||
if arepo==1:
|
||||
if (verbose):
|
||||
print "Arepo format"
|
||||
blockadd=1
|
||||
if arepo==2:
|
||||
if (verbose):
|
||||
print "Arepo extended format"
|
||||
blockadd=4
|
||||
if no_masses==True:
|
||||
if (verbose):
|
||||
print "No mass block present"
|
||||
blocksub=1
|
||||
|
||||
if parttype not in [-1,0,1,2,3,4,5]:
|
||||
print "wrong parttype given"
|
||||
sys.exit()
|
||||
|
||||
if os.path.exists(filename):
|
||||
curfilename = filename
|
||||
elif os.path.exists(filename+".0"):
|
||||
curfilename = filename+".0"
|
||||
else:
|
||||
print "file not found:", filename
|
||||
print "and:", curfilename
|
||||
sys.exit()
|
||||
|
||||
head = snapshot_header(curfilename)
|
||||
format = head.format
|
||||
|
||||
print "FORMAT=", format
|
||||
swap = head.swap
|
||||
npart = head.npart
|
||||
massarr = head.massarr
|
||||
if np.all(nall==[0,0,0,0,0,0]):
|
||||
nall = head.nall
|
||||
filenum = head.filenum
|
||||
redshift = head.redshift
|
||||
time = head.time
|
||||
del head
|
||||
|
||||
# - description of data blocks -
|
||||
# add or change blocks as needed for your Gadget version
|
||||
data_for_type = np.zeros(6,bool) # should be set to "True" below for the species for which data is stored in the data block #by doing this, the default value is False data_for_type=[False,False,False,False,False,False]
|
||||
dt = np.float32 # data type of the data in the block
|
||||
if block=="POS ":
|
||||
data_for_type[:] = True
|
||||
dt = np.dtype((np.float32,3))
|
||||
block_num = 2
|
||||
elif block=="VEL ":
|
||||
data_for_type[:] = True
|
||||
dt = np.dtype((np.float32,3))
|
||||
block_num = 3
|
||||
elif block=="ID ":
|
||||
data_for_type[:] = True
|
||||
dt = np.uint32
|
||||
block_num = 4
|
||||
#only used for format I, when file structure is HEAD,POS,VEL,ID,ACCE
|
||||
elif block=="ACCE": #This is only for the PIETRONI project
|
||||
data_for_type[:] = True #This is only for the PIETRONI project
|
||||
dt = np.dtype((np.float32,3)) #This is only for the PIETRONI project
|
||||
block_num = 5 #This is only for the PIETRONI project
|
||||
elif block=="MASS":
|
||||
data_for_type[np.where(massarr==0)] = True
|
||||
block_num = 5
|
||||
if parttype>=0 and massarr[parttype]>0:
|
||||
if (verbose):
|
||||
print "filling masses according to massarr"
|
||||
return np.ones(nall[parttype],dtype=dt)*massarr[parttype]
|
||||
elif block=="U ":
|
||||
data_for_type[0] = True
|
||||
block_num = 6-blocksub
|
||||
elif block=="RHO ":
|
||||
data_for_type[0] = True
|
||||
block_num = 7-blocksub
|
||||
elif block=="VOL ":
|
||||
data_for_type[0] = True
|
||||
block_num = 8-blocksub
|
||||
elif block=="CMCE":
|
||||
data_for_type[0] = True
|
||||
dt = np.dtype((np.float32,3))
|
||||
block_num = 9-blocksub
|
||||
elif block=="AREA":
|
||||
data_for_type[0] = True
|
||||
block_num = 10-blocksub
|
||||
elif block=="NFAC":
|
||||
data_for_type[0] = True
|
||||
dt = np.dtype(np.int64) #depends on code version, most recent hast int32, old MyIDType
|
||||
block_num = 11-blocksub
|
||||
elif block=="NE ":
|
||||
data_for_type[0] = True
|
||||
block_num = 8+blockadd-blocksub
|
||||
elif block=="NH ":
|
||||
data_for_type[0] = True
|
||||
block_num = 9+blockadd-blocksub
|
||||
elif block=="HSML":
|
||||
data_for_type[0] = True
|
||||
block_num = 10+blockadd-blocksub
|
||||
elif block=="SFR ":
|
||||
data_for_type[0] = True
|
||||
block_num = 11+blockadd-blocksub
|
||||
elif block=="MHI ": #This is only for the bias_HI project
|
||||
data_for_type[0] = True #This is only for the bias_HI project
|
||||
block_num = 12+blockadd-blocksub #This is only for the bias_HI project
|
||||
elif block=="TEMP": #This is only for the bias_HI project
|
||||
data_for_type[0] = True #This is only for the bias_HI project
|
||||
block_num = 13+blockadd-blocksub #This is only for the bias_HI project
|
||||
elif block=="AGE ":
|
||||
data_for_type[4] = True
|
||||
block_num = 12+blockadd-blocksub
|
||||
elif block=="Z ":
|
||||
data_for_type[0] = True
|
||||
data_for_type[4] = True
|
||||
block_num = 13+blockadd-blocksub
|
||||
elif block=="BHMA":
|
||||
data_for_type[5] = True
|
||||
block_num = 14+blockadd-blocksub
|
||||
elif block=="BHMD":
|
||||
data_for_type[5] = True
|
||||
block_num = 15+blockadd-blocksub
|
||||
else:
|
||||
print "Sorry! Block type", block, "not known!"
|
||||
sys.exit()
|
||||
# - end of block description -
|
||||
|
||||
actual_data_for_type = np.copy(data_for_type)
|
||||
if parttype >= 0:
|
||||
actual_data_for_type[:] = False
|
||||
actual_data_for_type[parttype] = True
|
||||
if data_for_type[parttype]==False:
|
||||
print "Error: no data for specified particle type", parttype, "in the block", block
|
||||
sys.exit()
|
||||
elif block=="MASS":
|
||||
actual_data_for_type[:] = True
|
||||
|
||||
allpartnum = np.int64(0)
|
||||
species_offset = np.zeros(6,np.int64)
|
||||
for j in range(6):
|
||||
species_offset[j] = allpartnum
|
||||
if actual_data_for_type[j]:
|
||||
allpartnum += nall[j]
|
||||
|
||||
for i in range(filenum): # main loop over files
|
||||
if filenum>1:
|
||||
curfilename = filename+"."+str(i)
|
||||
|
||||
if i>0:
|
||||
head = snapshot_header(curfilename)
|
||||
npart = head.npart
|
||||
del head
|
||||
|
||||
curpartnum = np.int32(0)
|
||||
cur_species_offset = np.zeros(6,np.int64)
|
||||
for j in range(6):
|
||||
cur_species_offset[j] = curpartnum
|
||||
if data_for_type[j]:
|
||||
curpartnum += npart[j]
|
||||
|
||||
if parttype>=0:
|
||||
actual_curpartnum = npart[parttype]
|
||||
add_offset = cur_species_offset[parttype]
|
||||
else:
|
||||
actual_curpartnum = curpartnum
|
||||
add_offset = np.int32(0)
|
||||
|
||||
offset,blocksize = find_block(curfilename,format,swap,block,block_num)
|
||||
|
||||
if i==0: # fix data type for ID if long IDs are used
|
||||
if block=="ID ":
|
||||
if blocksize == np.dtype(dt).itemsize*curpartnum * 2:
|
||||
dt = np.uint64
|
||||
|
||||
if np.dtype(dt).itemsize*curpartnum != blocksize:
|
||||
print "something wrong with blocksize! expected =",np.dtype(dt).itemsize*curpartnum,"actual =",blocksize
|
||||
sys.exit()
|
||||
|
||||
f = open(curfilename,'rb')
|
||||
f.seek(offset + add_offset*np.dtype(dt).itemsize, os.SEEK_CUR)
|
||||
curdat = np.fromfile(f,dtype=dt,count=actual_curpartnum) # read data
|
||||
f.close()
|
||||
if swap:
|
||||
curdat.byteswap(True)
|
||||
|
||||
if i==0:
|
||||
data = np.empty(allpartnum,dt)
|
||||
|
||||
for j in range(6):
|
||||
if actual_data_for_type[j]:
|
||||
if block=="MASS" and massarr[j]>0: # add mass block for particles for which the mass is specified in the snapshot header
|
||||
data[species_offset[j]:species_offset[j]+npart[j]] = massarr[j]
|
||||
else:
|
||||
if parttype>=0:
|
||||
data[species_offset[j]:species_offset[j]+npart[j]] = curdat
|
||||
else:
|
||||
data[species_offset[j]:species_offset[j]+npart[j]] = curdat[cur_species_offset[j]:cur_species_offset[j]+npart[j]]
|
||||
species_offset[j] += npart[j]
|
||||
|
||||
del curdat
|
||||
|
||||
if physical_velocities and block=="VEL " and redshift!=0:
|
||||
data *= math.sqrt(time)
|
||||
|
||||
return data
|
||||
|
||||
# ----- list all data blocks in a format 2 snapshot file -----
|
||||
|
||||
def list_format2_blocks(filename):
|
||||
if os.path.exists(filename):
|
||||
curfilename = filename
|
||||
elif os.path.exists(filename+".0"):
|
||||
curfilename = filename+".0"
|
||||
else:
|
||||
print "file not found:", filename
|
||||
sys.exit()
|
||||
|
||||
head = snapshot_header(curfilename)
|
||||
format = head.format
|
||||
swap = head.swap
|
||||
del head
|
||||
|
||||
print 'GADGET FORMAT ',format
|
||||
if (format != 2):
|
||||
print "# OFFSET SIZE"
|
||||
else:
|
||||
print "# BLOCK OFFSET SIZE"
|
||||
print "-------------------------"
|
||||
|
||||
find_block(curfilename, format, swap, "XXXX", 0, only_list_blocks=True)
|
||||
|
||||
print "-------------------------"
|
||||
|
||||
def read_gadget_header(filename):
|
||||
if os.path.exists(filename):
|
||||
curfilename = filename
|
||||
elif os.path.exists(filename+".0"):
|
||||
curfilename = filename+".0"
|
||||
else:
|
||||
print "file not found:", filename
|
||||
sys.exit()
|
||||
|
||||
head=snapshot_header(curfilename)
|
||||
print 'npar=',head.npart
|
||||
print 'nall=',head.nall
|
||||
print 'a=',head.time
|
||||
print 'z=',head.redshift
|
||||
print 'masses=',head.massarr*1e10,'Msun/h'
|
||||
print 'boxsize=',head.boxsize,'kpc/h'
|
||||
print 'filenum=',head.filenum
|
||||
print 'cooling=',head.cooling
|
||||
print 'Omega_m,Omega_l=',head.omega_m,head.omega_l
|
||||
print 'h=',head.hubble,'\n'
|
||||
|
||||
rhocrit=2.77536627e11 #h**2 M_sun/Mpc**3
|
||||
rhocrit=rhocrit/1e9 #h**2M_sun/kpc**3
|
||||
|
||||
Omega_DM=head.nall[1]*head.massarr[1]*1e10/(head.boxsize**3*rhocrit)
|
||||
print 'DM mass=',head.massarr[1]*1e10,'Omega_DM=',Omega_DM
|
||||
if head.nall[2]>0 and head.massarr[2]>0:
|
||||
Omega_NU=head.nall[2]*head.massarr[2]*1e10/(head.boxsize**3*rhocrit)
|
||||
print 'NU mass=',head.massarr[2]*1e10,'Omega_NU=',Omega_NU
|
||||
print 'Sum of neutrino masses=',Omega_NU*head.hubble**2*94.1745,'eV'
|
||||
#+
|
||||
# VIDE -- Void IDentification and Examination -- ./python_tools/fit_hod/readsnap.py
|
||||
# Copyright (C) 2010-2014 Guilhem Lavaux
|
||||
# Copyright (C) 2011-2014 P. M. Sutter
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#+
|
||||
# routines for reading headers and data blocks from Gadget snapshot files
|
||||
# usage e.g.:
|
||||
#
|
||||
# import readsnap as rs
|
||||
# header = rs.snapshot_header("snap_063.0") # reads snapshot header
|
||||
# print header.massarr
|
||||
# mass = rs.read_block("snap_063","MASS",parttype=5) # reads mass for particles of type 5, using block names should work for both format 1 and 2 snapshots
|
||||
# print "mass for", mass.size, "particles read"
|
||||
# print mass[0:10]
|
||||
#
|
||||
# before using read_block, make sure that the description (and order if using format 1 snapshot files) of the data blocks
|
||||
# is correct for your configuration of Gadget
|
||||
#
|
||||
# for mutliple file snapshots give e.g. the filename "snap_063" rather than "snap_063.0" to read_block
|
||||
# for snapshot_header the file number should be included, e.g."snap_063.0", as the headers of the files differ
|
||||
#
|
||||
# the returned data block is ordered by particle species even when read from a multiple file snapshot
|
||||
|
||||
import numpy as np
|
||||
import os
|
||||
import sys
|
||||
import math
|
||||
|
||||
# ----- class for snapshot header -----
|
||||
|
||||
class snapshot_header:
|
||||
def __init__(self, filename):
|
||||
|
||||
if os.path.exists(filename):
|
||||
curfilename = filename
|
||||
elif os.path.exists(filename+".0"):
|
||||
curfilename = filename+".0"
|
||||
else:
|
||||
print("file not found:", filename)
|
||||
sys.exit()
|
||||
|
||||
self.filename = filename
|
||||
f = open(curfilename,'rb')
|
||||
blocksize = np.fromfile(f,dtype=np.int32,count=1)
|
||||
if blocksize[0] == 8:
|
||||
swap = 0
|
||||
format = 2
|
||||
elif blocksize[0] == 256:
|
||||
swap = 0
|
||||
format = 1
|
||||
else:
|
||||
blocksize.byteswap(True)
|
||||
if blocksize[0] == 8:
|
||||
swap = 1
|
||||
format = 2
|
||||
elif blocksize[0] == 256:
|
||||
swap = 1
|
||||
format = 1
|
||||
else:
|
||||
print("incorrect file format encountered when reading header of", filename)
|
||||
sys.exit()
|
||||
|
||||
self.format = format
|
||||
self.swap = swap
|
||||
|
||||
if format==2:
|
||||
f.seek(16, os.SEEK_CUR)
|
||||
|
||||
self.npart = np.fromfile(f,dtype=np.int32,count=6)
|
||||
self.massarr = np.fromfile(f,dtype=np.float64,count=6)
|
||||
self.time = (np.fromfile(f,dtype=np.float64,count=1))[0]
|
||||
self.redshift = (np.fromfile(f,dtype=np.float64,count=1))[0]
|
||||
self.sfr = (np.fromfile(f,dtype=np.int32,count=1))[0]
|
||||
self.feedback = (np.fromfile(f,dtype=np.int32,count=1))[0]
|
||||
self.nall = np.fromfile(f,dtype=np.int32,count=6)
|
||||
self.cooling = (np.fromfile(f,dtype=np.int32,count=1))[0]
|
||||
self.filenum = (np.fromfile(f,dtype=np.int32,count=1))[0]
|
||||
self.boxsize = (np.fromfile(f,dtype=np.float64,count=1))[0]
|
||||
self.omega_m = (np.fromfile(f,dtype=np.float64,count=1))[0]
|
||||
self.omega_l = (np.fromfile(f,dtype=np.float64,count=1))[0]
|
||||
self.hubble = (np.fromfile(f,dtype=np.float64,count=1))[0]
|
||||
|
||||
if swap:
|
||||
self.npart.byteswap(True)
|
||||
self.massarr.byteswap(True)
|
||||
self.time = self.time.byteswap()
|
||||
self.redshift = self.redshift.byteswap()
|
||||
self.sfr = self.sfr.byteswap()
|
||||
self.feedback = self.feedback.byteswap()
|
||||
self.nall.byteswap(True)
|
||||
self.cooling = self.cooling.byteswap()
|
||||
self.filenum = self.filenum.byteswap()
|
||||
self.boxsize = self.boxsize.byteswap()
|
||||
self.omega_m = self.omega_m.byteswap()
|
||||
self.omega_l = self.omega_l.byteswap()
|
||||
self.hubble = self.hubble.byteswap()
|
||||
|
||||
f.close()
|
||||
|
||||
# ----- find offset and size of data block -----
|
||||
|
||||
def find_block(filename, format, swap, block, block_num, only_list_blocks=False):
|
||||
if (not os.path.exists(filename)):
|
||||
print("file not found:", filename)
|
||||
sys.exit()
|
||||
|
||||
f = open(filename,'rb')
|
||||
f.seek(0, os.SEEK_END)
|
||||
filesize = f.tell()
|
||||
f.seek(0, os.SEEK_SET)
|
||||
|
||||
found = False
|
||||
curblock_num = 1
|
||||
while ((not found) and (f.tell()<filesize)):
|
||||
if format==2:
|
||||
f.seek(4, os.SEEK_CUR)
|
||||
curblock = f.read(4)
|
||||
if (block == curblock):
|
||||
found = True
|
||||
f.seek(8, os.SEEK_CUR)
|
||||
else:
|
||||
if curblock_num==block_num:
|
||||
found = True
|
||||
|
||||
curblocksize = (np.fromfile(f,dtype=np.uint32,count=1))[0]
|
||||
if swap:
|
||||
curblocksize = curblocksize.byteswap()
|
||||
|
||||
# - print some debug info about found data blocks -
|
||||
#if format==2:
|
||||
# print curblock, curblock_num, curblocksize
|
||||
#else:
|
||||
# print curblock_num, curblocksize
|
||||
|
||||
if only_list_blocks:
|
||||
if format==2:
|
||||
print(curblock_num,curblock,f.tell(),curblocksize)
|
||||
else:
|
||||
print(curblock_num,f.tell(),curblocksize)
|
||||
found = False
|
||||
|
||||
|
||||
if found:
|
||||
blocksize = curblocksize
|
||||
offset = f.tell()
|
||||
else:
|
||||
f.seek(curblocksize, os.SEEK_CUR)
|
||||
blocksize_check = (np.fromfile(f,dtype=np.uint32,count=1))[0]
|
||||
if swap: blocksize_check = blocksize_check.byteswap()
|
||||
if (curblocksize != blocksize_check):
|
||||
print("something wrong")
|
||||
sys.exit()
|
||||
curblock_num += 1
|
||||
f.close()
|
||||
|
||||
if ((not found) and (not only_list_blocks)):
|
||||
print("Error: block not found")
|
||||
sys.exit()
|
||||
|
||||
if (not only_list_blocks):
|
||||
return offset,blocksize
|
||||
|
||||
# ----- read data block -----
|
||||
#for snapshots with very very large number of particles set nall manually
|
||||
#for instance nall=np.array([0,2048**3,0,0,0,0])
|
||||
def read_block(filename, block, parttype=-1, physical_velocities=True, arepo=0, no_masses=False, verbose=False, nall=[0,0,0,0,0,0]):
|
||||
if (verbose):
|
||||
print("reading block", block)
|
||||
|
||||
blockadd=0
|
||||
blocksub=0
|
||||
|
||||
if arepo==0:
|
||||
if (verbose):
|
||||
print("Gadget format")
|
||||
blockadd=0
|
||||
if arepo==1:
|
||||
if (verbose):
|
||||
print("Arepo format")
|
||||
blockadd=1
|
||||
if arepo==2:
|
||||
if (verbose):
|
||||
print("Arepo extended format")
|
||||
blockadd=4
|
||||
if no_masses==True:
|
||||
if (verbose):
|
||||
print("No mass block present")
|
||||
blocksub=1
|
||||
|
||||
if parttype not in [-1,0,1,2,3,4,5]:
|
||||
print("wrong parttype given")
|
||||
sys.exit()
|
||||
|
||||
if os.path.exists(filename):
|
||||
curfilename = filename
|
||||
elif os.path.exists(filename+".0"):
|
||||
curfilename = filename+".0"
|
||||
else:
|
||||
print("file not found:", filename)
|
||||
print("and:", curfilename)
|
||||
sys.exit()
|
||||
|
||||
head = snapshot_header(curfilename)
|
||||
format = head.format
|
||||
|
||||
print("FORMAT=", format)
|
||||
swap = head.swap
|
||||
npart = head.npart
|
||||
massarr = head.massarr
|
||||
if np.all(nall==[0,0,0,0,0,0]):
|
||||
nall = head.nall
|
||||
filenum = head.filenum
|
||||
redshift = head.redshift
|
||||
time = head.time
|
||||
del head
|
||||
|
||||
# - description of data blocks -
|
||||
# add or change blocks as needed for your Gadget version
|
||||
data_for_type = np.zeros(6,bool) # should be set to "True" below for the species for which data is stored in the data block #by doing this, the default value is False data_for_type=[False,False,False,False,False,False]
|
||||
dt = np.float32 # data type of the data in the block
|
||||
if block=="POS ":
|
||||
data_for_type[:] = True
|
||||
dt = np.dtype((np.float32,3))
|
||||
block_num = 2
|
||||
elif block=="VEL ":
|
||||
data_for_type[:] = True
|
||||
dt = np.dtype((np.float32,3))
|
||||
block_num = 3
|
||||
elif block=="ID ":
|
||||
data_for_type[:] = True
|
||||
dt = np.uint32
|
||||
block_num = 4
|
||||
#only used for format I, when file structure is HEAD,POS,VEL,ID,ACCE
|
||||
elif block=="ACCE": #This is only for the PIETRONI project
|
||||
data_for_type[:] = True #This is only for the PIETRONI project
|
||||
dt = np.dtype((np.float32,3)) #This is only for the PIETRONI project
|
||||
block_num = 5 #This is only for the PIETRONI project
|
||||
elif block=="MASS":
|
||||
data_for_type[np.where(massarr==0)] = True
|
||||
block_num = 5
|
||||
if parttype>=0 and massarr[parttype]>0:
|
||||
if (verbose):
|
||||
print("filling masses according to massarr")
|
||||
return np.ones(nall[parttype],dtype=dt)*massarr[parttype]
|
||||
elif block=="U ":
|
||||
data_for_type[0] = True
|
||||
block_num = 6-blocksub
|
||||
elif block=="RHO ":
|
||||
data_for_type[0] = True
|
||||
block_num = 7-blocksub
|
||||
elif block=="VOL ":
|
||||
data_for_type[0] = True
|
||||
block_num = 8-blocksub
|
||||
elif block=="CMCE":
|
||||
data_for_type[0] = True
|
||||
dt = np.dtype((np.float32,3))
|
||||
block_num = 9-blocksub
|
||||
elif block=="AREA":
|
||||
data_for_type[0] = True
|
||||
block_num = 10-blocksub
|
||||
elif block=="NFAC":
|
||||
data_for_type[0] = True
|
||||
dt = np.dtype(np.int64) #depends on code version, most recent hast int32, old MyIDType
|
||||
block_num = 11-blocksub
|
||||
elif block=="NE ":
|
||||
data_for_type[0] = True
|
||||
block_num = 8+blockadd-blocksub
|
||||
elif block=="NH ":
|
||||
data_for_type[0] = True
|
||||
block_num = 9+blockadd-blocksub
|
||||
elif block=="HSML":
|
||||
data_for_type[0] = True
|
||||
block_num = 10+blockadd-blocksub
|
||||
elif block=="SFR ":
|
||||
data_for_type[0] = True
|
||||
block_num = 11+blockadd-blocksub
|
||||
elif block=="MHI ": #This is only for the bias_HI project
|
||||
data_for_type[0] = True #This is only for the bias_HI project
|
||||
block_num = 12+blockadd-blocksub #This is only for the bias_HI project
|
||||
elif block=="TEMP": #This is only for the bias_HI project
|
||||
data_for_type[0] = True #This is only for the bias_HI project
|
||||
block_num = 13+blockadd-blocksub #This is only for the bias_HI project
|
||||
elif block=="AGE ":
|
||||
data_for_type[4] = True
|
||||
block_num = 12+blockadd-blocksub
|
||||
elif block=="Z ":
|
||||
data_for_type[0] = True
|
||||
data_for_type[4] = True
|
||||
block_num = 13+blockadd-blocksub
|
||||
elif block=="BHMA":
|
||||
data_for_type[5] = True
|
||||
block_num = 14+blockadd-blocksub
|
||||
elif block=="BHMD":
|
||||
data_for_type[5] = True
|
||||
block_num = 15+blockadd-blocksub
|
||||
else:
|
||||
print("Sorry! Block type", block, "not known!")
|
||||
sys.exit()
|
||||
# - end of block description -
|
||||
|
||||
actual_data_for_type = np.copy(data_for_type)
|
||||
if parttype >= 0:
|
||||
actual_data_for_type[:] = False
|
||||
actual_data_for_type[parttype] = True
|
||||
if data_for_type[parttype]==False:
|
||||
print("Error: no data for specified particle type", parttype, "in the block", block)
|
||||
sys.exit()
|
||||
elif block=="MASS":
|
||||
actual_data_for_type[:] = True
|
||||
|
||||
allpartnum = np.int64(0)
|
||||
species_offset = np.zeros(6,np.int64)
|
||||
for j in range(6):
|
||||
species_offset[j] = allpartnum
|
||||
if actual_data_for_type[j]:
|
||||
allpartnum += nall[j]
|
||||
|
||||
for i in range(filenum): # main loop over files
|
||||
if filenum>1:
|
||||
curfilename = filename+"."+str(i)
|
||||
|
||||
if i>0:
|
||||
head = snapshot_header(curfilename)
|
||||
npart = head.npart
|
||||
del head
|
||||
|
||||
curpartnum = np.int32(0)
|
||||
cur_species_offset = np.zeros(6,np.int64)
|
||||
for j in range(6):
|
||||
cur_species_offset[j] = curpartnum
|
||||
if data_for_type[j]:
|
||||
curpartnum += npart[j]
|
||||
|
||||
if parttype>=0:
|
||||
actual_curpartnum = npart[parttype]
|
||||
add_offset = cur_species_offset[parttype]
|
||||
else:
|
||||
actual_curpartnum = curpartnum
|
||||
add_offset = np.int32(0)
|
||||
|
||||
offset,blocksize = find_block(curfilename,format,swap,block,block_num)
|
||||
|
||||
if i==0: # fix data type for ID if long IDs are used
|
||||
if block=="ID ":
|
||||
if blocksize == np.dtype(dt).itemsize*curpartnum * 2:
|
||||
dt = np.uint64
|
||||
|
||||
if np.dtype(dt).itemsize*curpartnum != blocksize:
|
||||
print("something wrong with blocksize! expected =",np.dtype(dt).itemsize*curpartnum,"actual =",blocksize)
|
||||
sys.exit()
|
||||
|
||||
f = open(curfilename,'rb')
|
||||
f.seek(offset + add_offset*np.dtype(dt).itemsize, os.SEEK_CUR)
|
||||
curdat = np.fromfile(f,dtype=dt,count=actual_curpartnum) # read data
|
||||
f.close()
|
||||
if swap:
|
||||
curdat.byteswap(True)
|
||||
|
||||
if i==0:
|
||||
data = np.empty(allpartnum,dt)
|
||||
|
||||
for j in range(6):
|
||||
if actual_data_for_type[j]:
|
||||
if block=="MASS" and massarr[j]>0: # add mass block for particles for which the mass is specified in the snapshot header
|
||||
data[species_offset[j]:species_offset[j]+npart[j]] = massarr[j]
|
||||
else:
|
||||
if parttype>=0:
|
||||
data[species_offset[j]:species_offset[j]+npart[j]] = curdat
|
||||
else:
|
||||
data[species_offset[j]:species_offset[j]+npart[j]] = curdat[cur_species_offset[j]:cur_species_offset[j]+npart[j]]
|
||||
species_offset[j] += npart[j]
|
||||
|
||||
del curdat
|
||||
|
||||
if physical_velocities and block=="VEL " and redshift!=0:
|
||||
data *= math.sqrt(time)
|
||||
|
||||
return data
|
||||
|
||||
# ----- list all data blocks in a format 2 snapshot file -----
|
||||
|
||||
def list_format2_blocks(filename):
|
||||
if os.path.exists(filename):
|
||||
curfilename = filename
|
||||
elif os.path.exists(filename+".0"):
|
||||
curfilename = filename+".0"
|
||||
else:
|
||||
print("file not found:", filename)
|
||||
sys.exit()
|
||||
|
||||
head = snapshot_header(curfilename)
|
||||
format = head.format
|
||||
swap = head.swap
|
||||
del head
|
||||
|
||||
print('GADGET FORMAT ',format)
|
||||
if (format != 2):
|
||||
print("# OFFSET SIZE")
|
||||
else:
|
||||
print("# BLOCK OFFSET SIZE")
|
||||
print("-------------------------")
|
||||
|
||||
find_block(curfilename, format, swap, "XXXX", 0, only_list_blocks=True)
|
||||
|
||||
print("-------------------------")
|
||||
|
||||
def read_gadget_header(filename):
|
||||
if os.path.exists(filename):
|
||||
curfilename = filename
|
||||
elif os.path.exists(filename+".0"):
|
||||
curfilename = filename+".0"
|
||||
else:
|
||||
print("file not found:", filename)
|
||||
sys.exit()
|
||||
|
||||
head=snapshot_header(curfilename)
|
||||
print('npar=',head.npart)
|
||||
print('nall=',head.nall)
|
||||
print('a=',head.time)
|
||||
print('z=',head.redshift)
|
||||
print('masses=',head.massarr*1e10,'Msun/h')
|
||||
print('boxsize=',head.boxsize,'kpc/h')
|
||||
print('filenum=',head.filenum)
|
||||
print('cooling=',head.cooling)
|
||||
print('Omega_m,Omega_l=',head.omega_m,head.omega_l)
|
||||
print('h=',head.hubble,'\n')
|
||||
|
||||
rhocrit=2.77536627e11 #h**2 M_sun/Mpc**3
|
||||
rhocrit=rhocrit/1e9 #h**2M_sun/kpc**3
|
||||
|
||||
Omega_DM=head.nall[1]*head.massarr[1]*1e10/(head.boxsize**3*rhocrit)
|
||||
print('DM mass=',head.massarr[1]*1e10,'Omega_DM=',Omega_DM)
|
||||
if head.nall[2]>0 and head.massarr[2]>0:
|
||||
Omega_NU=head.nall[2]*head.massarr[2]*1e10/(head.boxsize**3*rhocrit)
|
||||
print('NU mass=',head.massarr[2]*1e10,'Omega_NU=',Omega_NU)
|
||||
print('Sum of neutrino masses=',Omega_NU*head.hubble**2*94.1745,'eV')
|
||||
|
|
|
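For quick reference, a minimal Python 3 usage sketch of the ported reader, following the usage comments at the top of the file; "snap_063" is a placeholder snapshot basename, not a file shipped with VIDE:

import readsnap as rs

header = rs.snapshot_header("snap_063.0")   # per-file header; include the file number
print(header.massarr)

# block names work for both format 1 and format 2 snapshots
mass = rs.read_block("snap_063", "MASS", parttype=5)
print("mass for", mass.size, "particles read")
print(mass[0:10])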
@@ -1,290 +1,309 @@
+#+
+# VIDE -- Void IDentification and Examination -- ./python_tools/fit_hod/readsubf.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#+
 
 # code for reading Subfind's subhalo_tab files
 # usage e.g.:
 #
 # import readsubf
 # cat = readsubf.subfind_catalog("./m_10002_h_94_501_z3_csf/",63,masstab=True)
 # print cat.nsubs
 # print "largest halo x position = ",cat.sub_pos[0][0]
 
 import numpy as np
 import os
 import sys
 
 class subfind_catalog:
   def __init__(self, basedir, snapnum, group_veldisp = False, masstab = False, long_ids = False, swap = False):
     self.filebase = basedir + "/groups_" + str(snapnum).zfill(3) + "/subhalo_tab_" + str(snapnum).zfill(3) + "."
 
     #print
     #print "reading subfind catalog for snapshot",snapnum,"of",basedir
 
     if long_ids: self.id_type = np.uint64
     else: self.id_type = np.uint32
 
     self.group_veldisp = group_veldisp
     self.masstab = masstab
 
     filenum = 0
     doneflag = False
     skip_gr = 0
     skip_sub = 0
     while not doneflag:
       curfile = self.filebase + str(filenum)
 
       if (not os.path.exists(curfile)):
-        print "file not found:", curfile
+        print("file not found:", curfile)
         sys.exit()
 
       f = open(curfile,'rb')
 
       ngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
       totngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
       nids = np.fromfile(f, dtype=np.uint32, count=1)[0]
       totnids = np.fromfile(f, dtype=np.uint64, count=1)[0]
       ntask = np.fromfile(f, dtype=np.uint32, count=1)[0]
       nsubs = np.fromfile(f, dtype=np.uint32, count=1)[0]
       totnsubs = np.fromfile(f, dtype=np.uint32, count=1)[0]
 
       if swap:
         ngroups = ngroups.byteswap()
         totngroups = totngroups.byteswap()
         nids = nids.byteswap()
         totnids = totnids.byteswap()
         ntask = ntask.byteswap()
         nsubs = nsubs.byteswap()
         totnsubs = totnsubs.byteswap()
 
       if filenum == 0:
         self.ngroups = totngroups
         self.nids = totnids
         self.nfiles = ntask
         self.nsubs = totnsubs
 
         self.group_len = np.empty(totngroups, dtype=np.uint32)
         self.group_offset = np.empty(totngroups, dtype=np.uint32)
         self.group_mass = np.empty(totngroups, dtype=np.float32)
         self.group_pos = np.empty(totngroups, dtype=np.dtype((np.float32,3)))
         self.group_m_mean200 = np.empty(totngroups, dtype=np.float32)
         self.group_r_mean200 = np.empty(totngroups, dtype=np.float32)
         self.group_m_crit200 = np.empty(totngroups, dtype=np.float32)
         self.group_r_crit200 = np.empty(totngroups, dtype=np.float32)
         self.group_m_tophat200 = np.empty(totngroups, dtype=np.float32)
         self.group_r_tophat200 = np.empty(totngroups, dtype=np.float32)
         if group_veldisp:
           self.group_veldisp_mean200 = np.empty(totngroups, dtype=np.float32)
           self.group_veldisp_crit200 = np.empty(totngroups, dtype=np.float32)
           self.group_veldisp_tophat200 = np.empty(totngroups, dtype=np.float32)
           self.group_contamination_count = np.empty(totngroups, dtype=np.uint32)
           self.group_contamination_mass = np.empty(totngroups, dtype=np.float32)
         self.group_nsubs = np.empty(totngroups, dtype=np.uint32)
         self.group_firstsub = np.empty(totngroups, dtype=np.uint32)
 
         self.sub_len = np.empty(totnsubs, dtype=np.uint32)
         self.sub_offset = np.empty(totnsubs, dtype=np.uint32)
         self.sub_parent = np.empty(totnsubs, dtype=np.uint32)
         self.sub_mass = np.empty(totnsubs, dtype=np.float32)
         self.sub_pos = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
         self.sub_vel = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
         self.sub_cm = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
         self.sub_spin = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
         self.sub_veldisp = np.empty(totnsubs, dtype=np.float32)
         self.sub_vmax = np.empty(totnsubs, dtype=np.float32)
         self.sub_vmaxrad = np.empty(totnsubs, dtype=np.float32)
         self.sub_halfmassrad = np.empty(totnsubs, dtype=np.float32)
         self.sub_id_mostbound = np.empty(totnsubs, dtype=self.id_type)
         self.sub_grnr = np.empty(totnsubs, dtype=np.uint32)
         if masstab:
           self.sub_masstab = np.empty(totnsubs, dtype=np.dtype((np.float32,6)))
 
       if ngroups > 0:
         locs = slice(skip_gr, skip_gr + ngroups)
         self.group_len[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
         self.group_offset[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
         self.group_mass[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         self.group_pos[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=ngroups)
         self.group_m_mean200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         self.group_r_mean200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         self.group_m_crit200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         self.group_r_crit200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         self.group_m_tophat200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         self.group_r_tophat200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         if group_veldisp:
           self.group_veldisp_mean200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
           self.group_veldisp_crit200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
           self.group_veldisp_tophat200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
           self.group_contamination_count[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
           self.group_contamination_mass[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
         self.group_nsubs[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
         self.group_firstsub[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
         skip_gr += ngroups
 
       if nsubs > 0:
         locs = slice(skip_sub, skip_sub + nsubs)
         self.sub_len[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
         self.sub_offset[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
         self.sub_parent[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
         self.sub_mass[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
         self.sub_pos[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
         self.sub_vel[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
         self.sub_cm[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
         self.sub_spin[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
         self.sub_veldisp[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
         self.sub_vmax[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
         self.sub_vmaxrad[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
         self.sub_halfmassrad[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
         self.sub_id_mostbound[locs] = np.fromfile(f, dtype=self.id_type, count=nsubs)
         self.sub_grnr[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
         if masstab:
           self.sub_masstab[locs] = np.fromfile(f, dtype=np.dtype((np.float32,6)), count=nsubs)
         skip_sub += nsubs
 
       curpos = f.tell()
       f.seek(0,os.SEEK_END)
-      if curpos != f.tell(): print "Warning: finished reading before EOF for file",filenum
+      if curpos != f.tell(): print("Warning: finished reading before EOF for file",filenum)
       f.close()
       #print 'finished with file number',filenum,"of",ntask
       filenum += 1
       if filenum == self.nfiles: doneflag = True
 
     if swap:
       self.group_len.byteswap(True)
       self.group_offset.byteswap(True)
       self.group_mass.byteswap(True)
       self.group_pos.byteswap(True)
       self.group_m_mean200.byteswap(True)
       self.group_r_mean200.byteswap(True)
       self.group_m_crit200.byteswap(True)
       self.group_r_crit200.byteswap(True)
       self.group_m_tophat200.byteswap(True)
       self.group_r_tophat200.byteswap(True)
       if group_veldisp:
         self.group_veldisp_mean200.byteswap(True)
         self.group_veldisp_crit200.byteswap(True)
         self.group_veldisp_tophat200.byteswap(True)
         self.group_contamination_count.byteswap(True)
         self.group_contamination_mass.byteswap(True)
       self.group_nsubs.byteswap(True)
       self.group_firstsub.byteswap(True)
 
       self.sub_len.byteswap(True)
       self.sub_offset.byteswap(True)
       self.sub_parent.byteswap(True)
       self.sub_mass.byteswap(True)
       self.sub_pos.byteswap(True)
       self.sub_vel.byteswap(True)
       self.sub_cm.byteswap(True)
       self.sub_spin.byteswap(True)
       self.sub_veldisp.byteswap(True)
       self.sub_vmax.byteswap(True)
       self.sub_vmaxrad.byteswap(True)
       self.sub_halfmassrad.byteswap(True)
       self.sub_id_mostbound.byteswap(True)
       self.sub_grnr.byteswap(True)
       if masstab:
         self.sub_masstab.byteswap(True)
 
     #print
     #print "number of groups =", self.ngroups
     #print "number of subgroups =", self.nsubs
     #if self.nsubs > 0:
     #  print "largest group of length",self.group_len[0],"has",self.group_nsubs[0],"subhalos"
     #  print
 
 
 # code for reading Subfind's ID files
 # usage e.g.:
 #
 # import readsubf
 # ids = readsubf.subf_ids("./m_10002_h_94_501_z3_csf/", 0, 100)
 
 class subf_ids:
   def __init__(self, basedir, snapnum, substart, sublen, swap = False, verbose = False, long_ids = False, read_all = False):
     self.filebase = basedir + "/groups_" + str(snapnum).zfill(3) + "/subhalo_ids_" + str(snapnum).zfill(3) + "."
 
     if (verbose):
-      print
-      print "reading subhalo IDs for snapshot",snapnum,"of",basedir
+      print()
+      print("reading subhalo IDs for snapshot",snapnum,"of",basedir)
 
     if long_ids: self.id_type = np.uint64
     else: self.id_type = np.uint32
 
     filenum = 0
     doneflag = False
     count=substart
     found=0
 
     while not doneflag:
       curfile = self.filebase + str(filenum)
 
       if (not os.path.exists(curfile)):
-        print "file not found:", curfile
+        print("file not found:", curfile)
         sys.exit()
 
       f = open(curfile,'rb')
 
       Ngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
       TotNgroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
       NIds = np.fromfile(f, dtype=np.uint32, count=1)[0]
       TotNids = np.fromfile(f, dtype=np.uint64, count=1)[0]
       NTask = np.fromfile(f, dtype=np.uint32, count=1)[0]
       Offset = np.fromfile(f, dtype=np.uint32, count=1)[0]
 
       if read_all:
         substart=0
         sublen=TotNids
       if swap:
         Ngroups = Ngroups.byteswap()
         TotNgroups = TotNgroups.byteswap()
         NIds = NIds.byteswap()
         TotNids = TotNids.byteswap()
         NTask = NTask.byteswap()
         Offset = Offset.byteswap()
       if filenum == 0:
         if (verbose):
-          print "Ngroups = ", Ngroups
-          print "TotNgroups = ", Ngroups
-          print "NIds = ", NIds
-          print "TotNids = ", TotNids
-          print "NTask = ", NTask
-          print "Offset = ", Offset
+          print("Ngroups = ", Ngroups)
+          print("TotNgroups = ", TotNgroups)
+          print("NIds = ", NIds)
+          print("TotNids = ", TotNids)
+          print("NTask = ", NTask)
+          print("Offset = ", Offset)
         self.nfiles = NTask
         self.SubLen = sublen
         self.SubIDs = np.empty(sublen, dtype=self.id_type)
 
       if count <= Offset+NIds:
         nskip = count - Offset
         nrem = Offset + NIds - count
         if sublen > nrem:
           n_to_read = nrem
         else:
           n_to_read = sublen
         if n_to_read > 0:
           if (verbose):
-            print filenum, n_to_read
+            print(filenum, n_to_read)
           if nskip > 0:
             dummy = np.fromfile(f, dtype=self.id_type, count=nskip)
             if (verbose):
-              print dummy
+              print(dummy)
           locs = slice(found, found + n_to_read)
           dummy2 = np.fromfile(f, dtype=self.id_type, count=n_to_read)
           if (verbose):
-            print dummy2
+            print(dummy2)
           self.SubIDs[locs] = dummy2
         found += n_to_read
         count += n_to_read
         sublen -= n_to_read
 
       f.close()
       filenum += 1
       if filenum == self.nfiles: doneflag = True
 
     if swap:
       self.SubIDs.byteswap(True)
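Likewise, a minimal Python 3 usage sketch for the ported Subfind readers, mirroring the usage comments above; the catalogue directory is a placeholder:

import readsubf

cat = readsubf.subfind_catalog("./m_10002_h_94_501_z3_csf/", 63, masstab=True)
print(cat.nsubs)
print("largest halo x position =", cat.sub_pos[0][0])

# first 100 particle IDs of snapshot 63 (substart=0, sublen=100)
ids = readsubf.subf_ids("./m_10002_h_94_501_z3_csf/", 63, 0, 100)
print(ids.SubIDs)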
2 python_tools/misc_util/figureOutMask.py Executable file → Normal file
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/setup.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/misc_util/figureOutMask.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/pipeline_source/prepareCatalogs.in.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/pipeline_source/prepareInputs.in.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -18,12 +18,11 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #+
-#+
 
 import numpy as np
 import os
 import sys
-import void_python_tools as vp
+import vide as vp
 import argparse
 import imp
 import subprocess
@@ -59,9 +58,9 @@ parms = imp.load_source("name", defaultsFile)
 globals().update(vars(parms))
 
 filename = args.parm
-print " Loading parameters from", filename
+print(" Loading parameters from", filename)
 if not os.access(filename, os.F_OK):
-  print " Cannot find parameter file %s!" % filename
+  print(" Cannot find parameter file %s!" % filename)
   exit(-1)
 parms = imp.load_source("name", filename)
 globals().update(vars(parms))
@@ -163,7 +162,7 @@ def writeScript(setName, dataFileNameBase, dataFormat,
 
 header = """#!/usr/bin/env/python
 import os
-from void_python_tools.backend.classes import *
+from vide.backend.classes import *
 
 continueRun = {continueRun} # set to True to enable restarting aborted jobs
 startCatalogStage = {startCatalogStage}
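The header above is a str.format template: writeScript fills the {continueRun} and {startCatalogStage} fields when it generates a pipeline script. A hedged sketch of the mechanism, with invented values for the two fields:

template = """#!/usr/bin/env/python
import os
from vide.backend.classes import *

continueRun = {continueRun} # set to True to enable restarting aborted jobs
startCatalogStage = {startCatalogStage}
"""
# expands the placeholders into a concrete script body
print(template.format(continueRun=False, startCatalogStage=1))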
@@ -235,13 +234,13 @@ dataSampleList.append(newSample)
 # converter from redshift to comoving distance
 zVsDY = np.linspace(0., zBox+8*lbox*100./LIGHT_SPEED, 10000)
 zVsDX = np.zeros(len(zVsDY))
-for i in xrange(len(zVsDY)):
+for i in range(len(zVsDY)):
   zVsDX[i] = vp.angularDiameter(zVsDY[i], Om=Om)
 
 boxWidthZ = np.interp(vp.angularDiameter(zBox,Om=Om)+100. / \
                       LIGHT_SPEED*lbox, zVsDX, zVsDY)-zBox
 
-for iSlice in xrange(numSlices):
+for iSlice in range(numSlices):
 
   if useLightCone:
     dzSafe = 0.03
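The converter in this hunk tabulates distance against redshift and then inverts the relation with np.interp (xp = tabulated distances, fp = redshifts). A self-contained sketch of the same pattern, with vp.angularDiameter stood in by a hypothetical toy distance(z):

import numpy as np

def distance(z, Om=0.27):
    # toy placeholder for vp.angularDiameter; monotonic on [0, 2]
    return 3000.0 * z * (1.0 - 0.75 * Om * z)

zgrid = np.linspace(0., 2., 10000)              # redshift grid
dgrid = np.array([distance(z) for z in zgrid])  # tabulated distance

# redshift reached 100 Mpc/h beyond the distance of z = 0.5:
z_far = np.interp(distance(0.5) + 100., dgrid, zgrid)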
@@ -277,8 +276,8 @@ dataSampleList.append(newSample)
   else:
     dataFileName = dataFileNameBase + fileNum + suffix
 
-  for iX in xrange(numSubvolumes):
-    for iY in xrange(numSubvolumes):
+  for iX in range(numSubvolumes):
+    for iY in range(numSubvolumes):
 
       mySubvolume = "%d%d" % (iX, iY)
@@ -322,7 +321,7 @@ if not os.access(catalogDir, os.F_OK): os.mkdir(catalogDir)
 baseResolution = float(numPart)/lbox/lbox/lbox # particles/Mpc^3
 prevSubSample = -1
 subSamples = sorted(subSamples, reverse=True)
-for iSubSample in xrange(len(subSamples)):
+for iSubSample in range(len(subSamples)):
 
   subSampleList = subSamples[0:iSubSample+1]
@@ -333,7 +332,7 @@ for iSubSample in xrange(len(subSamples)):
   elif subSampleMode == 'relative':
     keepFractionList.append(float(subSample))
   else:
-    print "Unrecognized subSampleMode = ", subSampleMode
+    print("Unrecognized subSampleMode = ", subSampleMode)
     exit(-1)
   thisSubSample = subSamples[iSubSample]
   maxKeep = keepFractionList[-1] * numPart
@@ -348,7 +347,7 @@ for iSubSample in xrange(len(subSamples)):
                                        fileNums[iRedshift]))
 
   if args.script or args.all:
-    print " Doing subsample", thisSubSample, "scripts"
+    print(" Doing subsample", thisSubSample, "scripts")
     sys.stdout.flush()
     setName = prefix+"ss"+str(thisSubSample)
@@ -397,11 +396,11 @@ for iSubSample in xrange(len(subSamples)):
 
 
   if (args.subsample or args.all) and doSubSamplingInPrep:
-    print " Doing subsample", thisSubSample
+    print(" Doing subsample", thisSubSample)
     sys.stdout.flush()
 
     for (iRedshift, redshift) in enumerate(redshifts):
-      print " redshift", redshift
+      print(" redshift", redshift)
       sys.stdout.flush()
 
       if dataFormat == "multidark" or dataFormat == "sdf":
@@ -500,7 +499,7 @@ for iSubSample in xrange(len(subSamples)):
         outFile.write("%s\n" %(redshift))
         outFile.write("%d\n" %(maxKeep))
 
-        for i in xrange(int(maxKeep)):
+        for i in range(int(maxKeep)):
           x = np.random.uniform()*lbox
           y = np.random.uniform()*lbox
           z = np.random.uniform()*lbox
@@ -517,7 +516,7 @@ for iSubSample in xrange(len(subSamples)):
 if (args.script or args.all) and haloFileBase != "":
 
   for minHaloMass in minHaloMasses:
-    print " Doing halo script", minHaloMass
+    print(" Doing halo script", minHaloMass)
     sys.stdout.flush()
 
     # estimate number of halos to get density
@@ -580,15 +579,15 @@ if (args.script or args.all) and haloFileBase != "":
                 dataFileNameList = fileList)
 
 if (args.halos or args.all) and haloFileBase != "" and len(minHaloMasses) > 0:
-  print " Doing halos - mass"
+  print(" Doing halos - mass")
   sys.stdout.flush()
 
   for minHaloMass in minHaloMasses:
-    print " min halo mass = ", minHaloMass
+    print(" min halo mass = ", minHaloMass)
     sys.stdout.flush()
 
     for (iRedshift, redshift) in enumerate(redshifts):
-      print " z = ", redshift
+      print(" z = ", redshift)
       sys.stdout.flush()
 
       if haloFileDummy == '':
@@ -671,7 +670,7 @@ if (args.halos or args.all) and haloFileBase != "" and len(minHaloMasses) > 0:
 if (args.script or args.all) and haloFileBase != "":
 
   for haloDen in haloDenList:
-    print " Doing halo script", haloDen
+    print(" Doing halo script", haloDen)
     sys.stdout.flush()
 
     # estimate number of halos to get density
@@ -710,15 +709,15 @@ if (args.script or args.all) and haloFileBase != "":
                 dataFileNameList = fileList)
 
 if (args.halos or args.all) and haloFileBase != "":
-  print " Doing halos - density"
+  print(" Doing halos - density")
   sys.stdout.flush()
 
   for haloDen in haloDenList:
-    print " halo density = ", haloDen
+    print(" halo density = ", haloDen)
     sys.stdout.flush()
 
     for (iRedshift, redshift) in enumerate(redshifts):
-      print " z = ", redshift
+      print(" z = ", redshift)
       sys.stdout.flush()
 
       if haloFileDummy == '':
@@ -746,7 +745,7 @@ if (args.halos or args.all) and haloFileBase != "":
 
       numPartExpect = int(np.ceil(haloDen * lbox**3))
       if numPart < numPartExpect:
-        print " ERROR: not enough halos to support that density! Maximum is %g" % (1.*numPart / lbox**3)
+        print(" ERROR: not enough halos to support that density! Maximum is %g" % (1.*numPart / lbox**3))
         exit(-1)
 
 
@@ -779,7 +778,7 @@ if (args.halos or args.all) and haloFileBase != "":
       actualDen = 1.*numPart / lbox**3
       keepFraction = haloDen / actualDen
       if numPart < numPartExpect:
-        print " ERROR: not enough galaxies to support that density! Maximum is %g" % (1.*numPart / lbox**3)
+        print(" ERROR: not enough galaxies to support that density! Maximum is %g" % (1.*numPart / lbox**3))
         exit(-1)
 
       numKept = 0
@@ -861,7 +860,7 @@ root_filename {workDir}/hod_{sampleName}
 """
 
 if (args.script or args.all) and haloFileBase != "":
-  print " Doing HOD scripts"
+  print(" Doing HOD scripts")
   sys.stdout.flush()
 
   for thisHod in hodParmList:
@@ -871,7 +870,7 @@ if (args.script or args.all) and haloFileBase != "":
     outFileName = sampleName+".dat"
     fileList.append(outFileName)
 
-    print " ", thisHod['name']
+    print(" ", thisHod['name'])
 
     # estimate number of halos to get density
     numPart = thisHod['galDensFinal'] * lbox**3
@@ -893,10 +892,10 @@ if (args.script or args.all) and haloFileBase != "":
                 dataFileNameList = fileList)
 
 if (args.hod or args.all) and haloFileBase != "":
-  print " Doing HOD"
+  print(" Doing HOD")
   sys.stdout.flush()
   for (iRedshift, redshift) in enumerate(redshifts):
-    print " z = ", redshift
+    print(" z = ", redshift)
     sys.stdout.flush()
 
     if haloFileDummy == '':
@@ -918,7 +917,7 @@ if (args.hod or args.all) and haloFileBase != "":
       haloFile = outFile
 
     for thisHod in hodParmList:
-      print " ", thisHod['name']
+      print(" ", thisHod['name'])
       sys.stdout.flush()
 
       sampleName = getSampleName(prefix+"hod_"+thisHod['name'], redshift, False)
@@ -955,16 +954,16 @@ if (args.hod or args.all) and haloFileBase != "":
       hodWorked = False
       for line in open(tempFile):
         if "MLO" in line:
-          print " (minimum halo mass = ", line.split()[1], ")"
+          print(" (minimum halo mass = ", line.split()[1], ")")
           hodWorked = True
           break
 
       if hodWorked:
         os.unlink(tempFile)
       else:
-        print "HOD Failed! Log follows:"
+        print("HOD Failed! Log follows:")
         for line in open(tempFile):
-          print line
+          print(line)
         exit(-1)
 
       # now randomly subsample the galaxies to get desired density
@@ -977,7 +976,7 @@ if (args.hod or args.all) and haloFileBase != "":
       inFile.close()
 
       if numPartActual < numPartExpect:
-        print " ERROR: not enough galaxies to support that density! Maximum is %g" % (1.*numPartActual / lbox**3)
+        print(" ERROR: not enough galaxies to support that density! Maximum is %g" % (1.*numPartActual / lbox**3))
         exit(-1)
 
       actualDen = 1.*numPartActual / lbox**3
@@ -1015,4 +1014,4 @@ if (args.hod or args.all) and haloFileBase != "":
 
       if dataFormat == "sdf": os.system("rm %s" % haloFile)
 
-print " Done!"
+print(" Done!")
@@ -26,11 +26,11 @@ import os
 VOID_GSL=os.environ.get('VOID_GSL')
 
 setup(
-    name='void_python_tools',
+    name='vide',
     version='1.0',
     cmdclass = {'build_ext': build_ext},
     include_dirs = [np.get_include()],
     packages=
-      ['void_python_tools','void_python_tools.backend','void_python_tools.apTools', 'void_python_tools.voidUtil',
-       'void_python_tools.apTools.profiles','void_python_tools.apTools.chi2',],
+      ['vide','vide.backend','vide.apTools', 'vide.voidUtil',
+       'vide.apTools.profiles','vide.apTools.chi2',],
     )
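The setup.py hunk above is where the package itself is renamed from void_python_tools to vide; every import site in the tree changes accordingly. A sketch of how a hypothetical downstream script migrates (illustrative):

    # Before this commit:
    #   import void_python_tools as vp
    #   from void_python_tools.backend import *
    # After this commit, the same code imports the renamed package:
    import vide as vp
    from vide.backend import *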
python_tools/vide/__init__.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+#+
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/__init__.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#+
+from .backend import *
+from .apTools import *
+from .voidUtil import *
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/apTools/chi2/__init__.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/apTools/__init__.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -17,4 +17,5 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #+
-from cosmologyTools import *
+from .chi2 import *
+from .profiles import *
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/apTools/profiles/__init__.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/apTools/chi2/__init__.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -17,4 +17,4 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #+
-from getSurveyProps import *
+from .cosmologyTools import *
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/apTools/chi2/cosmologyTools.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/apTools/chi2/cosmologyTools.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -22,7 +22,7 @@
 
 import numpy as np
 import scipy.integrate as integrate
-from void_python_tools.backend import *
+from vide.backend import *
 
 __all__=['expansion', 'angularDiameter', 'aveExpansion']
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/apTools/__init__.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/apTools/profiles/__init__.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -17,5 +17,4 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #+
-from chi2 import *
-from profiles import *
+from .getSurveyProps import *
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/apTools/profiles/getSurveyProps.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/apTools/profiles/getSurveyProps.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -20,7 +20,7 @@
 import numpy as np
 import healpy as healpy
 import scipy.integrate
-import void_python_tools as ct
+import vide as ct
 import os
 
 __all__=['getSurveyProps']
@@ -49,7 +49,7 @@ def getSurveyProps(maskFile, zmin, zmax, selFunMin, selFunMax, portion, selectio
     nbar = 1.0
 
   elif not os.access(selectionFuncFile, os.F_OK):
-    print " Warning, selection function file %s not found, using default of uniform selection." % selectionFuncFile
+    print(" Warning, selection function file %s not found, using default of uniform selection." % selectionFuncFile)
     nbar = 1.0
 
   else:
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/backend/__init__.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/backend/__init__.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -17,5 +17,5 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #+
-from classes import *
-from launchers import *
+from .classes import *
+from .launchers import *
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/backend/classes.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/backend/classes.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -1,12 +1,12 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/backend/launchers.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/backend/launchers.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; version 2 of the License.
 #
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -19,12 +19,12 @@
 #+
 # -----------------------------------------------------------------------------
-# routines which communicate with individual data analysis portions - 
+# routines which communicate with individual data analysis portions -
 # they make the analyzeVoids.py script easier to read
 
 import os
 import glob
-import classes
+from . import classes
 import numpy as np
 import numpy.ma as ma
 import os
@@ -34,9 +34,9 @@ import subprocess
 import sys
 from pylab import figure
 from netCDF4 import Dataset
-from void_python_tools.backend.classes import *
+from vide.backend.classes import *
 import pickle
-import void_python_tools.apTools as vp
+import vide.apTools as vp
 import scipy.interpolate as interpolate
 
 NetCDFFile = Dataset
@@ -45,7 +45,7 @@ ncFloat = 'f8' # Double precision
 LIGHT_SPEED = 299792.458
 
 # -----------------------------------------------------------------------------
-def launchGenerate(sample, binPath, workDir=None, inputDataDir=None, 
+def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
                    zobovDir=None, figDir=None, logFile=None, useComoving=False,
                    continueRun=None,regenerate=False):
@@ -90,19 +90,19 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
   parmFile = os.getcwd()+"/generate_"+sample.fullName+".par"
 
   if regenerate or not (continueRun and jobSuccessful(logFile, "Done!\n")):
-    file(parmFile, mode="w").write(conf)
-    arg1 = "--configFile=%s" % parmFile
-    log = open(logFile, 'w')
-    subprocess.call([binPath, arg1], stdout=log, stderr=log)
-    log.close()
+    with open(parmFile, mode="wt") as f:
+      f.write(conf)
+    arg1 = "--configFile=%s" % parmFile
+    with open(logFile, 'wt') as log:
+      subprocess.call([binPath, arg1], stdout=log, stderr=log)
     if jobSuccessful(logFile, "Done!\n"):
-      print "done"
+      print("done")
     else:
-      print "FAILED!"
+      print("FAILED!")
       exit(-1)
 
   else:
-    print "already done!"
+    print("already done!")
 
   if os.access(parmFile, os.F_OK): os.unlink(parmFile)
 
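The hunk above shows the other recurring py3 fix: the file() builtin no longer exists, so the port switches to open(), and wraps both the parameter file and the log in with-blocks so the handles are closed even if the subprocess call raises. A minimal sketch of the idiom (the command and paths are hypothetical):

    import subprocess

    # Python 2: file(path, mode="w").write(conf) relied on the removed
    # file() builtin and on garbage collection to close the handle.
    # Python 3: open() plus a context manager closes deterministically.
    def run_logged(cmd_argv, log_path):
        with open(log_path, "wt") as log:   # closed on success or error
            subprocess.call(cmd_argv, stdout=log, stderr=log)

    run_logged(["echo", "hello"], "/tmp/example.log")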
@@ -131,8 +131,8 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
     # check if the final subsampling is done
     lastSample = sample.subsample.split(', ')[-1]
     doneLine = "Done! %5.2e\n" % float(lastSample)
-    if (continueRun and jobSuccessful(logFile, doneLine)): 
-      print "already done!"
+    if (continueRun and jobSuccessful(logFile, doneLine)):
+      print("already done!")
       return
 
     prevSubSample = -1
@@ -177,7 +177,7 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
       dataFileLine = "ramses " + datafile + "/"
     else:
       raise ValueError("unknown dataFormat '%s'" % sample.dataFormat)
 
     iX = float(sample.mySubvolume[0])
     iY = float(sample.mySubvolume[1])
@@ -212,7 +212,7 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
 %s
 %s
 %s
-""" % (dataFileLine, 
+""" % (dataFileLine,
        outputFile,
        outputFile+".par",
        includePecVelString,
@@ -225,7 +225,8 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
 
       parmFile = os.getcwd()+"/generate_"+sample.fullName+".par"
 
-      file(parmFile, mode="w").write(conf)
+      with open(parmFile, mode="wt") as f:
+        f.write(conf)
 
       if (prevSubSample == -1):
         cmd = "%s --configFile=%s" % (binPath,parmFile)
@@ -233,12 +234,12 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
       else:
         cmd = "%s --configFile=%s" % (binPath,parmFile)
         log = open(logFile, 'a')
-        arg1 = "--configFile=%s" % parmFile 
+        arg1 = "--configFile=%s" % parmFile
 
 
       subprocess.call(cmd, stdout=log, stderr=log, shell=True)
       log.close()
 
       # remove intermediate files
       if (prevSubSample != -1):
         os.unlink(zobovDir+"/zobov_slice_"+sampleName+"_ss"+prevSubSample+".par")
@@ -246,14 +247,14 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
 
       doneLine = "Done! %5.2e\n" % keepFraction
       if not jobSuccessful(logFile, doneLine):
-        print "FAILED!"   ### dies here for now
+        print("FAILED!")   ### dies here for now
         exit(-1)
 
       prevSubSample = thisSubSample
 
-    if jobSuccessful(logFile, doneLine): print "done"
+    if jobSuccessful(logFile, doneLine): print("done")
 
     # place the final subsample
     os.system("mv %s %s" % (zobovDir+"/zobov_slice_"+sampleName+"_ss"+\
                             prevSubSample, zobovDir+"/zobov_slice_"+sampleName))
@@ -273,7 +274,7 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
   # add to sample info file
   if sample.dataType == "observation":
     (boxVol, nbar) = vp.getSurveyProps(sample.maskFile, sample.zRange[0],
-             sample.zRange[1], sample.zRange[0], sample.zRange[1], "all", 
+             sample.zRange[1], sample.zRange[0], sample.zRange[1], "all",
              useComoving=useComoving)
   else:
     iX = float(sample.mySubvolume[0])
@@ -290,11 +291,11 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
 
   numTracers = int(open(zobovDir+"/mask_index.txt", "r").read())
   numTotal = int(open(zobovDir+"/total_particles.txt", "r").read())
 
-  meanSep = (1.*numTracers/boxVol/nbar)**(-1/3.) 
+  meanSep = (1.*numTracers/boxVol/nbar)**(-1/3.)
 
   # save this sample's information
-  with open(zobovDir+"/sample_info.dat", 'w') as output:
+  with open(zobovDir+"/sample_info.dat", mode='wb') as output:
     pickle.dump(sample, output, pickle.HIGHEST_PROTOCOL)
 
   fp = open(zobovDir+"/sample_info.txt", 'w')
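The 'w' to mode='wb' change above matters under Python 3: pickle writes bytes, so the target file must be opened in binary mode or pickle.dump raises a TypeError. A minimal sketch (the payload and path are hypothetical):

    import pickle

    sample = {"fullName": "example", "minVoidRadius": 5.0}

    with open("/tmp/sample_info.dat", "wb") as output:   # binary mode required
        pickle.dump(sample, output, pickle.HIGHEST_PROTOCOL)

    with open("/tmp/sample_info.dat", "rb") as infile:   # and 'rb' to load
        restored = pickle.load(infile)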
@@ -319,7 +320,7 @@ def launchGenerate(sample, binPath, workDir=None, inputDataDir=None,
   fp.write("Estimated mean tracer separation (Mpc/h): %g\n" % meanSep)
   fp.write("Minimum void size actually used (Mpc/h): %g\n" % sample.minVoidRadius)
   fp.close()
 
 
 # -----------------------------------------------------------------------------
-def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None, 
+def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
                 numZobovDivisions=None, numZobovThreads=None):
@@ -343,7 +344,7 @@ def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
     maskIndex = -1
     maxDen = 0.2
     if numZobovDivisions == 1:
-      print " WARNING! You are using a single ZOBOV division with a simulation. Periodic boundaries will not be respected!"
+      print(" WARNING! You are using a single ZOBOV division with a simulation. Periodic boundaries will not be respected!")
 
   if not (continueRun and jobSuccessful(logFile, "Done!\n")):
     for fileName in glob.glob(zobovDir+"/part._"+sampleName+".*"):
@@ -373,49 +374,47 @@ def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
 
       # load volumes
       volFile = zobovDir+"/vol_"+sampleName+".dat"
-      File = file(volFile)
-      numPartTot = np.fromfile(File, dtype=np.int32,count=1)
-      vols = np.fromfile(File, dtype=np.float32,count=numPartTot)
-      File.close()
+      with open(volFile, mode="rb") as File:
+        numPartTot = np.fromfile(File, dtype=np.int32,count=1)
+        vols = np.fromfile(File, dtype=np.float32,count=numPartTot)
 
       # load redshifts
       partFile = zobovDir+"/zobov_slice_"+sample.fullName
-      File = file(partFile)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      Np = np.fromfile(File, dtype=np.int32,count=1)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-
-      # x
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      redshifts = np.fromfile(File, dtype=np.float32,count=Np)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-
-      # y
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      redshifts = np.fromfile(File, dtype=np.float32,count=Np)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-
-      # z
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      redshifts = np.fromfile(File, dtype=np.float32,count=Np)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-
-      # RA
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      redshifts = np.fromfile(File, dtype=np.float32,count=Np)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-
-      # Dec
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      redshifts = np.fromfile(File, dtype=np.float32,count=Np)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-
-      # z
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      redshifts = np.fromfile(File, dtype=np.float32,count=Np)
-      chk = np.fromfile(File, dtype=np.int32,count=1)
-      File.close()
+      with open(partFile, mode="rb") as File:
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+        Np = np.fromfile(File, dtype=np.int32,count=1)
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+
+        # x
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+        redshifts = np.fromfile(File, dtype=np.float32,count=Np)
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+
+        # y
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+        redshifts = np.fromfile(File, dtype=np.float32,count=Np)
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+
+        # z
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+        redshifts = np.fromfile(File, dtype=np.float32,count=Np)
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+
+        # RA
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+        redshifts = np.fromfile(File, dtype=np.float32,count=Np)
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+
+        # Dec
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+        redshifts = np.fromfile(File, dtype=np.float32,count=Np)
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+
+        # z
+        chk = np.fromfile(File, dtype=np.int32,count=1)
+        redshifts = np.fromfile(File, dtype=np.float32,count=Np)
+        chk = np.fromfile(File, dtype=np.int32,count=1)
 
       # build selection function interpolation
       selfuncData = np.genfromtxt(sample.selFunFile)
       selfunc = interpolate.interp1d(selfuncData[:,0], selfuncData[:,1],
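The chk reads that bracket every payload in the hunk above exist because the zobov_slice files appear to be Fortran-style sequential records: each block is sandwiched between two 4-byte length markers. A helper capturing that pattern (a sketch under that assumption; read_fortran_record is not a function in this codebase):

    import numpy as np

    def read_fortran_record(f, dtype, count):
        """Read one Fortran sequential record: marker, payload, marker."""
        head = np.fromfile(f, dtype=np.int32, count=1)[0]  # leading byte count
        data = np.fromfile(f, dtype=dtype, count=count)    # payload
        tail = np.fromfile(f, dtype=np.int32, count=1)[0]  # trailing byte count
        if head != tail:
            raise IOError("corrupt record markers")
        return data

    # hypothetical usage on a zobov_slice file:
    #   with open(partFile, "rb") as f:
    #       Np = read_fortran_record(f, np.int32, 1)[0]
    #       x  = read_fortran_record(f, np.float32, Np)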
@@ -424,20 +423,20 @@ def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
       # re-weight and write
       ## TEST
       #redshifts /= 10000.
-      for i in xrange(len(vols)):
+      for i in range(len(vols)):
         vols[i] *= selfunc(redshifts[i])
 
       volFile = zobovDir+"/vol_weighted_"+sampleName+".dat"
-      File = file(volFile, 'w')
-      numPartTot.astype(np.int32).tofile(File)
-      vols.astype(np.float32).tofile(File)
+      with open(volFile, mode='wb') as File:
+        numPartTot.astype(np.int32).tofile(File)
+        vols.astype(np.float32).tofile(File)
 
       volFileToUse = zobovDir+"/vol_weighted_"+sampleName+".dat"
     else:
       volFileToUse = zobovDir+"/vol_"+sampleName+".dat"
 
 
-    cmd = [binPath+"../c_tools/zobov2/jozov2/jozov2", \
+    cmd = [binPath+"/jozov2", \
            zobovDir+"/adj_"+sampleName+".dat", \
            volFileToUse, \
            zobovDir+"/voidPart_"+sampleName+".dat", \
@@ -453,20 +452,20 @@ def launchZobov(sample, binPath, zobovDir=None, logDir=None, continueRun=None,
         os.unlink(fileName)
 
     if jobSuccessful(logFile, "Done!\n"):
-      print "done"
+      print("done")
     else:
-      print "FAILED!"
+      print("FAILED!")
       exit(-1)
 
   else:
-    print "already done!"
+    print("already done!")
 
   if os.access(vozScript, os.F_OK):
     os.unlink(vozScript)
 
 
 # -----------------------------------------------------------------------------
-def launchPrune(sample, binPath, 
-                summaryFile=None, logFile=None, zobovDir=None, 
+def launchPrune(sample, binPath,
+                summaryFile=None, logFile=None, zobovDir=None,
                 continueRun=None, useComoving=False):
 
   sampleName = sample.fullName
@@ -500,7 +499,7 @@ def launchPrune(sample, binPath,
         minRadius = float(line.split()[5])
         break
     if minRadius == -1:
-      print "Could not grab mean tracer separation!?"
+      print("Could not grab mean tracer separation!?")
       exit(-1)
   else:
     minRadius = sample.minVoidRadius
@@ -530,25 +529,26 @@ def launchPrune(sample, binPath,
     cmd += " --outputDir=" + zobovDir
     cmd += " --sampleName=" + str(sampleName)
     log = open(logFile, 'w')
+    log.write(f"Command is {cmd}\n")
     subprocess.call(cmd, stdout=log, stderr=log, shell=True)
     log.close()
 
     if jobSuccessful(logFile, "NetCDF: Not a valid ID\n") or \
        jobSuccessful(logFile, "Done!\n"):
-      print "done"
+      print("done")
     else:
-      print "FAILED!"
+      print("FAILED!")
       #exit(-1)
 
   else:
-    print "already done!"
+    print("already done!")
 
 
 # -----------------------------------------------------------------------------
-def launchVoidOverlap(sample1, sample2, sample1Dir, sample2Dir, 
-                      binPath, thisDataPortion=None, 
-                      logFile=None, 
-                      continueRun=None, outputFile=None, 
+def launchVoidOverlap(sample1, sample2, sample1Dir, sample2Dir,
+                      binPath, thisDataPortion=None,
+                      logFile=None,
+                      continueRun=None, outputFile=None,
+                      overlapFrac=0.25,
                       matchMethod=None, strictMatch=False):
@@ -618,22 +618,22 @@ def launchVoidOverlap(sample1, sample2, sample1Dir, sample2Dir,
     log.close()
 
     #if jobSuccessful(logFile, "Done!\n"):
-    print "done"
+    print("done")
     #else:
     #  print "FAILED!"
    #  exit(-1)
 
   else:
-    print "already done!"
+    print("already done!")
 
 
 # -----------------------------------------------------------------------------
-def launchVelocityStack(sample, stack, binPath, 
+def launchVelocityStack(sample, stack, binPath,
                         velField_file,
                         thisDataPortion=None, logDir=None,
                         voidDir=None, runSuffix=None,
                         zobovDir=None,
-                        summaryFile=None, 
+                        summaryFile=None,
                         continueRun=None, dataType=None, prefixRun=""):
 
   sampleName = sample.fullName
@@ -644,7 +644,7 @@ def launchVelocityStack(sample, stack, binPath,
   logFile = logDir+("/%svelocity_stack_"%prefixRun)+sampleName+"_"+runSuffix+".out"
 
   voidCenters=voidDir+"/centers.txt"
-  # Rmax = 
+  # Rmax =
 
   centralRadius = stack.rMin * 0.25
   Rextracut = stack.rMax*3 + 1
@@ -660,10 +660,10 @@ def launchVelocityStack(sample, stack, binPath,
     subprocess.call(cmd, stdout=log, stderr=log, shell=True)
     log.close()
     if jobSuccessful(logFile, "Done!\n"):
-      print "done"
+      print("done")
     else:
-      print "FAILED!"
+      print("FAILED!")
       exit(-1)
 
   else:
-    print "already done!"
+    print("already done!")
@@ -1,5 +1,5 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/__init__.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/__init__.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
@@ -17,6 +17,10 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #+
-from void_python_tools.backend import *
-from void_python_tools.apTools import *
-from void_python_tools.voidUtil import *
+from .catalogUtil import *
+from .plotDefs import *
+from .plotUtil import *
+from .matchUtil import *
+from .xcorUtil import *
+from .profileUtil import *
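The switch to leading-dot imports above is the third recurring Python 3 change: implicit relative imports were removed, so modules inside a package must name their siblings explicitly. A sketch of the idiom (illustrative):

    # Python 2 allowed an implicit relative import inside a package:
    #   import classes
    # Python 3 requires the relationship to be explicit:
    from . import classes           # sibling module of the current package
    from .catalogUtil import *      # re-export a sibling's names
    import vide.backend.classes     # or use the fully qualified absolute path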
@@ -1,12 +1,12 @@
 #+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/partUtil/partUtil.py
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/catalogUtil.py
 # Copyright (C) 2010-2014 Guilhem Lavaux
 # Copyright (C) 2011-2014 P. M. Sutter
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; version 2 of the License.
 #
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,10 +23,10 @@
 import numpy as np
 from netCDF4 import Dataset
 import sys
-from void_python_tools.backend import *
-import void_python_tools.apTools as vp
+from vide.backend import *
+import vide.apTools as vp
 import pickle
-from periodic_kdtree import PeriodicCKDTree
+from .periodic_kdtree import PeriodicCKDTree
 import os
 
 NetCDFFile = Dataset
@@ -34,7 +34,7 @@ ncFloat = 'f8'
 
 # -----------------------------------------------------------------------------
 def loadPart(sampleDir):
-  print " Loading particle data..."
+  print(" Loading particle data...")
   sys.stdout.flush()
 
   with open(sampleDir+"/sample_info.dat", 'rb') as input:
@@ -59,50 +59,47 @@ def loadPart(sampleDir):
   iLine = 0
   partData = []
   part = np.zeros((3))
-  File = file(partFile)
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  Np = np.fromfile(File, dtype=np.int32,count=1)
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  x = np.fromfile(File, dtype=np.float32,count=Np)
-  x *= mul[0]
-  if isObservation != 1:
-    x += ranges[0][0]
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  y = np.fromfile(File, dtype=np.float32,count=Np)
-  y *= mul[1]
-  if isObservation != 1:
-    y += ranges[1][0]
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  z = np.fromfile(File, dtype=np.float32,count=Np)
-  z *= mul[2]
-  if isObservation != 1:
-    z += ranges[2][0]
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  RA = np.fromfile(File, dtype=np.float32,count=Np)
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  Dec = np.fromfile(File, dtype=np.float32,count=Np)
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  redshift = np.fromfile(File, dtype=np.float32,count=Np)
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  uniqueID = np.fromfile(File, dtype=np.int64,count=Np)
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-
-  File.close()
+  with open(partFile, mode="rb") as File:
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    Np = np.fromfile(File, dtype=np.int32,count=1)[0]
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    x = np.fromfile(File, dtype=np.float32,count=Np)
+    x *= mul[0]
+    if isObservation != 1:
+      x += ranges[0][0]
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    y = np.fromfile(File, dtype=np.float32,count=Np)
+    y *= mul[1]
+    if isObservation != 1:
+      y += ranges[1][0]
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    z = np.fromfile(File, dtype=np.float32,count=Np)
+    z *= mul[2]
+    if isObservation != 1:
+      z += ranges[2][0]
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    RA = np.fromfile(File, dtype=np.float32,count=Np)
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    Dec = np.fromfile(File, dtype=np.float32,count=Np)
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    redshift = np.fromfile(File, dtype=np.float32,count=Np)
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    uniqueID = np.fromfile(File, dtype=np.int64,count=Np)
+    chk = np.fromfile(File, dtype=np.int32,count=1)
 
   if isObservation == 1:
     x = x[0:maskIndex]# * 100/300000
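Note the [0] added to the Np read above: np.fromfile(..., count=1) returns a length-1 array, and passing that array where an integer is expected (such as the later count=Np) breaks under Python 3 instead of being silently cast; this is the "Fixed index for Np, numPart, numZones..." item from the commit message. A minimal sketch, using np.frombuffer in place of np.fromfile so it runs without a file on disk:

    import numpy as np

    raw = (np.asarray([3], dtype=np.int32).tobytes()
           + np.arange(3, dtype=np.float32).tobytes())

    Np = np.frombuffer(raw[:4], dtype=np.int32, count=1)[0]  # scalar, not array([3])
    x  = np.frombuffer(raw[4:], dtype=np.float32, count=Np)  # count needs an int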
@@ -127,15 +124,15 @@ def loadPart(sampleDir):
   #  maskFile = sampleDir+"/"+os.path.basename(sample.maskFile)
   #  print "Using maskfile found in:", maskFile
   #  props = vp.getSurveyProps(maskFile, sample.zBoundary[0],
-  #                            sample.zBoundary[1],
-  #                            sample.zBoundary[0],
+  #                            sample.zBoundary[1],
+  #                            sample.zBoundary[0],
   #                            sample.zBoundary[1], "all",
   #                            selectionFuncFile=sample.selFunFile,
   #                            useComoving=sample.useComoving)
   #  boxVol = props[0]
   #  volNorm = maskIndex/boxVol
   #else:
-  boxVol = np.prod(boxLen) 
+  boxVol = np.prod(boxLen)
   volNorm = len(x)/boxVol
 
   isObservationData = isObservation == 1
@@ -163,10 +160,9 @@ def getVolNorm(sampleDir):
   mul[:] = ranges[:,1] - ranges[:,0]
 
   partFile = sampleDir+"/zobov_slice_"+sample.fullName
-  File = file(partFile)
-  chk = np.fromfile(File, dtype=np.int32,count=1)
-  Np = np.fromfile(File, dtype=np.int32,count=1)
-  File.close()
+  with open(partFile, mode="rb") as File:
+    chk = np.fromfile(File, dtype=np.int32,count=1)
+    Np = np.fromfile(File, dtype=np.int32,count=1)[0]
 
   boxLen = mul
@@ -178,15 +174,15 @@ def getVolNorm(sampleDir):
   #  maskFile = sampleDir+"/"+os.path.basename(sample.maskFile)
   #  print "Using maskfile found in:", maskFile
   #  props = vp.getSurveyProps(maskFile, sample.zBoundary[0],
-  #                            sample.zBoundary[1],
-  #                            sample.zBoundary[0],
+  #                            sample.zBoundary[1],
+  #                            sample.zBoundary[0],
   #                            sample.zBoundary[1], "all",
   #                            selectionFuncFile=sample.selFunFile,
   #                            useComoving=sample.useComoving)
   #  boxVol = props[0]
   #  volNorm = maskIndex/boxVol
   #else:
-  boxVol = np.prod(boxLen) 
+  boxVol = np.prod(boxLen)
   volNorm = Np/boxVol
 
   return volNorm
@@ -205,9 +201,9 @@ def loadPartVel(sampleDir):
   isObservation = getattr(File, 'is_observation')
 
   if isObservation:
-    print "No velocities for observations!"
+    print("No velocities for observations!")
     return -1
 
   vx = File.variables['vel_x'][0:]
   vy = File.variables['vel_y'][0:]
   vz = File.variables['vel_z'][0:]
@@ -250,7 +246,7 @@ def shiftPart(inPart, center, periodicLine, ranges):
   boxLen = np.zeros((3))
   boxLen[0] = ranges[0][1] - ranges[0][0]
   boxLen[1] = ranges[1][1] - ranges[1][0]
-  boxLen[2] = ranges[2][1] - ranges[2][0] 
+  boxLen[2] = ranges[2][1] - ranges[2][0]
 
   # shift to box coordinates
   part[:,0] -= ranges[0][0]
@@ -317,7 +313,7 @@ def loadVoidCatalog(sampleDir, dataPortion="central", loadParticles=True,
     sample = pickle.load(input)
   catalog.sampleInfo = sample
 
-  print "Loading info..."
+  print("Loading info...")
   infoFile = sampleDir+"/zobov_slice_"+sample.fullName+".par"
   File = NetCDFFile(infoFile, 'r')
   ranges = np.zeros((3,2))
@@ -341,7 +337,7 @@ def loadVoidCatalog(sampleDir, dataPortion="central", loadParticles=True,
   else:
     prefix = ""
 
-  print "Loading voids..."
+  print("Loading voids...")
   fileName = sampleDir+"/"+prefix+"voidDesc_"+dataPortion+"_"+sample.fullName+".out"
   catData = np.loadtxt(fileName, comments="#", skiprows=2)
   catalog.voids = []
@@ -372,9 +368,9 @@ def loadVoidCatalog(sampleDir, dataPortion="central", loadParticles=True,
                                ))
 
   catalog.numVoids = len(catalog.voids)
-  print "Read %d voids" % catalog.numVoids
+  print("Read %d voids" % catalog.numVoids)
 
-  print "Loading macrocenters..."
+  print("Loading macrocenters...")
   iLine = 0
   for line in open(sampleDir+"/"+prefix+"macrocenters_"+dataPortion+"_"+sample.fullName+".out"):
     line = line.split()
@@ -392,7 +388,7 @@ def loadVoidCatalog(sampleDir, dataPortion="central", loadParticles=True,
     iLine += 1
 
 
-  print "Loading derived void information..."
+  print("Loading derived void information...")
   fileName = sampleDir+"/"+prefix+"centers_"+dataPortion+"_"+sample.fullName+".out"
   catData = np.loadtxt(fileName, comments="#")
   for (iLine,line) in enumerate(catData):
@@ -429,14 +425,14 @@ def loadVoidCatalog(sampleDir, dataPortion="central", loadParticles=True,
     iLine += 1
 
   if loadParticles:
-    print "Loading all particles..."
+    print("Loading all particles...")
     partData, boxLen, volNorm, isObservationData, ranges, extraData = loadPart(sampleDir)
     numPartTot = len(partData)
    catalog.numPartTot = numPartTot
     catalog.partPos = partData
     catalog.part = []
-    for i in xrange(len(partData)):
-      catalog.part.append(Bunch(x = partData[i][0],
+    for i in range(len(partData)):
+      catalog.part.append(Bunch(x = partData[i][0],
                                 y = partData[i][1],
                                 z = partData[i][2],
                                 volume = 0,
@@ -444,79 +440,78 @@ def loadVoidCatalog(sampleDir, dataPortion="central", loadParticles=True,
                                 dec = extraData[i][1],
                                 redshift = extraData[i][2],
                                 uniqueID = extraData[i][3]))
 
-    print "Loading volumes..."
+    print("Loading volumes...")
     volFile = sampleDir+"/vol_"+sample.fullName+".dat"
-    File = file(volFile)
-    chk = np.fromfile(File, dtype=np.int32,count=1)
-    vols = np.fromfile(File, dtype=np.float32,count=numPartTot)
-    for ivol in xrange(len(vols)):
+    with open(volFile, mode="rb") as File:
+      chk = np.fromfile(File, dtype=np.int32,count=1)
+      vols = np.fromfile(File, dtype=np.float32,count=numPartTot)
+    for ivol in range(len(vols)):
       catalog.part[ivol].volume = vols[ivol] / volNorm
 
-    print "Loading zone-void membership info..."
+    print("Loading zone-void membership info...")
     zoneFile = sampleDir+"/voidZone_"+sample.fullName+".dat"
     catalog.void2Zones = []
-    File = file(zoneFile)
-    numZonesTot = np.fromfile(File, dtype=np.int32,count=1)
-    catalog.numZonesTot = numZonesTot
-    for iZ in xrange(numZonesTot):
-      numZones = np.fromfile(File, dtype=np.int32,count=1)
-      catalog.void2Zones.append(Bunch(numZones = numZones,
-                                      zoneIDs = []))
+    with open(zoneFile, mode="rb") as File:
+      numZonesTot = np.fromfile(File, dtype=np.int32,count=1)[0]
+      catalog.numZonesTot = numZonesTot
+      for iZ in range(numZonesTot):
+        numZones = np.fromfile(File, dtype=np.int32,count=1)[0]
+        catalog.void2Zones.append(Bunch(numZones = numZones,
+                                        zoneIDs = []))
 
-      for p in xrange(numZones):
-        zoneID = np.fromfile(File, dtype=np.int32,count=1)[0]
-        catalog.void2Zones[iZ].zoneIDs.append(zoneID)
+        for p in range(numZones):
+          zoneID = np.fromfile(File, dtype=np.int32,count=1)[0]
+          catalog.void2Zones[iZ].zoneIDs.append(zoneID)
 
-    print "Loading particle-zone membership info..."
+    print("Loading particle-zone membership info...")
     zonePartFile = sampleDir+"/voidPart_"+sample.fullName+".dat"
     catalog.zones2Parts = []
-    File = file(zonePartFile)
-    chk = np.fromfile(File, dtype=np.int32,count=1)
-    numZonesTot = np.fromfile(File, dtype=np.int32,count=1)
-    for iZ in xrange(numZonesTot):
-      numPart = np.fromfile(File, dtype=np.int32,count=1)
-      catalog.zones2Parts.append(Bunch(numPart = numPart,
-                                       partIDs = []))
+    with open(zonePartFile) as File:
+      chk = np.fromfile(File, dtype=np.int32,count=1)
+      numZonesTot = np.fromfile(File, dtype=np.int32,count=1)[0]
+      for iZ in range(numZonesTot):
+        numPart = np.fromfile(File, dtype=np.int32,count=1)[0]
+        catalog.zones2Parts.append(Bunch(numPart = numPart,
+                                         partIDs = []))
 
-      for p in xrange(numPart):
-        partID = np.fromfile(File, dtype=np.int32,count=1)[0]
-        catalog.zones2Parts[iZ].partIDs.append(partID)
+        for p in range(numPart):
+          partID = np.fromfile(File, dtype=np.int32,count=1)[0]
+          catalog.zones2Parts[iZ].partIDs.append(partID)
 
   return catalog
 
 
 # -----------------------------------------------------------------------------
 def getArray(objectList, attr):
 
   if hasattr(objectList[0], attr):
     ndim = np.shape( np.atleast_1d( getattr(objectList[0], attr) ) )[0]
     attrArr = np.zeros(( len(objectList), ndim ))
 
-    for idim in xrange(ndim):
+    for idim in range(ndim):
       attrArr[:,idim] = np.fromiter((np.atleast_1d(getattr(v, attr))[idim] \
                                      for v in objectList), float )
 
     if ndim == 1: attrArr = attrArr[:,0]
 
     return attrArr
   else:
-    print " Attribute", attr, "not found!"
+    print(" Attribute", attr, "not found!")
     return -1
 
 
 # -----------------------------------------------------------------------------
 def getVoidPart(catalog, voidID):
 
   partOut = []
-  for iZ in xrange(catalog.void2Zones[voidID].numZones):
+  for iZ in range(catalog.void2Zones[voidID].numZones):
     zoneID = catalog.void2Zones[voidID].zoneIDs[iZ]
-    for p in xrange(catalog.zones2Parts[zoneID].numPart):
+    for p in range(catalog.zones2Parts[zoneID].numPart):
       partID = catalog.zones2Parts[zoneID].partIDs[p]
       partOut.append(catalog.part[partID])
 
   return partOut
 
 # -----------------------------------------------------------------------------
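Taken together, loadVoidCatalog, getArray, and getVoidPart above form the catalog-reading entry points of the renamed vide.voidUtil package. A hedged usage sketch (the path is a placeholder, and the "radius" attribute is assumed to be one of the per-void fields the loader fills in):

    from vide.voidUtil.catalogUtil import loadVoidCatalog, getArray, getVoidPart

    catalog = loadVoidCatalog("/path/to/sample", dataPortion="central",
                              loadParticles=True)

    radii = getArray(catalog.voids, "radius")  # one value per void
    parts = getVoidPart(catalog, voidID=0)     # member particles of void 0
    print(len(catalog.voids), "voids;", len(parts), "particles in void 0")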
@@ -589,4 +584,3 @@ def stackVoids(catalog, stackMode = "ball"):
     stackedPart.extend(shiftedPart)
 
   return stackedPart
-
python_tools/void_python_tools/voidUtil/matchUtil.py → python_tools/vide/voidUtil/matchUtil.py (12 changes; Executable file → Normal file)
@@ -1,8 +1,8 @@
 #!/usr/bin/env python
 #+
-# VIDE -- Void IDentification and Examination -- ./crossCompare/analysis/mergerTree.py
-# Copyright (C) 2010-2013 Guilhem Lavaux
-# Copyright (C) 2011-2013 P. M. Sutter
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/matchUtil.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -20,7 +20,7 @@
 #+
 __all__=['compareCatalogs',]
 
-from void_python_tools.backend import *
+from vide.backend import *
 import imp
 import pickle
 import os
@@ -60,7 +60,7 @@ def compareCatalogs(baseCatalogDir, compareCatalogDir,
   with open(compareCatalogDir+"/sample_info.dat", 'rb') as input:
     sample = pickle.load(input)
 
-  print " Comparing", baseSample.fullName, "to", sample.fullName, "...",
+  print(" Comparing", baseSample.fullName, "to", sample.fullName, "...", end=' ')
   sys.stdout.flush()
 
   sampleName = sample.fullName
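The hunk above is the py2-to-py3 translation of a trailing-comma print: the comma that used to suppress the newline becomes the print function's end keyword. A minimal sketch (illustrative):

    import sys

    # Python 2:  print "working...",    # trailing comma suppressed the newline
    # Python 3 equivalent:
    print("working...", end=' ')
    sys.stdout.flush()  # still useful: a partial line is not flushed by default
    print("done")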
@@ -79,5 +79,5 @@ def compareCatalogs(baseCatalogDir, compareCatalogDir,
                   overlapFrac=overlapFrac,
                   strictMatch=strictMatch)
 
-  print " Done!"
+  print(" Done!")
   return
python_tools/vide/voidUtil/periodic_kdtree.py (new file, 394 lines)
@@ -0,0 +1,394 @@
+# periodic_kdtree.py
+#
+# A wrapper around scipy.spatial.kdtree to implement periodic boundary
+# conditions
+#
+# Written by Patrick Varilly, 6 Jul 2012
+# Released under the scipy license
+
+import numpy as np
+from scipy.spatial import KDTree, cKDTree
+import itertools
+import heapq
+
+def _gen_relevant_images(x, bounds, distance_upper_bound):
+    # Map x onto the canonical unit cell, then produce the relevant
+    # mirror images
+    real_x = x - np.where(bounds > 0.0,
+                          np.floor(x / bounds) * bounds, 0.0)
+    m = len(x)
+
+    xs_to_try = [real_x]
+    for i in range(m):
+        if bounds[i] > 0.0:
+            disp = np.zeros(m)
+            disp[i] = bounds[i]
+
+            if distance_upper_bound == np.inf:
+                xs_to_try = list(
+                    itertools.chain.from_iterable(
+                        (_ + disp, _, _ - disp) for _ in xs_to_try))
+            else:
+                extra_xs = []
+
+                # Point near lower boundary, include image on upper side
+                if abs(real_x[i]) < distance_upper_bound:
+                    extra_xs.extend(_ + disp for _ in xs_to_try)
+
+                # Point near upper boundary, include image on lower side
+                if abs(bounds[i] - real_x[i]) < distance_upper_bound:
+                    extra_xs.extend(_ - disp for _ in xs_to_try)
+
+                xs_to_try.extend(extra_xs)
+
+    return xs_to_try
+
+
+class PeriodicKDTree(KDTree):
+    """
+    kd-tree for quick nearest-neighbor lookup with periodic boundaries
+
+    See scipy.spatial.kdtree for details on kd-trees.
+
+    Searches with periodic boundaries are implemented by mapping all
+    initial data points to one canonical periodic image, building an
+    ordinary kd-tree with these points, then querying this kd-tree multiple
+    times, if necessary, with all the relevant periodic images of the
+    query point.
+
+    Note that to ensure that no two distinct images of the same point
+    appear in the results, it is essential to restrict the maximum
+    distance between a query point and a data point to half the smallest
+    box dimension.
+    """
+
+    def __init__(self, bounds, data, leafsize=10):
+        """Construct a kd-tree.
+
+        Parameters
+        ----------
+        bounds : array_like, shape (k,)
+            Size of the periodic box along each spatial dimension.  A
+            negative or zero size for dimension k means that space is not
+            periodic along k.
+        data : array_like, shape (n,k)
+            The data points to be indexed. This array is not copied, and
+            so modifying this data will result in bogus results.
+        leafsize : positive int
+            The number of points at which the algorithm switches over to
+            brute-force.
+        """
+
+        # Map all points to canonical periodic image
+        self.bounds = np.array(bounds)
+        self.real_data = np.asarray(data)
+        wrapped_data = (
+            self.real_data - np.where(bounds > 0.0,
+                (np.floor(self.real_data / bounds) * bounds), 0.0))
+
+        # Calculate maximum distance_upper_bound
+        self.max_distance_upper_bound = np.min(
+            np.where(self.bounds > 0, 0.5 * self.bounds, np.inf))
+
+        # Set up underlying kd-tree
+        super(PeriodicKDTree, self).__init__(wrapped_data, leafsize)
+
+    # The following name is a kludge to override KDTree's private method
+    def _KDTree__query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
+        # This is the internal query method, which guarantees that x
+        # is a single point, not an array of points
+        #
+        # A slight complication: k could be "None", which means "return
+        # all neighbors within the given distance_upper_bound".
+
+        # Cap distance_upper_bound
+        distance_upper_bound = min([distance_upper_bound,
+                                    self.max_distance_upper_bound])
+
+        # Run queries over all relevant images of x
+        hits_list = []
+        for real_x in _gen_relevant_images(x, self.bounds, distance_upper_bound):
+            hits_list.append(
+                super(PeriodicKDTree, self)._KDTree__query(
+                    real_x, k, eps, p, distance_upper_bound))
+
+        # Now merge results
+        if k is None:
+            return list(heapq.merge(*hits_list))
+        elif k > 1:
+            return heapq.nsmallest(k, itertools.chain(*hits_list))
+        elif k == 1:
+            return [min(itertools.chain(*hits_list))]
+        else:
+            raise ValueError("Invalid k in periodic_kdtree._KDTree__query")
+
+    # The following name is a kludge to override KDTree's private method
+    def _KDTree__query_ball_point(self, x, r, p=2., eps=0):
+        # This is the internal query method, which guarantees that x
+        # is a single point, not an array of points
+
+        # Cap r
+        r = min(r, self.max_distance_upper_bound)
+
+        # Run queries over all relevant images of x
+        results = []
+        for real_x in _gen_relevant_images(x, self.bounds, r):
+            results.extend(
+                super(PeriodicKDTree, self)._KDTree__query_ball_point(
+                    real_x, r, p, eps))
+        return results
+
+    def query_ball_tree(self, other, r, p=2., eps=0):
+        raise NotImplementedError()
+
+    def query_pairs(self, r, p=2., eps=0):
+        raise NotImplementedError()
+
+    def count_neighbors(self, other, r, p=2.):
+        raise NotImplementedError()
+
+    def sparse_distance_matrix(self, other, max_distance, p=2.):
+        raise NotImplementedError()
+
+
+class PeriodicCKDTree(cKDTree):
+    """
+    Cython kd-tree for quick nearest-neighbor lookup with periodic boundaries
+
+    See scipy.spatial.ckdtree for details on kd-trees.
+
+    Searches with periodic boundaries are implemented by mapping all
+    initial data points to one canonical periodic image, building an
+    ordinary kd-tree with these points, then querying this kd-tree multiple
+    times, if necessary, with all the relevant periodic images of the
+    query point.
+
+    Note that to ensure that no two distinct images of the same point
+    appear in the results, it is essential to restrict the maximum
+    distance between a query point and a data point to half the smallest
+    box dimension.
+    """
+
+    def __init__(self, bounds, data, leafsize=10):
+        """Construct a kd-tree.
+
+        Parameters
+        ----------
+        bounds : array_like, shape (k,)
+            Size of the periodic box along each spatial dimension.  A
+            negative or zero size for dimension k means that space is not
+            periodic along k.
+        data : array-like, shape (n,m)
+            The n data points of dimension m to be indexed. This array is
+            not copied unless this is necessary to produce a contiguous
+            array of doubles, and so modifying this data will result in
+            bogus results.
+        leafsize : positive integer
+            The number of points at which the algorithm switches over to
+            brute-force.
+        """
+
+        # Map all points to canonical periodic image
+        self.bounds = np.array(bounds)
+        self.real_data = np.asarray(data)
+        wrapped_data = (
+            self.real_data - np.where(bounds > 0.0,
+                (np.floor(self.real_data / bounds) * bounds), 0.0))
+
+        # Calculate maximum distance_upper_bound
+        self.max_distance_upper_bound = np.min(
+            np.where(self.bounds > 0, 0.5 * self.bounds, np.inf))
+
+        # Set up underlying kd-tree
+        super(PeriodicCKDTree, self).__init__(wrapped_data, leafsize)
+
+    # Ideally, KDTree and cKDTree would expose identical query and __query
+    # interfaces.  But they don't, and cKDTree.__query is also inaccessible
+    # from Python.  We do our best here to cope.
+    def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
+        # This is the internal query method, which guarantees that x
+        # is a single point, not an array of points
+        #
+        # A slight complication: k could be "None", which means "return
+        # all neighbors within the given distance_upper_bound".
+
+        # Cap distance_upper_bound
+        distance_upper_bound = np.min([distance_upper_bound,
+                                       self.max_distance_upper_bound])
+
+        # Run queries over all relevant images of x
+        hits_list = []
+        for real_x in _gen_relevant_images(x, self.bounds, distance_upper_bound):
+            d, i = super(PeriodicCKDTree, self).query(
+                real_x, k, eps, p, distance_upper_bound)
+            if k > 1:
+                hits_list.append(list(zip(d, i)))
+            else:
+                hits_list.append([(d, i)])
+
+        # Now merge results
+        if k > 1:
+            return heapq.nsmallest(k, itertools.chain(*hits_list))
+        elif k == 1:
+            return [min(itertools.chain(*hits_list))]
+        else:
+            raise ValueError("Invalid k in periodic_kdtree._KDTree__query")
+
+    def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
+        """
+        Query the kd-tree for nearest neighbors
+
+        Parameters
+        ----------
+        x : array_like, last dimension self.m
+            An array of points to query.
+        k : integer
+            The number of nearest neighbors to return.
+        eps : non-negative float
+            Return approximate nearest neighbors; the kth returned value
+            is guaranteed to be no further than (1+eps) times the
+            distance to the real k-th nearest neighbor.
+        p : float, 1<=p<=infinity
+            Which Minkowski p-norm to use.
+            1 is the sum-of-absolute-values "Manhattan" distance
+            2 is the usual Euclidean distance
+            infinity is the maximum-coordinate-difference distance
+        distance_upper_bound : nonnegative float
+            Return only neighbors within this distance.  This is used to prune
+            tree searches, so if you are doing a series of nearest-neighbor
+            queries, it may help to supply the distance to the nearest neighbor
+            of the most recent point.
+
+        Returns
+        -------
+        d : array of floats
+            The distances to the nearest neighbors.
+            If x has shape tuple+(self.m,), then d has shape tuple+(k,).
+            Missing neighbors are indicated with infinite distances.
+        i : ndarray of ints
+            The locations of the neighbors in self.data.
+            If `x` has shape tuple+(self.m,), then `i` has shape tuple+(k,).
+            Missing neighbors are indicated with self.n.
+
+        """
+        x = np.asarray(x)
+        if np.shape(x)[-1] != self.m:
+            raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
+        if p < 1:
+            raise ValueError("Only p-norms with 1<=p<=infinity permitted")
+        retshape = np.shape(x)[:-1]
+        if retshape != ():
+            if k > 1:
+                dd = np.empty(retshape+(k,), dtype=np.float)
+                dd.fill(np.inf)
+                ii = np.empty(retshape+(k,), dtype=np.int)
+                ii.fill(self.n)
+            elif k == 1:
+                dd = np.empty(retshape, dtype=np.float)
+                dd.fill(np.inf)
+                ii = np.empty(retshape, dtype=np.int)
+                ii.fill(self.n)
+            else:
+                raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
+            for c in np.ndindex(retshape):
+                hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
+                if k > 1:
+                    for j in range(len(hits)):
+                        dd[c+(j,)], ii[c+(j,)] = hits[j]
+                elif k == 1:
+                    if len(hits) > 0:
+                        dd[c], ii[c] = hits[0]
+                    else:
+                        dd[c] = np.inf
+                        ii[c] = self.n
+            return dd, ii
+        else:
+            hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
+            if k == 1:
+                if len(hits) > 0:
+                    return hits[0]
+                else:
+                    return np.inf, self.n
+            elif k > 1:
+                dd = np.empty(k, dtype=np.float)
+                dd.fill(np.inf)
+                ii = np.empty(k, dtype=np.int)
+                ii.fill(self.n)
+                for j in range(len(hits)):
+                    dd[j], ii[j] = hits[j]
+                return dd, ii
+            else:
+                raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
+
+    # Ideally, KDTree and cKDTree would expose identical __query_ball_point
+    # interfaces.  But they don't, and cKDTree.__query_ball_point is also
|
||||
# inaccessible from Python. We do our best here to cope.
|
||||
def __query_ball_point(self, x, r, p=2., eps=0):
|
||||
# This is the internal query method, which guarantees that x
|
||||
# is a single point, not an array of points
|
||||
|
||||
# Cap r
|
||||
r = min(r, self.max_distance_upper_bound)
|
||||
|
||||
# Run queries over all relevant images of x
|
||||
results = []
|
||||
for real_x in _gen_relevant_images(x, self.bounds, r):
|
||||
results.extend(super(PeriodicCKDTree, self).query_ball_point(
|
||||
real_x, r, p, eps))
|
||||
return results
|
||||
|
||||
def query_ball_point(self, x, r, p=2., eps=0):
|
||||
"""
|
||||
Find all points within distance r of point(s) x.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like, shape tuple + (self.m,)
|
||||
The point or points to search for neighbors of.
|
||||
r : positive float
|
||||
The radius of points to return.
|
||||
p : float, optional
|
||||
Which Minkowski p-norm to use. Should be in the range [1, inf].
|
||||
eps : nonnegative float, optional
|
||||
Approximate search. Branches of the tree are not explored if their
|
||||
nearest points are further than ``r / (1 + eps)``, and branches are
|
||||
added in bulk if their furthest points are nearer than
|
||||
``r * (1 + eps)``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
results : list or array of lists
|
||||
If `x` is a single point, returns a list of the indices of the
|
||||
neighbors of `x`. If `x` is an array of points, returns an object
|
||||
array of shape tuple containing lists of neighbors.
|
||||
|
||||
Notes
|
||||
-----
|
||||
If you have many points whose neighbors you want to find, you may
|
||||
save substantial amounts of time by putting them in a
|
||||
PeriodicCKDTree and using query_ball_tree.
|
||||
"""
|
||||
x = np.asarray(x).astype(np.float)
|
||||
if x.shape[-1] != self.m:
|
||||
raise ValueError("Searching for a %d-dimensional point in a " \
|
||||
"%d-dimensional KDTree" % (x.shape[-1], self.m))
|
||||
if len(x.shape) == 1:
|
||||
return self.__query_ball_point(x, r, p, eps)
|
||||
else:
|
||||
retshape = x.shape[:-1]
|
||||
result = np.empty(retshape, dtype=np.object)
|
||||
for c in np.ndindex(retshape):
|
||||
result[c] = self.__query_ball_point(x[c], r, p, eps)
|
||||
return result
|
||||
|
||||
def query_ball_tree(self, other, r, p=2., eps=0):
|
||||
raise NotImplementedError()
|
||||
|
||||
def query_pairs(self, r, p=2., eps=0):
|
||||
raise NotImplementedError()
|
||||
|
||||
def count_neighbors(self, other, r, p=2.):
|
||||
raise NotImplementedError()
|
||||
|
||||
def sparse_distance_matrix(self, other, max_distance, p=2.):
|
||||
raise NotImplementedError()
|
|
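The class above is easiest to grasp from a small usage sketch (not part of this commit; it assumes the module is importable as periodic_kdtree and that numpy is installed):

# Illustration only -- not part of this diff.
import numpy as np
from periodic_kdtree import PeriodicCKDTree  # import path assumed

bounds = np.array([100.0, 100.0, 100.0])  # periodic box, 100 units per side
data = 100.0 * np.random.rand(1000, 3)    # 1000 points inside the box
tree = PeriodicCKDTree(bounds, data)

# Neighbors across the boundary are found via periodic images of the query.
d, i = tree.query(np.array([0.5, 99.5, 50.0]), k=5)

# Radius searches must keep r below half the smallest box side,
# i.e. below tree.max_distance_upper_bound.
idx = tree.query_ball_point(np.array([0.5, 99.5, 50.0]), r=5.0)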
@@ -1,7 +1,7 @@
#+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/plotting/plotDefs.py
-# Copyright (C) 2010-2013 Guilhem Lavaux
-# Copyright (C) 2011-2013 P. M. Sutter
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/plotDefs.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

@@ -1,7 +1,7 @@
#+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/plotting/plotTools.py
-# Copyright (C) 2010-2013 Guilhem Lavaux
-# Copyright (C) 2011-2013 P. M. Sutter
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/plotUtil.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

@@ -19,13 +19,13 @@
#+
__all__=['plotNumberFunction','plotEllipDist','plotVoidCells']

-from void_python_tools.backend.classes import *
-from plotDefs import *
+from vide.backend.classes import *
+from .plotDefs import *
import numpy as np
import os
import pylab as plt
-import void_python_tools.apTools as vp
-from void_python_tools.voidUtil import getArray, shiftPart, getVoidPart
+import vide.apTools as vp
+from vide.voidUtil import getArray, shiftPart, getVoidPart

def fill_between(x, y1, y2=0, ax=None, **kwargs):
  """Plot filled region between `y1` and `y2`.

@@ -56,7 +56,7 @@ def plotNumberFunction(catalogList,
  # ellipDistList: array of len(catalogList),
  #   each element has array of size bins, number, +/- 1 sigma

-  print "Plotting number function"
+  print("Plotting number function")

  catalogList = np.atleast_1d(catalogList)

@@ -93,7 +93,7 @@ def plotNumberFunction(catalogList,

  if cumulative:
    foundStart = False
-    for iBin in xrange(len(hist)):
+    for iBin in range(len(hist)):
      if not foundStart and hist[iBin] == 0:
        continue
      foundStart = True

@@ -154,7 +154,7 @@ def plotEllipDist(catalogList,
  # ellipDistList: array of len(catalogList),
  #   each element has array of ellipticity distributions

-  print "Plotting ellipticity distributions"
+  print("Plotting ellipticity distributions")

  plt.clf()
  plt.xlabel(r"Ellipticity $\epsilon$", fontsize=14)

@@ -208,13 +208,13 @@ def plotVoidCells(catalog,
  plt.clf()

  iVoid = -1
-  for i in xrange(len(catalog.voids)):
+  for i in range(len(catalog.voids)):
    if catalog.voids[i].voidID == voidID:
      iVoid = i
      break

  if iVoid == -1:
-    print "Void ID %d not found!" % voidID
+    print("Void ID %d not found!" % voidID)
    return

  sliceCenter = catalog.voids[iVoid].macrocenter

@@ -276,7 +276,7 @@ def plotVoidCells(catalog,
  cellsMaxlimz = zmax
  cellsradiuslim = 0.0

-  for p in xrange(len(volume)):
+  for p in range(len(volume)):
    if (shiftedPartVoid[p,2]>(cellsMinlimz) and \
        shiftedPartVoid[p,2]<(cellsMaxlimz) and \
        radius[p]>cellsradiuslim):
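For orientation, a hypothetical call into the renamed module (the loader name and the sample path are assumptions; neither appears in this diff):

# Illustration only -- not part of this commit.
from vide.voidUtil import loadVoidCatalog, plotNumberFunction  # loader name assumed

catalog = loadVoidCatalog("./examples/example_observation/sample_example_observation/")
plotNumberFunction([catalog])  # one number function per catalog in the list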
@@ -1,7 +1,7 @@
#+
-# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/plotting/plotTools.py
-# Copyright (C) 2010-2013 Guilhem Lavaux
-# Copyright (C) 2011-2013 P. M. Sutter
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/profileUtil.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

@@ -19,12 +19,12 @@
#+
__all__=['buildProfile','fitHSWProfile','getHSWProfile',]

-from void_python_tools.backend.classes import *
-from void_python_tools.voidUtil import *
-from plotDefs import *
+from vide.backend.classes import *
+from vide.voidUtil import *
+from .plotDefs import *
import numpy as np
import os
-import void_python_tools.apTools as vp
+import vide.apTools as vp
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d

@@ -53,17 +53,17 @@ def buildProfile(catalog, rMin, rMax, nBins=10):
  rMaxProfile = rMin*3 + 2
  periodicLine = getPeriodic(catalog.sampleInfo)

-  print " Building particle tree..."
+  print(" Building particle tree...")
  partTree = getPartTree(catalog)

-  print " Selecting voids to stack..."
+  print(" Selecting voids to stack...")
  voidsToStack = [v for v in catalog.voids if (v.radius > rMin and v.radius < rMax)]

  if len(voidsToStack) == 0:
-    print " No voids to stack!"
+    print(" No voids to stack!")
    return -1, -1, -1

-  print " Stacking voids..."
+  print(" Stacking voids...")
  allProfiles = []
  for void in voidsToStack:
    center = void.macrocenter

@@ -197,7 +197,7 @@ def getHSWProfile(density, radius):

  mySample = next((item for item in samples if item['name'] == density), None)
  if mySample == None:
-    print "Sample", density," not found! Use one of ", [item['name'] for item in samples]
+    print("Sample", density," not found! Use one of ", [item['name'] for item in samples])
    return

  # interpolate the radii
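A minimal sketch of the stacking entry point, following the signature in the hunk header above (the meaning of the three return values is inferred from the code, so treat the names as assumptions):

# Illustration only -- not part of this commit.
# Stack all voids with radii between 20 and 25 Mpc/h into one radial profile.
result = buildProfile(catalog, 20.0, 25.0, nBins=10)
if result != (-1, -1, -1):  # buildProfile returns -1, -1, -1 when nothing stacks
  binCenters, stackedProfile, sigmas = result  # names assumed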
@@ -1,128 +1,147 @@
+#+
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/xcorUtil.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#+
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import rc
-import xcorlib
-from void_python_tools.voidUtil import getArray
+from . import xcorlib
+from vide.voidUtil import getArray

def computeXcor(catalog,
                figDir="./",
                Nmesh = 256,
                Nbin = 100
                ):

  # Computes and plots void-void and void-matter(galaxy) correlations
  # catalog: catalog to analyze
  # figDir: where to place plots
  # Nmesh: number of grid cells in cic mesh-interpolation
  # Nbin: number of bins in final plots

  # Parameters
  Lbox = catalog.boxLen[0] # Boxlength
  Lboxcut = 0.
  Lbox -= 2*Lboxcut

  # Input particle arrays of shape (N,3)
  xm = catalog.partPos # Halos / Galaxies / Dark matter
  xv = getArray(catalog.voids, 'macrocenter')

  # Interpolate to mesh
  dm, wm, ws = xcorlib.cic(xm, Lbox, Lboxcut = Lboxcut, Nmesh = Nmesh, weights = None)
  dv, wm, ws = xcorlib.cic(xv, Lbox, Lboxcut = Lboxcut, Nmesh = Nmesh, weights = None)

  # Fourier transform
  dmk = np.fft.rfftn(dm)
  dvk = np.fft.rfftn(dv)

  # 1D Power spectra & correlation functions
  ((Nm, km, Pmm, SPmm),(Nmx, rm, Xmm, SXmm)) = xcorlib.powcor(dmk, dmk, Lbox, Nbin, 'lin', True, True, 1)
  ((Nm, km, Pvm, SPvm),(Nmx, rm, Xvm, SXvm)) = xcorlib.powcor(dvk, dmk, Lbox, Nbin, 'lin', True, True, 1)
  ((Nm, km, Pvv, SPvv),(Nmx, rm, Xvv, SXvv)) = xcorlib.powcor(dvk, dvk, Lbox, Nbin, 'lin', True, True, 1)

  # Number densities
  nm = np.empty(len(km))
  nv = np.empty(len(km))
  nm[:] = len(xm)/Lbox**3
  nv[:] = len(xv)/Lbox**3

  # Plots
  mpl.rc('font', family='serif')
  ms = 2.5
  fs = 16
  mew = 0.1
  margin = 1.2
  kmin = km.min()/margin
  kmax = km.max()*margin
  rmin = rm.min()/margin
  rmax = rm.max()*margin

  # Density fields (projected)
  plt.imshow(np.sum(dm[:,:,:]+1,2),extent=[0,Lbox,0,Lbox],aspect='equal',cmap='YlGnBu_r',interpolation='gaussian')
  plt.xlabel(r'$x \;[h^{-1}\mathrm{Mpc}]$')
  plt.ylabel(r'$y \;[h^{-1}\mathrm{Mpc}]$')
  plt.title(r'Dark matter')
  plt.savefig(figDir+'/dm.eps', bbox_inches="tight")
  plt.savefig(figDir+'/dm.pdf', bbox_inches="tight")
  plt.savefig(figDir+'/dm.png', bbox_inches="tight")
  plt.clf()

  plt.imshow(np.sum(dv[:,:,:]+1,2)/Nmesh,extent=[0,Lbox,0,Lbox],aspect='equal',cmap='YlGnBu_r',interpolation='gaussian')
  plt.xlabel(r'$x \;[h^{-1}\mathrm{Mpc}]$')
  plt.ylabel(r'$y \;[h^{-1}\mathrm{Mpc}]$')
  plt.title(r'Voids')
  plt.savefig(figDir+'/dv.eps', bbox_inches="tight") #, dpi=300
  plt.savefig(figDir+'/dv.pdf', bbox_inches="tight") #, dpi=300
  plt.savefig(figDir+'/dv.png', bbox_inches="tight") #, dpi=300
  plt.clf()

  # Power spectra & correlation functions
  pa ,= plt.plot(km, Pmm, 'k-o', ms=0.8*ms, mew=mew, mec='k')
  #plt.plot(km, Pmm-1./nm, 'k--', ms=ms, mew=mew)
  plt.fill_between(km, Pmm+SPmm, abs(Pmm-SPmm), color='k', alpha=0.2)
  pb ,= plt.plot(km, Pvm, 'm-D', ms=ms, mew=mew, mec='k')
  plt.plot(km, -Pvm, 'mD', ms=ms, mew=mew, mec='k')
  plt.fill_between(km, abs(Pvm+SPvm), abs(Pvm-SPvm), color='m', alpha=0.2)
  pc ,= plt.plot(km, Pvv, 'b-p', ms=1.3*ms, mew=mew, mec='k')
  #plt.plot(km, Pvv-1./nv, 'b--', ms=ms, mew=mew)
  plt.fill_between(km, Pvv+SPvv, abs(Pvv-SPvv), color='b', alpha=0.2)
  plt.xlabel(r'$k \;[h\mathrm{Mpc}^{-1}]$')
  plt.ylabel(r'$P(k) \;[h^{-3}\mathrm{Mpc}^3]$')
  plt.title(r'Power spectra')
  plt.xscale('log')
  plt.yscale('log')
  plt.xlim(kmin,kmax)
  plt.ylim(10**np.floor(np.log10(abs(Pvm[1:]).min()))/margin, max(10**np.ceil(np.log10(Pmm.max())),10**np.ceil(np.log10(Pvv.max())))*margin)
  plt.legend([pa, pb, pc],['tt', 'vt', 'vv'],'best',prop={'size':12})
  plt.savefig(figDir+'/power.eps', bbox_inches="tight")
  plt.savefig(figDir+'/power.pdf', bbox_inches="tight")
  plt.savefig(figDir+'/power.png', bbox_inches="tight")
  plt.clf()

  pa ,= plt.plot(rm, Xmm, 'k-o', ms=0.8*ms, mew=mew)
  plt.fill_between(rm, abs(Xmm+SXmm), abs(Xmm-SXmm), color='k', alpha=0.2)
  pb ,= plt.plot(rm, Xvm, 'm-D', ms=ms, mew=mew)
  plt.plot(rm, -Xvm, 'mD', ms=ms, mew=mew)
  plt.fill_between(rm, abs(Xvm+SXvm), abs(Xvm-SXvm), color='m', alpha=0.2)
  pc ,= plt.plot(rm, Xvv, 'b-p', ms=1.3*ms, mew=mew)
  plt.plot(rm, -Xvv, 'bp', ms=ms, mew=1.3*mew)
  plt.fill_between(rm, abs(Xvv+SXvv), abs(Xvv-SXvv), color='b', alpha=0.2)
  plt.xlabel(r'$r \;[h^{-1}\mathrm{Mpc}]$')
  plt.ylabel(r'$\xi(r)$')
  plt.title(r'Correlation functions')
  plt.xscale('log')
  plt.yscale('log')
  plt.xlim(rmin,rmax)
  plt.ylim(min(10**np.floor(np.log10(abs(Xvm).min())),10**np.floor(np.log10(abs(Xmm).min())))/margin, max(10**np.ceil(np.log10(Xmm.max())),10**np.ceil(np.log10(Xvv.max())))*margin)
  plt.legend([pa, pb, pc],['tt', 'vt', 'vv'],'best',prop={'size':12})
  plt.savefig(figDir+'/correlation.eps', bbox_inches="tight")
  plt.savefig(figDir+'/correlation.pdf', bbox_inches="tight")
  plt.savefig(figDir+'/correlation.png', bbox_inches="tight")
  plt.clf()

  return
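A hypothetical driver for computeXcor (it assumes the catalog was loaded together with its particles so that catalog.partPos is populated, and that the function is re-exported from vide.voidUtil):

# Illustration only -- not part of this commit.
from vide.voidUtil import computeXcor  # export path assumed

computeXcor(catalog, figDir="./figs", Nmesh=128, Nbin=50)
# writes dm.*, dv.*, power.* and correlation.* into figDir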
@@ -1,88 +1,107 @@
+#+
+# VIDE -- Void IDentification and Examination -- ./python_tools/vide/voidUtil/xcorlib.py
+# Copyright (C) 2010-2014 Guilhem Lavaux
+# Copyright (C) 2011-2014 P. M. Sutter
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#+
import numpy as np

# CIC interpolation
def cic(x, Lbox, Lboxcut = 0, Nmesh = 128, weights = None):

  if weights == None: weights = 1
  wm = np.mean(weights)
  ws = np.mean(weights**2)

  d = np.mod(x/(Lbox+2*Lboxcut)*Nmesh,1)

  box = ([Lboxcut,Lbox+Lboxcut],[Lboxcut,Lbox+Lboxcut],[Lboxcut,Lbox+Lboxcut])

  rho = np.histogramdd(x, range = box, bins = Nmesh, weights = weights*(1-d[:,0])*(1-d[:,1])*(1-d[:,2]))[0] \
      + np.roll(np.histogramdd(x, range = box, bins = Nmesh, weights = weights*d[:,0]*(1-d[:,1])*(1-d[:,2]))[0],1,0) \
      + np.roll(np.histogramdd(x, range = box, bins = Nmesh, weights = weights*(1-d[:,0])*d[:,1]*(1-d[:,2]))[0],1,1) \
      + np.roll(np.histogramdd(x, range = box, bins = Nmesh, weights = weights*(1-d[:,0])*(1-d[:,1])*d[:,2])[0],1,2) \
      + np.roll(np.roll(np.histogramdd(x, range = box, bins = Nmesh, weights = weights*d[:,0]*d[:,1]*(1-d[:,2]))[0],1,0),1,1) \
      + np.roll(np.roll(np.histogramdd(x, range = box, bins = Nmesh, weights = weights*d[:,0]*(1-d[:,1])*d[:,2])[0],1,0),1,2) \
      + np.roll(np.roll(np.histogramdd(x, range = box, bins = Nmesh, weights = weights*(1-d[:,0])*d[:,1]*d[:,2])[0],1,1),1,2) \
      + np.roll(np.roll(np.roll(np.histogramdd(x, range = box, bins = Nmesh, weights = weights*d[:,0]*d[:,1]*d[:,2])[0],1,0),1,1),1,2)

  rho /= wm

  rho = rho/rho.mean() - 1.

  return (rho, wm, ws)


# Power spectra & correlation functions
def powcor(d1, d2, Lbox, Nbin = 10, scale = 'lin', cor = False, cic = True, dim = 1):

  Nmesh = len(d1)

  # CIC correction
  if cic:
    wid = np.indices(np.shape(d1))
    wid[np.where(wid >= Nmesh/2)] -= Nmesh
    wid = wid*np.pi/Nmesh + 1e-100
    wcic = np.prod(np.sin(wid)/wid,0)**2

  # Shell average power spectrum
  dk = 2*np.pi/Lbox
  Pk = np.conj(d1)*d2*(Lbox/Nmesh**2)**3
  if cic: Pk /= wcic**2

  (Nm, km, Pkm, SPkm) = shellavg(np.real(Pk), dk, Nmesh, Nbin = Nbin, xmin = 0., xmax = Nmesh*dk/2, scale = scale, dim = dim)

  # Inverse Fourier transform and shell average correlation function
  if cor:
    if cic: Pk *= wcic**2 # Undo cic-correction in correlation function
    dx = Lbox/Nmesh
    Xr = np.fft.irfftn(Pk)*(Nmesh/Lbox)**3

    (Nmx, rm, Xrm, SXrm) = shellavg(np.real(Xr), dx, Nmesh, Nbin = Nbin/2, xmin = dx, xmax = 140., scale = scale, dim = dim)

    return ((Nm, km, Pkm, SPkm),(Nmx, rm, Xrm, SXrm))

  else: return (Nm, km, Pkm, SPkm)


# Shell averaging
def shellavg(f, dx, Nmesh, Nbin = 10, xmin = 0., xmax = 1., scale = 'lin', dim = 1):

  x = np.indices(np.shape(f))
  x[np.where(x >= Nmesh/2)] -= Nmesh
  f = f.flatten()

  if scale == 'lin': bins = xmin+(xmax-xmin)* np.linspace(0,1,Nbin+1)
  if scale == 'log': bins = xmin*(xmax/xmin)**np.linspace(0,1,Nbin+1)

  if dim == 1: # 1D
    x = dx*np.sqrt(np.sum(x**2,0)).flatten()
    Nm = np.histogram(x, bins = bins)[0]
    xm = np.histogram(x, bins = bins, weights = x)[0]/Nm
    fm = np.histogram(x, bins = bins, weights = f)[0]/Nm
    fs = np.sqrt((np.histogram(x, bins = bins, weights = f**2)[0]/Nm - fm**2)/(Nm-1))
    return (Nm, xm, fm, fs)

  elif dim == 2: # 2D
    xper = dx*np.sqrt(x[0,:,:,:]**2 + x[1,:,:,:]**2 + 1e-100).flatten()
    xpar = dx*np.abs(x[2,:,:,:]).flatten()
    x = dx*np.sqrt(np.sum(x**2,0)).flatten()
    Nm = np.histogram2d(xper, xpar, bins = [bins,bins])[0]
    xmper = np.histogram2d(xper, xpar, bins = [bins,bins], weights = xper)[0]/Nm
    xmpar = np.histogram2d(xper, xpar, bins = [bins,bins], weights = xpar)[0]/Nm
    fm = np.histogram2d(xper, xpar, bins = [bins,bins], weights = f)[0]/Nm
    return (Nm, xmper, xmpar, fm)
python_tools/void_pipeline/__init__.py   (new file, 0 lines)
python_tools/void_pipeline/__main__.py   (new file, 140 lines)
@@ -0,0 +1,140 @@
#!/usr/bin/env python
#+
# VIDE -- Void IDentification and Examination -- ./python_tools/void_pipeline/__init__.py
# Copyright (C) 2010-2014 Guilhem Lavaux
# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#+

# Stage 1 : generate particles
# Stage 2 : find voids
# Stage 3 : prune catalog

from vide.backend import *
import vide
import imp
import os
import pickle

# ------------------------------------------------------------------------------

if (len(sys.argv) == 1):
  print("Usage: ./generateCatalog.py parameter_file.py")
  exit(-1)

if (len(sys.argv) > 1):
  filename = sys.argv[1]
  print(" Loading parameters from", filename)
  if not os.access(filename, os.F_OK):
    print(" Cannot find parameter file %s!" % filename)
    exit(-1)
  parms = imp.load_source("name", filename)
  regenerateFlag = False
  globals().update(vars(parms))
  void_path = os.path.split(vide.__file__)[0]
  ZOBOV_PATH=f'{void_path}/bin/'
  CTOOLS_PATH=ZOBOV_PATH
  print(f"ZOBOV_PATH is {ZOBOV_PATH}")
else:
  print(" Using default parameters")

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------

if not os.access(logDir, os.F_OK):
  os.makedirs(logDir)

if not os.access(figDir, os.F_OK):
  os.makedirs(figDir)

if not continueRun:
  print(" Cleaning out log files...")

  if startCatalogStage <= 1 and glob.glob(logDir+"/generate*") != []:
    os.system("rm %s/generate*" % logDir)
  if startCatalogStage <= 2 and glob.glob(logDir+"/zobov*") != []:
    os.system("rm %s/zobov*" % logDir)
  if startCatalogStage <= 3 and glob.glob(logDir+"/prune*") != []:
    os.system("rm %s/prune*" % logDir)

for sample in dataSampleList:

  sampleName = sample.fullName

  print(" Working with data set", sampleName, "...")
  zobovDir = workDir+"/sample_"+sampleName+"/"
  sample.zobovDir = zobovDir

  if not os.access(zobovDir, os.F_OK):
    os.makedirs(zobovDir)

  # ---------------------------------------------------------------------------
  if (startCatalogStage <= 1) and (endCatalogStage >= 1) and not sample.isCombo:
    print(" Extracting tracers from catalog...", end=' ')
    sys.stdout.flush()

    logFile = logDir+"/generate_"+sampleName+".out"

    if sample.dataType == "observation":
      GENERATE_PATH = CTOOLS_PATH+"/generateFromCatalog"
    else:
      GENERATE_PATH = CTOOLS_PATH+"/generateMock"

    launchGenerate(sample, GENERATE_PATH, workDir=workDir,
                   inputDataDir=inputDataDir, zobovDir=zobovDir,
                   figDir=figDir, logFile=logFile, useComoving=sample.useComoving,
                   continueRun=continueRun, regenerate=regenerateFlag)

  # --------------------------------------------------------------------------
  if (startCatalogStage <= 2) and (endCatalogStage >= 2) and not sample.isCombo:
    print(" Extracting voids with ZOBOV...", end=' ')
    sys.stdout.flush()

    launchZobov(sample, ZOBOV_PATH, zobovDir=zobovDir, logDir=logDir,
                continueRun=continueRun, numZobovDivisions=numZobovDivisions,
                numZobovThreads=numZobovThreads)

  # -------------------------------------------------------------------------
  if (startCatalogStage <= 3) and (endCatalogStage >= 3) and not sample.isCombo:

    print(" Taking data portions", "...", end=' ')
    sys.stdout.flush()

    logFile = logDir+"/pruneVoids_"+sampleName+".out"

    PRUNE_PATH = CTOOLS_PATH+"/pruneVoids"

    launchPrune(sample, PRUNE_PATH,
                logFile=logFile, zobovDir=zobovDir,
                useComoving=sample.useComoving, continueRun=continueRun)

# -------------------------------------------------------------------------
if (startCatalogStage <= 4) and (endCatalogStage >= 4):

  print(" Plotting...", end=' ')
  sys.stdout.flush()

  #for thisDataPortion in dataPortions:
    #plotRedshiftDistribution(workDir, dataSampleList, figDir, showPlot=False,
    #                         dataPortion=thisDataPortion, setName=setName)
    #plotSizeDistribution(workDir, dataSampleList, figDir, showPlot=False,
    #                     dataPortion=thisDataPortion, setName=setName)
    #plotNumberDistribution(workDir, dataSampleList, figDir, showPlot=False,
    #                       dataPortion=thisDataPortion, setName=setName)
    #plotVoidDistribution(workDir, dataSampleList, figDir, showPlot=False,
    #                     dataPortion=thisDataPortion, setName=setName)

print("\n Done!")
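With this layout the pipeline now runs as a module, e.g. `python3 -m void_pipeline datasets/example_observation.py` from inside python_tools (the exact working directory is an assumption); the parameter file supplies startCatalogStage and endCatalogStage, and the three stages (generate, ZOBOV, prune) plus the optional plotting stage are gated on those values.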
python_tools/void_pipeline/datasets/example_observation.py   (new file, 103 lines)
@@ -0,0 +1,103 @@
#!/usr/bin/env python
#+
# VIDE -- Void IDentification and Examination -- ./python_tools/void_pipeline/datasets/example_observation.py
# Copyright (C) 2010-2014 Guilhem Lavaux
# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#+

import os
import numpy as np
from vide.backend.classes import *

# if True, will scan log files for last known completed state and run from there
continueRun = False

# stages:
# 1 : extract redshift slices from data
# 2 : void extraction using zobov
# 3 : removal of small voids and voids near the edge
startCatalogStage = 1
endCatalogStage = 3

basePath = os.path.dirname(os.path.abspath(__file__))
basePath = os.path.abspath(os.path.join(basePath,"..","..","..","examples"))

# directory for input data files
inputDataDir = basePath

# void catalog output directory
workDir = os.path.join(basePath,"example_observation")

# output directory for log files
logDir = os.path.join(basePath,"logs","example_observation")

# output directory for figures
figDir = os.path.join(basePath,"figs","example_observation")

# optimization: maximum number of parallel threads to use
numZobovThreads = 2

# optimization: number of subdivisions of the box
numZobovDivisions = 2

# don't change this
dataSampleList = []

# define your volume-limited samples
newSample = Sample(
  # path to galaxy file is inputDataDir+dataFile
  dataFile = "example_observation.dat",

  # full name for this sample
  fullName = "example_observation",

  # a convenient nickname
  nickName = "exobs",

  # don't change this
  dataType = "observation",

  # assume sample is volume-limited?
  volumeLimited = True,

  # HEALpix mask file
  maskFile = inputDataDir+"/example_observation_mask.fits",

  # radial selection function (if not volume limited)
  selFunFile = None,

  # max and min redshifts of galaxies in your sample
  zBoundary = (0.0, 0.15),

  # max and min redshifts where you want to find voids
  zRange = (0.1, 0.15),

  # leave this at -1 for mean particle separation,
  # or specify your own in Mpc/h
  minVoidRadius = -1,

  # density of mock particles in cubic Mpc/h
  # (make this as high as you can afford)
  fakeDensity = 0.05,

  # if true, convert to comoving space using LCDM cosmology
  useComoving = True
)
dataSampleList.append(newSample)

# repeat the above block for any other samples
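As the closing comment says, further samples are added by repeating the Sample block. A hypothetical second, lower-redshift slice of the same survey would look like this (all values illustrative):

# Illustration only -- not part of this commit.
newSample = Sample(dataFile = "example_observation.dat",
                   fullName = "example_observation_lowz",
                   nickName = "exobs_lowz",
                   dataType = "observation",
                   volumeLimited = True,
                   maskFile = inputDataDir+"/example_observation_mask.fits",
                   selFunFile = None,
                   zBoundary = (0.0, 0.15),
                   zRange = (0.05, 0.1),
                   minVoidRadius = -1,
                   fakeDensity = 0.05,
                   useComoving = True)
dataSampleList.append(newSample)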
python_tools/void_pipeline/datasets/example_simulation.py   (new file, 167 lines)
@@ -0,0 +1,167 @@
#+
# VIDE -- Void IDentification and Examination -- ./python_tools/void_pipeline/datasets/example_simulation.py
# Copyright (C) 2010-2014 Guilhem Lavaux
# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#+
import os

PWD=os.getcwd()

# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# CONFIGURATION

# if True, will scan log files for last known completed state and run from there
continueRun = False

# stages:
# 1 : extract redshift slices from data
# 2 : void extraction using zobov
# 3 : removal of small voids and voids near the edge
startCatalogStage = 1
endCatalogStage = 3

# directory for the input simulation files
catalogDir = PWD+"/examples/"

# void catalog output directory
voidOutputDir = PWD+"/examples/example_simulation/"

# output directory for log files
logDir = PWD+"/logs/example_simulation/"

# output directory for figures
figDir = PWD+"/figs/example_simulation/"

# where to place the pipeline scripts
scriptDir = PWD+"/example_simulation/"

# don't change
dataType = "simulation"

# available formats for simulation: gadget, sdf, multidark
dataFormat = "multidark"

# units of position in Mpc/h
dataUnit = 1

# place particles on the lightcone (z-axis in sims)?
useLightCone = False

# add peculiar velocities?
doPecVel = False

# optimization: maximum number of parallel threads to use
numZobovThreads = 2

# optimization: number of subdivisions of the box
numZobovDivisions = 2

# prefix to give all outputs
prefix = "sim_"

# how many independent slices along the z-axis?
numSlices = 1

# how many subdivisions along the x- and y-axis?
# ( = 2 will make 4 subvolumes for each slice, = 3 will make 9, etc.)
numSubvolumes = 1


###############################################################################
# Particles

# common filename of particle files
particleFileBase = "example_simulation_NNNN.dat"

# this flag will be replaced by values in fileNums list below
particleFileDummy = 'NNNN'

# list of file numbers for the particle files
fileNums = ["z0.0"]

# redshift of each file in the above fileNums list
redshifts = ["0.0"]

# list of desired subsamples - these are in units of h Mpc^-3!
subSamples = [1.0]

# if True, do the subsampling in preparation (available for sdf and multidark)
doSubSamplingInPrep = False

# if 'absolute', subSamples are given in particles per cubic Mpc/h
# if 'relative', subSamples are given as a fraction of input particles
subSampleMode = "relative"

# shift the z-coord of sims with redshift
shiftSimZ = False

###############################################################################
# Halos

# common filename of halo files, leave blank to ignore halos
haloFileBase = ""
#haloFileBase = "mf_4s_1G_1k_bgc2_NNNNN.sdf"

# this flag will be replaced by values in fileNums list above
haloFileDummy = ''
#haloFileDummy = 'NNNNN'

# minimum halo mass cuts to apply for the halo catalog
# use "none" to get all halos
minHaloMasses = []
#minHaloMasses = ["none", 1.2e13]

# locations of data in the halo catalog
haloFileMCol = 6   # mass
haloFileXCol = 0   # x
haloFileYCol = 1   # y
haloFileZCol = 2   # z
haloFileVXCol = 3  # v_x
haloFileVYCol = 4  # v_y
haloFileVZCol = 5  # v_z
haloFileColSep = ','     # separator
haloFileNumComLines = 0  # number of comments before data


###############################################################################
# simulation information

numPart = 1024*1024*1024
lbox = 999.983 # Mpc/h
omegaM = 0.2847979853038958
hubble = 0.6962 # h_0


###############################################################################
# HOD

# each of the HOD sets will be applied to each halo catalog defined above
hodParmList = [
  #{'name'       : "LowRes", #BOSS: Manera et al. 2012, eq. 26
  # 'Mmin'       : 0.0,
  # 'M1'         : 1.e14,
  # 'sigma_logM' : 0.596,
  # 'alpha'      : 1.0127,
  # 'Mcut'       : 1.19399e13,
  # 'galDens'    : 0.0002,
  #},
]

# END CONFIGURATION
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
@@ -1,26 +0,0 @@
#+
# VIDE -- Void IDentification and Examination -- ./python_tools/void_python_tools/partUtil/__init__.py
# Copyright (C) 2010-2014 Guilhem Lavaux
# Copyright (C) 2011-2014 P. M. Sutter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#+

from catalogUtil import *
from plotDefs import *
from plotUtil import *
from matchUtil import *
from xcorUtil import *
from profileUtil import *