Initial import

Guilhem Lavaux 2023-05-29 10:41:03 +02:00
commit 56a50eead3
820 changed files with 192077 additions and 0 deletions

File diff suppressed because one or more lines are too long


@@ -0,0 +1,16 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ares_tools/__init__.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from .read_all_h5 import *
from .analysis import analysis, IndexTracker
try:
from .visu import vtktools
except ImportError:
print("Skipping VTK tools")


@@ -0,0 +1,10 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ares_tools/analysis/__init__.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from .analysis import analysis, IndexTracker


@@ -0,0 +1,673 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ares_tools/analysis/analysis.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from ..read_all_h5 import explore_chain, rebuild_spliced_h5
import errno
import h5py as h5
import numpy as np
import healpy as hp
import numexpr as ne
import os
import math
from pylab import *
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
#import numba
'''
@numba.jit
def _special_spectrum_builder(P, PP, tmp, alpha, N_i, Pk_i):
    for i in range(Pk_i.size):
        total = 0
        for j in range(PP.size):
            v = PP[j]**(-alpha)*exp(- N_i[i]*Pk_i[i] / (2 * PP[j]))
            tmp[j] = v
            total += v
for j in range(PP.size):
P[j] += tmp[j] / total
'''
#ARES/HADES/BORG image scanning class
class IndexTracker:
def __init__(self, ax, X):
self.ax = ax
self.ax.set_title('use up/down keys to navigate images')
self.X = X
rows,cols,self.slices = X.shape
        self.ind = self.slices // 2
cmax=X.max()
cmin=X.min()
self.im = self.ax.imshow(self.X[:,self.ind,:],vmax=cmax,vmin=cmin)
self.update()
def onscroll(self, event):
#print ("%s " % (event.key))
if event.key=='up':
self.ind = np.clip(self.ind+1, 0, self.slices-1)
else:
self.ind = np.clip(self.ind-1, 0, self.slices-1)
self.update()
def update(self):
self.im.set_data(self.X[:,self.ind,:])
self.ax.set_ylabel('slice %s'%self.ind)
self.im.axes.figure.canvas.draw()
def detect_ncpus(path):
ncpu = 0
try:
while True:
with open("%s_%d" % (path,ncpu), mode= "rb") as f:
ncpu += 1
except IOError as e:
if e.errno != errno.ENOENT:
raise e
return ncpu
#ARES/HADES/BORG analysis class
class analysis:
def __init__(self, chain_path='.', LSS_framework='ARES', start=0, step=1):
self.chain_path = chain_path
self.LSS_framework = LSS_framework
self.description = "This Class is part of the ARES/HADES/BORG analysis framework"
self.author = "Copyright (C) 2009-2017 Jens Jasche \n Copyright (C) 2014-2017 Guilhem Lavaux"
#get chain setup
self.L0=0
self.L1=0
self.L2=0
self.N0=0
self.N1=0
self.N2=0
self.x0=0
self.x1=0
self.x2=0
self.ncpus = detect_ncpus(os.path.join(self.chain_path, "restart.h5"))
self.mcmc_list=[]
Fmax=start
        while True:
            try:
                os.stat(os.path.join(self.chain_path, "mcmc_%d.h5" % Fmax))
            except OSError:
                break
            self.mcmc_list.append(Fmax)
            Fmax += step
with h5.File(os.path.join(self.chain_path, "restart.h5_0"), mode="r") as f:
#print markov.keys()
#print info.keys()
#print f['scalars'].keys()
self.L0 = f['scalars']['L0'][0]
self.L1 = f['scalars']['L1'][0]
self.L2 = f['scalars']['L2'][0]
self.N0 = int(f['scalars']['N0'][:])
self.N1 = int(f['scalars']['N1'][:])
self.N2 = int(f['scalars']['N2'][:])
self.xmin0 = int(f['scalars']['corner0'][:])
self.xmin1 = int(f['scalars']['corner1'][:])
self.xmin2 = int(f['scalars']['corner2'][:])
self.ncat = int(f['scalars']['NCAT'][:])
if(LSS_framework!='BORG'):
self.kmodes = f['/scalars/k_modes'][:]
self.nmodes = len(self.kmodes)
#get brefs
#self.k_keys = rebuild_spliced_h5(os.path.join(self.chain_path, "restart.h5"), ["scalars.k_keys"], self.ncpus)["scalars.k_keys"]
bref=[]
if LSS_framework != 'VIRBIUS' and LSS_framework != 'LYA':
for i in range(self.ncat):
bref.append(f['scalars']['galaxy_bias_ref_'+str(i)][:])
self.bias_ref=np.array(bref)
def _internal_power(self, P, Nbins, range, unit=False):
if not hasattr(self, '_k'):
ik0 = np.fft.fftfreq(self.N0, d=self.L0/self.N0)*2*np.pi
ik1 = np.fft.fftfreq(self.N1, d=self.L1/self.N1)*2*np.pi
ik2 = np.fft.fftfreq(self.N2, d=self.L2/self.N2)*2*np.pi
k = self._k = np.sqrt(ik0[:,None,None]**2+ik1[None,:,None]**2+ik2[None,None,:(self.N2//2+1)]**2)
self._Pw, _ = np.histogram(k, bins=Nbins, range=range)
Pw = self._Pw
else:
k = self._k
Pw = self._Pw
P, b = np.histogram(k, weights=P, bins=Nbins, range=range)
if not unit:
P /= self.L0*self.L1*self.L2
cond = Pw > 0
P[cond] /= Pw[cond]
return P, Pw, b
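    # _internal_power sums |delta(k)|^2 over spherical k-shells, divides by the
    # box volume L0*L1*L2 (unless unit=True) and then by the number of modes in
    # each shell (self._Pw); the k-grid is computed once and cached in self._k.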
    def rebin_power_spectrum(self, chain_id, Nbins=100, range=(0,1), unit=False):
        if not hasattr(self, 'k_keys'):
            # k_keys is spliced across the MPI restart files; rebuild it lazily
            self.k_keys = rebuild_spliced_h5(os.path.join(self.chain_path, "restart.h5"), ["scalars.k_keys"], self.ncpus)["scalars.k_keys"]
        with h5.File(os.path.join(self.chain_path, "mcmc_%d.h5" % (chain_id,)), mode="r") as f:
            P = f['/scalars/powerspectrum'][...] * (self.L0 * self.L1 * self.L2)
        return self._internal_power(P[self.k_keys], Nbins, range, unit=unit)
def compute_power_spectrum_mock(self, Nbins=100, unit=False, range=(0,1)):
with h5.File(os.path.join(self.chain_path, "mock_data.h5"), mode="r") as f:
shat = f['/scalars/s_hat_field'][...]
return self._internal_power(ne.evaluate('real(s)**2+imag(s)**2', dict(s=shat)), Nbins, range, unit=unit)
def compute_power_spectrum_galaxydata(self, Nbins=100, range=(0,1)):
with h5.File(os.path.join(self.chain_path, "mock_data.h5"), mode="r") as f:
# FFT galaxy data
dV = self.L0*self.L1*self.L2/(self.N0*self.N1*self.N2)
fd = np.fft.rfftn(f['/scalars/galaxy_data_0'][...])*dV
# remove zero mode
fd[0][0][0] = 0. + 0.j
return self._internal_power(ne.evaluate('real(s)**2+imag(s)**2', dict(s=fd)), Nbins, range)
def compute_power_shat_spectrum(self, chain_id, Nbins=100, unit=False, range=(0,1)):
dV = self.L0*self.L1*self.L2/(self.N0*self.N1*self.N2)
with h5.File(os.path.join(self.chain_path, "mcmc_%d.h5" % (chain_id,)), mode="r") as f:
if '/scalars/s_hat_field' in f:
shat = f['/scalars/s_hat_field'][...]
else:
shat = np.fft.rfftn(f['/scalars/s_field'][...])*dV
return self._internal_power(ne.evaluate('real(s)**2+imag(s)**2', dict(s=shat)), Nbins, range, unit=unit)
def compute_power_shat_cross_spectrum(self, chain_id1, chain_id2,
Nbins=100, unit=False, range=(0,1)):
dV = self.L0*self.L1*self.L2/(self.N0*self.N1*self.N2)
with h5.File(os.path.join(self.chain_path, "mcmc_%d.h5" % (chain_id1,)), mode="r") as f:
if '/scalars/s_hat_field' in f:
shat1 = f['/scalars/s_hat_field'][...]
else:
shat1 = np.fft.rfftn(f['/scalars/s_field'][...])*dV
with h5.File(os.path.join(self.chain_path, "mcmc_%d.h5" % (chain_id2,)), mode="r") as f:
if '/scalars/s_hat_field' in f:
shat2 = f['/scalars/s_hat_field'][...]
else:
shat2 = np.fft.rfftn(f['/scalars/s_field'][...])*dV
return self._internal_power(ne.evaluate('real(s1)*real(s2)+imag(s1)*imag(s2)', dict(s1=shat1,s2=shat2)), Nbins, range, unit=unit)
#return self._internal_power(ne.evaluate('real(s)**2+imag(s)**2', dict(s=shat)), Nbins, range, unit=unit)
# maybe 'unit' argument is not sensible here...
def compute_power_spectrum_finaldensity(self, chain_id, Nbins=100, unit=False, range=(0,1)):
dV = self.L0*self.L1*self.L2/(self.N0*self.N1*self.N2)
with h5.File(os.path.join(self.chain_path, "mcmc_%d.h5" % (chain_id,)), mode="r") as f:
fd = np.fft.rfftn(f['/scalars/BORG_final_density'][...]) * dV
return self._internal_power(ne.evaluate('real(s)**2+imag(s)**2', dict(s=fd)), Nbins, range, unit=unit)
# compute power spectrum of real-space field from given path and field name
def compute_power_shat_spectrum_file(self, path, fieldname='phases', Nbins=100, unit=False, range=(0,1)):
dV = self.L0*self.L1*self.L2/(self.N0*self.N1*self.N2)
with h5.File(path, mode="r") as f:
if fieldname in f:
# do FFT
shat = np.fft.rfftn(f[fieldname][...])*dV
# remove zero mode
shat[0][0][0] = 0. + 0.j
            else:
                raise KeyError("No field '%s' found in file." % fieldname)
        return self._internal_power(ne.evaluate('real(s)**2+imag(s)**2', dict(s=shat)), Nbins, range, unit=unit)
def check_biasref(self):
return self.bias_ref
def get_ncat(self):
return self.ncat
def get_mask(self,msknr):
selkey = "scalars.galaxy_sel_window_%s"%msknr
return \
rebuild_spliced_h5(
os.path.join(
self.chain_path,"restart.h5"
),
[selkey],
self.ncpus
)[selkey]
def get_data(self,datnr):
datkey = "scalars.galaxy_data_%s"%datnr
return \
rebuild_spliced_h5(
os.path.join(
self.chain_path,"restart.h5"
),
[datkey],
self.ncpus
)[datkey]
def scan_datacube(self,data):
fig = figure()
ax = fig.add_subplot(111)
plt.jet()
tracker = IndexTracker(ax, data)
fig.canvas.mpl_connect('key_press_event', tracker.onscroll)
show()
def get_2d_marginal(self,attribute_a='s_field',attribute_b='s_field',id_a=None,id_b=None, first_sample=0,last_sample=1000):
print( '-'*60)
print( 'Estimate 2d marginals for parameters ', attribute_a, ' and ', attribute_b , ' for ' , self.LSS_framework, ' run!')
print( '-'*60)
if(id_a==None or id_b==None):
print( "Error: no index chosen")
return -1
#2) collect chain
samples_a = []
samples_b = []
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute_a][:]
e = a[attribute_b][:]
samples_a.append(d[id_a])
samples_b.append(e[id_b])
H, xedges, yedges = np.histogram2d(samples_a, samples_b)
return xedges,yedges, H
def get_cross_corcoeff(self,attribute_a='s_field',attribute_b='s_field',id_a=None,id_b=None, first_sample=0,last_sample=1000):
print( '-'*60)
        print( 'Estimate cross-correlation coefficients for parameters ', attribute_a, ' and ', attribute_b , ' for ' , self.LSS_framework, ' run!')
print( '-'*60)
if(id_a==None or id_b==None):
print("Error: no index chosen")
return -1
#2) collect chain
samples_a = []
samples_b = []
nelements_a = len(id_a[0])
nelements_b = len(id_b[0])
mu_a = np.zeros(nelements_a)
var_a = np.zeros(nelements_a)
mu_b = np.zeros(nelements_b)
var_b = np.zeros(nelements_b)
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute_a][:]
e = a[attribute_b][:]
aux_a = d[id_a]
aux_b = e[id_b]
mu_a = (nn-1.)/float(nn)*mu_a +1./float(nn)*aux_a
if(nn>1):
aux = (mu_a-aux_a)**2
var_a = (nn-1.)/nn*var_a+1./(nn-1)*aux
mu_b = (nn-1.)/float(nn)*mu_b +1./float(nn)*aux_b
if(nn>1):
aux = (mu_b-aux_b)**2
var_b = (nn-1.)/nn*var_b+1./(nn-1)*aux
samples_a.append(aux_a)
samples_b.append(aux_b)
nn+=1
pc= np.zeros((nelements_a,nelements_b))
cnt=0
for n in range(nn-1):
x=samples_a[n]
y=samples_b[n]
pc += np.multiply.outer(x-mu_a, y-mu_b)
cnt+=1
return pc/float(cnt) #/np.sqrt(var_a*var_b)
def get_trace(self,attribute='s_field',element_id=None, first_sample=0,last_sample=1000):
print( '-'*60)
print( 'Record trace for parameters ', attribute , ' for ' , self.LSS_framework, ' run!')
print( '-'*60)
'''
if(element_id==None):
print "Error: no list of indices provided"
return -1
'''
#1) collect chain
samples = []
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute][:]
if (element_id!=None):
samples.append(d[element_id])
else:
samples.append(d)
nn+=1
return samples
def get_corrlength(self,attribute='s_field',element_id=None,nlength=100, first_sample=0,last_sample=1000):
print( '-'*60)
print( 'Estimate correlation length for parameters ', attribute , ' for ' , self.LSS_framework, ' run!')
print( '-'*60)
if(element_id==None):
print( "Error: no list of indices provided")
return -1
if(nlength>last_sample-first_sample):
print("Warning: Chain setting not long enough set nlength to last_sample")
nlength = last_sample-first_sample -1
nelements = len(element_id[0])
#1) calculate mean and variance
mu = np.zeros(nelements)
var = np.zeros(nelements)
#2) collect chain
samples = []
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute][:]
print( np.shape(d))
mu = (nn-1.)/float(nn)*mu +1./float(nn)*d[element_id]
if(nn>1):
aux = (mu-d[element_id])**2
var = (nn-1.)/nn*var+1./(nn-1)*aux
samples.append(d[element_id])
nn+=1
cl = np.zeros((nlength,nelements))
cl_count= np.zeros(nlength)
for i in range(nlength):
for j in range(len(samples)-i):
cl[i]+= (samples[j]-mu)*(samples[j+i]-mu)/var
cl_count[i] +=1.;
for i in range(nlength):
cl[i]/=cl_count[i]
return np.array(range(nlength)), cl
def print_job(self,msg):
print('-'*60)
print(msg)
print('-'*60)
    def build_power_spectrum_chain(self, Nbins=256, suffix="chain"):
        import progressbar as pb  # assumes the progressbar2 package is available
        opts = dict(Nbins=Nbins, range=(0, self.kmodes.max()))
        #FIXME: Do not use the first element
        Pref = self.rebin_power_spectrum(self.mcmc_list[0], **opts)
        # the list of samples to process is assumed to be the chain ids in self.mcmc_list
        names = self.mcmc_list
        try:
            data = np.load("power_%s.npz" % suffix)
            loc_names = names[len(data['P']):]
            PP = list(data['P'])
        except IOError:
            PP = []
            loc_names = list(names)
        print(loc_names)
        if len(loc_names) == 0:
            return
        for i in pb.ProgressBar()(loc_names):
            PP.append(self.compute_power_shat_spectrum(i, **opts))
        bins = 0.5*(Pref[2][1:]+Pref[2][:-1])
        # startMC / Fmax: chain bounds, taken from self.mcmc_list (assumption)
        np.savez("power_%s.npz" % suffix, bins=bins, P=PP,
                 startMC=self.mcmc_list[0], Fmax=self.mcmc_list[-1], Pref=Pref)
    def spectrum_pdf(self, first_sample=0, last_sample=-1, sample_steps=10, gridsize=1000, Pmin=None, Pmax=None):
        # Requires the numba helper _special_spectrum_builder (commented out at
        # the top of this file) and self.Nk, the per-bin mode counts.
        P = np.zeros((gridsize, self.nmodes), dtype=np.float64)
        if Pmin is None or Pmax is None:
            Pb_m, Pb_M = np.inf, 0
            for i, a in explore_chain(self.chain_path, first_sample, last_sample, sample_steps):
                Pb_m = min(a['/scalars/powerspectrum'][:].min(), Pb_m)
                Pb_M = max(a['/scalars/powerspectrum'][:].max(), Pb_M)
            if Pmin is None:
                Pmin = Pb_m
            if Pmax is None:
                Pmax = Pb_M
        # log-spaced grid of power values covering [Pmin, Pmax]
        PP = Pmin*np.exp(np.arange(gridsize)*np.log(Pmax/Pmin)/gridsize)
        tmp_PP = np.zeros_like(PP)
        N = 0
        prior = 0
        N_ib = 0.5*(self.Nk+prior)[None, :]
        for i, a in explore_chain(self.chain_path, first_sample, last_sample, sample_steps):
            Pk_i = a['/scalars/powerspectrum'][:]
            N_i = self.Nk
            _special_spectrum_builder(P, PP, tmp_PP, N_ib, N_i, Pk_i)
            N += 1
        P /= N
        return P
def get_spherical_slice(self,vdata,nside=32, observer=np.array([0,0,0]),rslice = 150.):
def RenderSphere(VolumeData3D,image,rslice,observer,Larr,Narr):
print( "Rendering Sphere...")
NSIDE=hp.npix2nside(len(image))
idx=Larr[0]/Narr[0]
idy=Larr[1]/Narr[1]
idz=Larr[2]/Narr[2]
for ipix in range(len(image)):
#get direction of pixel and calculate unit vectors
dx,dy,dz=hp.pix2vec(NSIDE, ipix)
d = math.sqrt(dx * dx + dy * dy + dz * dz)
dx = dx / d; dy = dy / d; dz = dz / d # ray unit vector
rayX = observer[0]+rslice*dx; rayY = observer[1]+rslice*dy; rayZ = observer[2]+rslice*dz
rayX /= idx; rayY /= idy; rayZ /= idz
#find voxel inside box
ix = int(round(rayX))
iy = int(round(rayY))
iz = int(round(rayZ))
image[ipix]=np.nan
            if ix > -1 and ix < Narr[0] \
               and iy > -1 and iy < Narr[1] \
               and iz > -1 and iz < Narr[2]:
jx = (ix+1) % Narr[0];
jy = (iy+1) % Narr[1];
jz = (iz+1) % Narr[2];
rx = (rayX - ix);
ry = (rayY - iy);
rz = (rayZ - iz);
qx = 1-rx;
qy = 1-ry;
qz = 1-rz;
val = VolumeData3D[ix,iy,iz] * qx * qy * qz +VolumeData3D[ix,iy,jz] * qx * qy * rz +VolumeData3D[ix,jy,iz] * qx * ry * qz +VolumeData3D[ix,jy,jz] * qx * ry * rz +VolumeData3D[jx,iy,iz] * rx * qy * qz +VolumeData3D[jx,iy,jz] * rx * qy * rz +VolumeData3D[jx,jy,iz] * rx * ry * qz +VolumeData3D[jx,jy,jz] * rx * ry * rz;
image[ipix]=val
            print('\r' + str(100 * ipix // (len(image) - 1)).zfill(3) + "%", end='')
obs = np.array([observer[0]-self.xmin0,observer[1]-self.xmin1,observer[2]-self.xmin2])
Larr=np.array([self.L0,self.L1,self.L2])
Narr=np.array([self.N0,self.N1,self.N2])
image = np.zeros(hp.nside2npix(nside))
RenderSphere(vdata,image,rslice,obs,Larr,Narr)
return image
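    # Typical use (sketch; 'ana' and 'density' are illustrative names): render
    # a spherical slice of a density sample and display it with healpy:
    #   img = ana.get_spherical_slice(density, nside=64, rslice=100.)
    #   hp.mollview(img)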
def mean_var_density(self, first_sample=0,last_sample=-1,sample_steps=10):
self.print_job('Estimate mean and variance of density fields for %s run!' % self.LSS_framework)
        if self.LSS_framework in ('ARES', 'HADES'):
            mu_i = np.zeros((self.N0,self.N1,self.N2))
            var_i = np.zeros((self.N0,self.N1,self.N2))
            nn=1
            for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
                d = a['s_field'][:]
                mu_i = (nn-1.)/float(nn)*mu_i +1./float(nn)*d
                if(nn>1):
                    aux = (mu_i-d)**2
                    var_i = (nn-1.)/nn*var_i+1./(nn-1)*aux
                nn+=1
            return mu_i, var_i
else:
mu_i = np.zeros((self.N0,self.N1,self.N2))
mu_f = np.zeros((self.N0,self.N1,self.N2))
var_i = np.zeros((self.N0,self.N1,self.N2))
var_f = np.zeros((self.N0,self.N1,self.N2))
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
d = a['s_field'][:]
mu_i = (nn-1.)/float(nn)*mu_i +1./float(nn)*d
if(nn>1):
aux = (mu_i-d)**2
var_i = (nn-1.)/nn*var_i+1./(nn-1)*aux
d = a['BORG_final_density'][:]
mu_f = (nn-1.)/float(nn)*mu_f +1./float(nn)*d
if(nn>1):
aux = (mu_f-d)**2
var_f = (nn-1.)/nn*var_f+1./(nn-1)*aux
nn+=1
return mu_i, var_i, mu_f, var_f
def mean_var_spec(self, first_sample=0,last_sample=-1,sample_steps=10):
        self.print_job('Estimate mean and variance of power spectra for %s run!' % self.LSS_framework)
        mu = np.zeros(self.nmodes)
        var = np.zeros(self.nmodes)
        nn = 1
        for i, a in explore_chain(self.chain_path, first_sample, last_sample, sample_steps):
            d = a['/scalars/powerspectrum'][:]
            mu = (nn-1.)/float(nn)*mu + 1./float(nn)*d
            if nn > 1:
                aux = (mu-d)**2
                var = (nn-1.)/nn*var + 1./(nn-1)*aux
            nn += 1
        return self.kmodes, mu, var


@@ -0,0 +1,495 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ares_tools/read_all_h5.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import os
import numpy as np
import numexpr as ne
import h5py as h5
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
try:
import tqdm
if isnotebook():
u_tqdm = tqdm.tqdm_notebook
else:
u_tqdm = tqdm.tqdm
def progress(iterator):
L = list(iterator)
return u_tqdm(L)
except ImportError:
def progress(iterator):
for i, q in enumerate(iterator):
if ((i % 100) == 0):
print(i)
yield q
def default_slicer(x): return x[...]
class Bunch(object):
def __init__(self, **kwds):
self.__dict__.update(**kwds)
def __del__(self):
if hasattr(self, '_lazy') and self._lazy:
# close the file/group
if isinstance(self._group, h5.File):
print("Closing HDF5 file")
try:
self._group.close()
except:
# Eat all exceptions
pass
def insert(self, aname, data):
if len(aname) == 1:
self.__dict__.update({aname[0]: data})
else:
if aname[0] in self.__dict__:
k = self.__dict__[aname[0]]
else:
k = Bunch()
self.__dict__.update({aname[0]: k})
k.insert(aname[1:], data)
def read_group(g, lazy=False):
m = {}
for k in g:
if hasattr(g[k], 'keys'):
m[k] = read_group(g[k], lazy=lazy)
else:
if lazy:
m[k] = g[k]
else:
m[k] = g[k][:]
if lazy:
m['_group'] = g
m['_lazy'] = lazy
return Bunch(**m)
def read_attr_group(g, ename, bunch, slicer=default_slicer):
for a in ename:
g = g[a]
    if bunch is None:
bunch = Bunch()
bunch.insert(ename, slicer(g))
return bunch
def read_all_h5(fname, lazy=False):
if lazy:
f = h5.File(fname, mode="r")
return read_group(f, lazy=True)
else:
with h5.File(fname, mode="r") as f:
return read_group(f, lazy=False)
def read_attr_h5(fname, egroup, slicer=default_slicer):
with h5.File(fname, mode="r") as f:
b = None
for e in egroup:
b = read_attr_group(f, e, b, slicer=slicer)
return b
def grabber(obj, e):
if len(e) == 1:
return getattr(obj, e[0])
else:
return grabber(getattr(obj, e[0]), e[1:])
def rebuild_spliced_h5(path, element_list, ncpu, verbose=False, flex_cpu=False):
"""Rebuild a set of fields which are spliced across different hdf5 files.
The use of this function is typically dedicated to the analysis of MPI run.
Parameters
----------
path : string
base path for the set of hdf5 file. A suffix "_[CPUID]" will be appended.
element_list : list of string
list of elements to rebuild from the set of files
ncpu : int
number of cpus for the run
verbose : boolean
if True the code will run with verbose output
flex_cpu : boolean
        if True, ncpu is understood as the maximum number of cpus. If the code does
        not find a file, it stops the rebuilding without failing.
Returns
-------
    dictionary
each element name is a key, and the value is a numpy array. The arrays are concatenated
according to their first axis.
"""
b = [None for e in element_list]
egrab = [e.split('.') for e in element_list]
for cpu in range(ncpu):
fname = path + "_%d" % cpu
if verbose:
print("Loading CPU file '%s'" % fname)
try:
a = read_attr_h5(fname, egrab)
except OSError:
if flex_cpu:
break
raise
for j, e in enumerate(egrab):
if b[j] is None:
b[j] = grabber(a, e)
else:
b[j] = np.append(b[j], grabber(a, e), axis=0)
dtype = [(e, t[0].dtype, t[0].shape) for e, t in zip(element_list, b)]
arr = {}
for e, q in zip(element_list, b):
arr[e] = q
return arr
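# Usage sketch for rebuild_spliced_h5 (the path and element name below are
# illustrative): reassemble the selection window of catalog 0 from a 16-cpu run:
#   r = rebuild_spliced_h5("chain/restart.h5", ["scalars.galaxy_sel_window_0"], 16)
#   window = r["scalars.galaxy_sel_window_0"]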
def chain_iterator(path, start=0, step=1, err_up=0, end=-1, prefix="mcmc", need_id=False):
import os
i = start
while True:
fname = os.path.join(path, "%s_%d.h5" % (prefix,i,))
try:
os.stat(fname)
except IOError:
if (i >= err_up):
return
else:
i += step
continue
if need_id:
yield (i,fname)
else:
yield fname
i += step
if end > 0 and i > end:
break
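# chain_iterator yields the existing mcmc_<i>.h5 paths in chain order, e.g.
# (sketch): for fname in chain_iterator(".", start=100, step=10): ...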
def read_chain_h5(path, element_list, start=0, step=1, err_up=0, slicer=default_slicer, prefix="mcmc", flexible=True):
"""
read_chain_h5(path,element_list,start=0,step=1,err_up=0,slicer=default_slicer)
Arguments:
* path: path where you have the chain (mcmc_* files)
* element_list: list of strings where the MCMC objects are stored. For example, you
have scalars.galaxy_nmean_0 for the nbar parameter of the first catalog.
* start: the first element of the chain to consider
* step: if you want to thin the chain by "step"
* err_up: whether to accept I/O errors when opening files up to the specified MCMC id
* slicer: a lambda function that can only take a subset of the array of the
specified MCMC object. For example, it can be lambda d: d[:,:,64], to indicate only
the plane 64 in the 3d grid that is being loaded. [64,...]
Returns:
* a columned numpy array "a". You have one column for each element_id of the
element_list. You can access one of the column like this:
a["scalars.galaxy_nmean_0"]
"""
i = start
b = [[] for e in element_list]
egrab = [e.split('.') for e in element_list]
for fname in progress(chain_iterator(path, start=start, step=step, err_up=err_up, prefix=prefix)):
try:
a = read_attr_h5(fname, egrab, slicer=slicer)
except OSError:
if not flexible:
raise
else:
break
for j, e in enumerate(egrab):
b[j].append(grabber(a, e))
dtype = [(e, t[0].dtype, t[0].shape) for e, t in zip(element_list, b)]
arr = np.empty(len(b[0]), dtype=dtype)
for e, q in zip(element_list, b):
arr[e] = q
return arr
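# Example use of read_chain_h5, with the element name quoted in the docstring
# above: load a thinned trace of the nbar parameter of the first catalog.
#   a = read_chain_h5(".", ["scalars.galaxy_nmean_0"], start=200, step=5)
#   nbar_trace = a["scalars.galaxy_nmean_0"]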
def chain_compute_xcor(path, start=0, Nbins=100):
import cosmotool as ct
import numexpr as ne
i = 0
with h5.File(os.path.join(path, "restart.h5_0"), mode="r") as f:
L0 = f['scalars']['L0'][:]
L1 = f['scalars']['L1'][:]
L2 = f['scalars']['L2'][:]
N0 = int(f['scalars']['N0'][:])
N1 = int(f['scalars']['N1'][:])
N2 = int(f['scalars']['N2'][:])
ix = np.fft.fftfreq(N0, d=1.0/L0)[:, None, None]
iy = np.fft.fftfreq(N1, d=1.0/L1)[None, :, None]
iz = np.fft.fftfreq(N2, d=1.0/L2)[None, None, :]
r2 = ne.evaluate('sqrt(ix**2+iy**2+iz**2)')
rmax = r2.max()
ir = (r2 * Nbins / rmax).astype(np.int32).ravel()
xi = []
W = np.bincount(ir, minlength=Nbins)
fft = ct.CubeFT(L0, N0)
while True:
try:
if i % 10 == 0:
print(i)
fname = os.path.join(path, "mcmc_%d.h5" % (i+start))
with h5.File(fname, mode="r") as f:
fft.density = f["scalars"]["s_field"][:]
fft.rfft()
ne.evaluate("complex(real(d)**2 + imag(d)**2, 0)",
local_dict={'d': fft.dhat}, out=fft.dhat, casting='unsafe')
fft.irfft()
xi.append(np.bincount(
ir, weights=fft.density.ravel(), minlength=Nbins))
i += 1
except Exception as e:
print(repr(e))
break
xi = np.array(xi) / W
r = np.arange(Nbins) * rmax / Nbins
return r, xi
def explore_chain(path, start, end=-1, step=1, quiet=True):
"""
Arguments:
* path
* start
* end
* step
Returns:
* iterator with hdf5 object. Example:
for i in explore_chain(".", 0):
mean = i['/scalars/galaxy_nmean_0'][0]
# Then do stuff with "mean"
"""
n = int(start)
nmax = int(end)
step = int(step)
k = 0
while (nmax == -1) or (n < nmax):
p = path + "/mcmc_%d.h5" % n
if not quiet and (k % 100) == 0:
print("%d" % n)
try:
f = h5.File(p, mode="r")
except Exception as e:
print(e)
break
try:
yield n, f['scalars']
finally:
f.close()
n += step
k += 1
def build_power_histogram(path, start=0, step=1, Nhisto=100, quiet=True, logP=True, Prange=(0.1, 1e5)):
"""
Use the scalars.powerspectrum mcmc element to build the PDF of the posterior
powerspectrum
Arguments:
* path
* start
* step
* Nhisto: number of bins for each k mode of the P(k) histogram
* quiet:
* logP: whether you want to use a log scale for plotting
* Prange: a tuple for giving the entire P range to represent
Returns:
* a tuple: t=(kmodes, Pgrid, Pk_pdf_values) which is directly usable in pcolormesh
like pcolormesh(*t)
"""
# print path+ "/restart.h5_0"
for _, scalars in explore_chain(path, start, end=start+1, step=1, quiet=quiet):
Nk = scalars["powerspectrum"].size
with h5.File(path + "/restart.h5_0", mode="r") as f:
k_mode = f["/scalars/k_modes"][:]
    Phisto = np.zeros((Nk, Nhisto), dtype=int)
if logP:
logPmin = np.log10(Prange[0])
DeltaLog = np.log10(Prange[1]/Prange[0])/(Nhisto)
def transform(P): return ((np.log10(P)-logPmin)/DeltaLog)
else:
Pmin = Prange[0]
DeltaP = (Prange[1]-Prange[0])/(Nhisto)
def transform(P): return (P-Pmin)/DeltaP
for n, scalars in explore_chain(path, start, step=step, quiet=quiet):
P = scalars["powerspectrum"][:]
        iP = np.floor(transform(P))
        ik = np.where((~np.isnan(iP))*(iP >= 0)*(iP < Nhisto))
        iP = iP[ik].astype(int)
for i, j in zip(ik[0], iP):
Phisto[i, j] = Phisto[i, j]+1
k_mode = k_mode[:, None].repeat(Nhisto, axis=1)
if logP:
Pg = 10**((np.arange(Nhisto)+0.5)*DeltaLog + logPmin)
else:
Pg = ((np.arange(Nhisto)+0.5)*DeltaP + Pmin)
Pg = Pg[None, :].repeat(Nk, axis=0)
return k_mode, Pg, Phisto
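# As noted in the docstring, the returned tuple plugs straight into pcolormesh
# (sketch, assuming matplotlib.pyplot is imported as plt):
#   k, Pg, Ph = build_power_histogram(".", start=100, step=5)
#   plt.pcolormesh(k, Pg, Ph)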
def read_chain_complex_avg_dev(path, op, start=0, end=-1, do_dev=False, step=1, slicer=default_slicer, prefix="mcmc"):
"""
Compute mean and standard deviation of the given element_list
Arguments:
* path
* op:
* element_list
* start
* do_dev: boolean for computing the standard deviation (or not)
* slicer:
Returns:
* a columned numpy array. Each column has a name that corresponds to an element with an additional dimension. For example, a['scalars.galaxy_nmean_0'][0] -> mean,
a['scalars.galaxy_nmean_0'][1] is the standard deviation.
"""
i = 0
b = None
bd = None
try:
for fname in progress(chain_iterator(path, start=start, step=step, end=end, prefix=prefix)):
with h5.File(fname, mode="r") as ff:
if b is None:
b = op(ff)
if do_dev:
bd = np.zeros(b.shape)
else:
data = op(ff)
ne.evaluate('r*a+k*c',
dict(k=1/float(i+1),
r=(float(i)/float(i+1)),
a=b,
c=data),
out=b)
                    if do_dev and i > 0:
ne.evaluate('k*(xn-mun)**2 + f*bdn',
dict(k=1/float(i),
f=float(i)/float(i+1),
xn=data,
mun=b,
bdn=bd),
out=bd)
i += 1
    except OSError:
        pass
    if do_dev:
        bd = np.sqrt(bd)
    return b, bd
def read_chain_avg_dev(path, element_list, start=0, end=-1, do_dev=False, operator=lambda x: x, step=1, slicer=default_slicer, prefix="mcmc", err_up=0):
"""
Compute mean and standard deviation of the given element_list
Arguments:
* path
* element_list
* start
* do_dev: boolean for computing the standard deviation (or not)
* operator: applies the operator on all the elements before computing the mean and
standard deviation.
* slicer:
Returns:
* a columned numpy array. Each column has a name that corresponds to an element with an additional dimension. For example, a['scalars.galaxy_nmean_0'][0] -> mean,
a['scalars.galaxy_nmean_0'][1] is the standard deviation.
"""
i = 0
b = [None for e in element_list]
bd = [None for e in element_list]
egrab = [e.split('.') for e in element_list]
for fname in progress(chain_iterator(path, start=start, step=step, end=end, err_up=err_up, prefix=prefix)):
a = read_attr_h5(fname, egrab, slicer=slicer)
for j, e in enumerate(egrab):
if b[j] is None:
b[j] = operator(grabber(a, e))
if do_dev:
bd[j] = np.zeros(b[j].shape)
else:
data = operator(grabber(a, e))
ne.evaluate('r*a+k*c',
dict(k=1/float(i+1),
r=(float(i)/float(i+1)),
a=b[j],
c=data),
out=b[j])
if do_dev and i > 0:
ne.evaluate('k*(xn-mun)**2 + f*bdn',
dict(k=1/float(i),
f=float(i)/float(i+1),
xn=data,
mun=b[j],
bdn=bd[j]),
out=bd[j])
i+=1
dtype = [(e, t.dtype, t.shape) for e, t in zip(element_list, b)]
arr = np.empty(2 if do_dev else 1, dtype=dtype)
for e, q, q2 in zip(element_list, b, bd):
arr[e][0] = q
if do_dev:
arr[e][1] = np.sqrt(q2)
return arr


@@ -0,0 +1,9 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ares_tools/visu/__init__.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+


@@ -0,0 +1,109 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ares_tools/visu/vtktools.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import numpy as np
from vtk.util.numpy_support import get_vtk_array_type, create_vtk_array, get_numpy_array_type
def numpy_scalar_to_vtk(np_array):
"""This function converts a numpy scalar array to a VTK array.
Args:
np_array (np.array): A numpy array
Returns:
vtk.vtkArray: An array of the closest possible type of numpy array. The array is deep
copied to avoid SEGV.
"""
atype = get_vtk_array_type(np_array.dtype)
array = create_vtk_array(atype)
array.SetNumberOfComponents(1)
ntype = get_numpy_array_type(atype)
adata = np.ravel(np_array).astype(ntype)
array.SetVoidArray(adata, len(adata), 1)
copy = array.NewInstance()
copy.DeepCopy(array)
return copy
def numpy_vector_to_vtk(np_array):
"""This function converts a numpy scalar array to a VTK array.
Args:
np_array (np.array): A numpy array
Returns:
vtk.vtkArray: An array of the closest possible type of numpy array. The array is deep
copied to avoid SEGV.
"""
    if np_array.shape[3] != 3:
        raise ValueError("Expected an array of 3-vectors: the last dimension must be 3")
atype = get_vtk_array_type(np_array.dtype)
array = create_vtk_array(atype)
array.SetNumberOfComponents(3)
ntype = get_numpy_array_type(atype)
adata = np.ravel(np_array).astype(ntype)
array.SetVoidArray(adata, len(adata), 1)
copy = array.NewInstance()
copy.DeepCopy(array)
return copy
def smooth_array(a, L=[1.0,1.0,1.0], R=0.1):
a_hat = np.fft.rfftn(a)
ik = [np.fft.fftfreq(iN, d=iL/iN)*2*np.pi for iN,iL in zip(a.shape,L)]
    k2 = ik[0][:,None,None]**2 + ik[1][None,:,None]**2 + ik[2][None,None,:a.shape[2]//2+1]**2
a_hat *= np.exp(-0.5*k2*R**2)
return np.fft.irfftn(a_hat)
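# smooth_array convolves the field with a Gaussian of width R by multiplying
# its Fourier transform by exp(-k^2 R^2 / 2); L gives the box size per axis.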
def displacement_array(a, L=[1.0,1.0,1.0], R=0.1):
a_hat = np.fft.rfftn(a)
ik = [np.fft.fftfreq(iN, d=iL/iN)*2*np.pi for iN,iL in zip(a.shape,L)]
    k2 = ik[0][:,None,None]**2 + ik[1][None,:,None]**2 + ik[2][None,None,:a.shape[2]//2+1]**2
b = np.empty(a.shape + (3,), dtype=np.float32)
b_hat = -a_hat * 1j*ik[0][:,None,None]/k2
b_hat[0,0,0]=0
b[...,0] = np.fft.irfftn(b_hat)
b_hat = -a_hat * 1j*ik[1][None,:,None]/k2
b_hat[0,0,0]=0
b[...,1] = np.fft.irfftn(b_hat)
    b_hat = -a_hat * 1j*ik[2][None,None,:a.shape[2]//2+1]/k2
b_hat[0,0,0]=0
b[...,2] = np.fft.irfftn(b_hat)
return b
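# displacement_array returns the displacement field psi_i(k) = -i k_i delta(k) / k^2
# for each axis (a Zel'dovich-like construction); the k=0 mode is zeroed by hand
# since k^2 vanishes there, so the division only emits a harmless warning.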
def setupImageData3D(img_data, np_array, dims=[1.0,1.0,1.0], name="numpy array"):
"""This function setups a 3D image data object.
"""
shape = np_array.shape
dx = (d0/(N0-1) for d0,N0 in zip(dims,shape))
img_data.SetOrigin(*(-d0/2 for d0 in dims)) # default values
img_data.SetSpacing(*dx)
img_data.SetDimensions(*shape) # number of points in each direction
array = numpy_scalar_to_vtk(np_array)
img_data.GetPointData().AddArray(array)
array.SetName(name)
return array


@@ -0,0 +1,85 @@
#+
# ARES/HADES/BORG Package -- ./scripts/check_gradients.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import matplotlib
matplotlib.use('Agg')
from ares_tools import read_all_h5
import pylab as plt
from sys import argv
file='dump.h5'
if (len(argv) > 1):
file = argv[1]
print('Reading from %s.' % file)
g=read_all_h5(file)
#ss=16
#step=10
ss=4#8*8
step=5
prior = g.scalars.gradient_array_prior[::ss,:,:].flatten()
prior_ref = g.scalars.gradient_array_prior_ref[::ss,:,:].flatten()  # assumes a *_prior_ref dataset analogous to gradient_array_lh_ref
dpr_adj_re= prior.real
dpr_ref_re= prior_ref.real
dpr_adj_im= prior.imag
dpr_ref_im= prior_ref.imag
lh = g.scalars.gradient_array_lh[::ss,:,:].flatten()
lh_ref = g.scalars.gradient_array_lh_ref[::ss,:,:].flatten()
dlh_adj_re= lh.real
dlh_ref_re= 1*lh_ref.real
dlh_adj_im= lh.imag
dlh_ref_im= 1*lh_ref.imag
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.1, top=0.95, wspace=0.25, hspace=0.16)
ax1=plt.subplot(2,2,1) # left subplot in top row
plt.axhline(0.0, color='black', linestyle=':')
plt.plot(dpr_adj_re[::step],'ro',markersize=5.)
plt.plot(dpr_ref_re[::step],color='blue')
ax1.yaxis.get_major_formatter().set_powerlimits((-2, 2))
ax1.xaxis.set_ticklabels('')
plt.ylabel('dPSI_prior_real')
ax2=plt.subplot(2,2,2) # right subplot in top row
plt.axhline(0.0, color='black', linestyle=':')
gg,=plt.plot(dpr_adj_im[::step],'ro',markersize=5.)
rr,=plt.plot(dpr_ref_im[::step],color='blue')
ax2.legend((gg,rr),('gradient','finite diff'))
ax2.yaxis.get_major_formatter().set_powerlimits((-2, 2))
ax2.xaxis.set_ticklabels('')
plt.ylabel('dPSI_prior_imag')
ax3=plt.subplot(2,2,3) # left subplot in bottom row
plt.axhline(0.0, color='black', linestyle=':')
plt.plot(dlh_adj_re[::step],'ro',markersize=5.)
plt.plot(dlh_ref_re[::step],color='blue')
ax3.yaxis.get_major_formatter().set_powerlimits((-2, 2))
plt.xlabel('voxel ID')
plt.ylabel('dPSI_likelihood_real')
ax4=plt.subplot(2,2,4) # right subplot in bottom row
plt.axhline(0.0, color='black', linestyle=':')
plt.plot(dlh_adj_im[::step],'ro',markersize=5.)
plt.plot(dlh_ref_im[::step],color='blue')
ax4.yaxis.get_major_formatter().set_powerlimits((-2, 2))
plt.xlabel('voxel ID')
plt.ylabel('dPSI_likelihood_imag')
plt.savefig('check_gradient.png')
#plt.scatter(dpr_adj_re,dpr_ref_re)
#plt.show()


@@ -0,0 +1,78 @@
#+
# ARES/HADES/BORG Package -- ./scripts/check_likelihood_trace.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import read_all_h5
import h5py as h5
from pylab import *
from analysis.analysis import *
#This routine calculates the log-likelihood for a given catalog
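# Model implemented below: Poisson counts with rate
#   lambda = selection * nmean * rho**bias * exp(-rho_g * rho**(-eps_g)),
# where rho = 1 + delta; the routine returns sum(lambda - Nobs*log(lambda)),
# i.e. the negative log-likelihood up to terms independent of the density.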
def psi_lh(delta,Nobs,selection,nmean,bias,rho_g,eps_g):
    print('calculating log likelihood...')
#only calculate for values inside observed domain
foo=np.where(selection>0.)
delta=delta[foo]
rho=1.+delta+1e-12
Nobs=Nobs[foo]
selection=selection[foo]
lamb = selection*nmean*(rho**bias)*np.exp(-rho_g*(rho**(-eps_g)))
aux= lamb - Nobs*(np.log(selection*nmean)+bias*np.log(rho)-rho_g*(rho**(-eps_g)))
    print('done!')
return np.sum(aux)
chain_path="/scratch/jasche/panphasia_run_pm/"
ares=analysis(chain_path=chain_path,LSS_framework='BORG')
ncat=ares.get_ncat()
#load data and masks
mask=[]
nobs=[]
for i in range(ncat):
    print('loading data of catalog no.', i, '...')
mask.append(ares.get_mask_spliced(i,ncpu=32))
nobs.append(ares.get_data_spliced(i,ncpu=32))
    print('done!')
#open sample
hh=[]
xx=[]
for l in range(822,1350,1):
#set log likelihood to zero
H=0
    #open file and get data for sample
with h5.File(chain_path + 'mcmc_'+str(l)+'.h5', mode="r") as f:
delta = f['scalars']['BORG_final_density'][:]
haux=np.zeros(ncat+1)
for i in range(ncat):
nmean = f['scalars']['galaxy_nmean_' +str(i)][:]
bias = f['scalars']['galaxy_bias_' +str(i)][:]
rho_g = f['scalars']['galaxy_rho_g_' +str(i)][:]
eps_g = f['scalars']['galaxy_eps_g_' +str(i)][:]
            print(nmean, bias, rho_g, eps_g)
haux[i] = psi_lh(delta,nobs[i],mask[i] ,nmean,bias,rho_g,eps_g)
haux[ncat]+=haux[i]
            print(nmean, bias, rho_g, eps_g, haux[i])
hh.append(haux)
xx.append(l)
hha=np.array(hh)
xxa=np.array(xx)
    print(l)
np.savez('lh_trace_pm',hh=hha,xx=xxa)
plt.plot(xxa,hha)
plt.show()


@@ -0,0 +1,63 @@
#+
# ARES/HADES/BORG Package -- ./scripts/dump_initial_field.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import pylab as plt
import h5py
import numpy as np
from read_all_h5 import explore_chain
chain_path="."
f = h5py.File('restart.h5_0', "r+")
print(list(f['/scalars']))
xmin0 = f['/scalars/corner0'][:]
xmin1 = f['/scalars/corner1'][:]
xmin2 = f['/scalars/corner2'][:]
L0 = f['/scalars/L0'][:]
L1 = f['/scalars/L1'][:]
L2 = f['/scalars/L2'][:]
cosmology=f['/scalars/cosmology'][:]
xmin=np.array([xmin0,xmin1,xmin2])
L=np.array([L0,L1,L2])
outdir ='/scratch/jasche/'
for i,a in explore_chain(chain_path, 5000,8900, 10):
d = a['s_field'][:]
fname = outdir+'borg_ic_2m++_'+ str(i)
print "Saving file : ", fname+'.npz'
np.savez(fname,
ICfield = d,
BoxLength = L,
posmin = xmin,
omega_r = cosmology[0][0],
omega_k = cosmology[0][1],
omega_m = cosmology[0][2],
omega_b = cosmology[0][3],
omega_q = cosmology[0][4],
w = cosmology[0][5],
n_s = cosmology[0][6],
wprime = cosmology[0][7],
sigma8 = cosmology[0][8],
h100 = cosmology[0][10],
beta = cosmology[0][11])
data = np.load(fname+'.npz')
    print(list(data.keys()))
plt.imshow(data['ICfield'][:,:,128])
plt.show()


@@ -0,0 +1,111 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ini_generator/gen_subcat_conf.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import sys
import os.path
import argparse
pp = argparse.ArgumentParser()
pp.add_argument('--output', type=str, required=True)
pp.add_argument('--configs', type=str, required=True)
pp.add_argument('--header', type=str, required=True)
args = pp.parse_args()
out_ini= args.output
all_config_files=args.configs.split(':')
subcat_id=0
PATTERN="""datafile=%(catalog)s
maskdata=%(mask)s
"""
def apply_cut_magnitude(CAT_config, j):
CUT_PATTERN="""galaxy_bright_absolute_magnitude_cut=%(absmag_bright)15.15lf
galaxy_faint_absolute_magnitude_cut=%(absmag_faint)15.15lf
"""
Nsubcat = CAT_config['num_subcat']
DeltaMag = (CAT_config['absmag_max'] - CAT_config['absmag_min'])
MagMin = CAT_config['absmag_min']
absmag_bright = DeltaMag * j / Nsubcat + MagMin
absmag_faint = DeltaMag * (j+1) / Nsubcat + MagMin
f.write(CUT_PATTERN % {'absmag_bright':absmag_bright,'absmag_faint':absmag_faint})
def apply_cut_distance(CAT_config, j):
CUT_PATTERN="""file_dmin=%(dmin)15.15lf
file_dmax=%(dmax)15.15lf
"""
    Nsubcat = CAT_config['num_subcat']
    DeltaD = (CAT_config['d_max'] - CAT_config['d_min'])
    DMin = CAT_config['d_min']
    dmin = DeltaD * j / Nsubcat + DMin
    dmax = DeltaD * (j+1) / Nsubcat + DMin
f.write(CUT_PATTERN % {'dmin':dmin,'dmax':dmax})
def execfile(filename, globals=None, locals=None):
if globals is None:
globals = sys._getframe(1).f_globals
if locals is None:
locals = sys._getframe(1).f_locals
with open(filename, "r") as fh:
exec(fh.read()+"\n", globals, locals)
with open(out_ini, mode="wt") as f:
with open(args.header, mode="rt") as fh:
f.write(fh.read())
f.write("\n")
print("All configs = %r" % all_config_files)
for config_file in all_config_files:
path_config,_ = os.path.split(config_file)
def file_subs(s):
return os.path.join(path_config,s)
config_locals={}
config_globals={'FILE':file_subs}
print("Analyze %s" % config_file)
execfile(config_file, config_globals, config_locals)
CAT_config = config_locals['CONFIG']
del config_locals['CONFIG']
CAT_config['catalog'] = os.path.join(path_config,CAT_config['catalog'])
if CAT_config['cutter']=='magnitude':
cut_function = apply_cut_magnitude
elif CAT_config['cutter']=='distance':
cut_function = apply_cut_distance
else:
print("Unknown cutter '%s'" % CAT_config['cutter'])
sys.exit(1)
Nsubcat = CAT_config['num_subcat']
for j in range(Nsubcat):
f.write("[catalog_%(subcat_id)d]\n" % {'subcat_id':subcat_id})
for k,v in config_locals.items():
if type(v)==str:
f.write("%s=%s\n" % (k,v))
elif type(v)==tuple:
if len(v) > 0:
f.write((("%s=" + "%r,"*len(v)) % ((k,) + v))[:-1] + "\n")
else:
f.write("%s=%r\n" % (k,v))
cut_function(CAT_config, j)
f.write(PATTERN % CAT_config)
if (j==CAT_config.get('ref_subcat',-1)):
f.write("refbias=true\n")
else:
f.write("refbias=false\n")
f.write("\n")
subcat_id += 1
f.write("[run]\nNCAT=%d\n\n" % subcat_id)


@@ -0,0 +1,44 @@
[system]
console_output=logares.txt
VERBOSE_LEVEL = 2
N0 = 256
N1 = 256
N2 = 256
L0 = 600
L1 = 600
L2 = 600
corner0 = -300
corner1 = -300
corner2 = -300
NUM_MODES=100
N_MC=10000
test_mode=true
# If true, the initial power spectrum of the chain is set to the cosmological one
seed_cpower=true
# Indicate which samplers should be blocked for testing purposes
#messenger_signal_blocked=false
#power_sampler_a_blocked=false
#power_sampler_b_blocked=false
#bias_sampler_blocked=false
[cosmology]
omega_r = 0
omega_k = 0
omega_m = 0.30
omega_b = 0.045
omega_q = 0.70
w = -1
wprime = 0
n_s = 1
sigma8 = 0.80
h100 = 0.65
beta = 0.51
z0 = 0


@@ -0,0 +1,25 @@
#+
# ARES/HADES/BORG Package -- ./scripts/ini_generator/template_sdss_main.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
CONFIG=dict(
absmag_min=-23.,absmag_max = -17.,
num_subcat=6,catalog='sdss_ares_cat.txt',
mask='SDSSDR7MASK_4096.fits', ref_subcat=0,
cutter='magnitude'
)
radial_selection = 'schechter'
schechter_mstar = -20.44
schechter_alpha = -1.05
schechter_sampling_rate = 2000
schechter_dmax = 1000
bias = 1
nmean = 1
galaxy_bright_apparent_magnitude_cut = 13.5
galaxy_faint_apparent_magnitude_cut = 17.6

scripts/lic_plot/lic.py (new file, 69 lines)

@@ -0,0 +1,69 @@
#+
# ARES/HADES/BORG Package -- ./scripts/lic_plot/lic.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import numpy as np
def lic_flow(vectors,len_pix=10):
vectors = np.asarray(vectors)
m,n,two = vectors.shape
    if two!=2:
        raise ValueError("vectors must have shape (m, n, 2)")
result = np.zeros((2*len_pix+1,m,n,2),dtype=np.int32) # FIXME: int16?
center = len_pix
result[center,:,:,0] = np.arange(m)[:,np.newaxis]
result[center,:,:,1] = np.arange(n)[np.newaxis,:]
for i in range(m):
for j in range(n):
y = i
x = j
fx = 0.5
fy = 0.5
for k in range(len_pix):
vx, vy = vectors[y,x]
                print(x, y, vx, vy)
if vx>=0:
tx = (1-fx)/vx
else:
tx = -fx/vx
if vy>=0:
ty = (1-fy)/vy
else:
ty = -fy/vy
if tx<ty:
print "x step"
if vx>0:
x+=1
fy+=vy*tx
fx=0.
else:
x-=1
fy+=vy*tx
fx=1.
else:
print "y step"
if vy>0:
y+=1
fx+=vx*ty
fy=0.
else:
y-=1
fx+=vx*ty
fy=1.
if x<0: x=0
if y<0: y=0
if x>=n: x=n-1
if y>=m: y=m-1
result[center+k+1,i,j,:] = y, x
return result


@@ -0,0 +1,71 @@
#+
# ARES/HADES/BORG Package -- ./scripts/lic_plot/lic_demo.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import numpy as np
import pylab as plt
import lic_internal
dpi = 100
size = 700
video = False
vortex_spacing = 0.5
extra_factor = 2.
a = np.array([1,0])*vortex_spacing
b = np.array([np.cos(np.pi/3),np.sin(np.pi/3)])*vortex_spacing
rnv = int(2*extra_factor/vortex_spacing)
vortices = [n*a+m*b for n in range(-rnv,rnv) for m in range(-rnv,rnv)]
vortices = [(x,y) for (x,y) in vortices if -extra_factor<x<extra_factor and -extra_factor<y<extra_factor]
xs = np.linspace(-1,1,size).astype(np.float32)[None,:]
ys = np.linspace(-1,1,size).astype(np.float32)[:,None]
vectors = np.zeros((size,size,2),dtype=np.float32)
for (x,y) in vortices:
rsq = (xs-x)**2+(ys-y)**2
vectors[...,0] += (ys-y)/rsq
vectors[...,1] += -(xs-x)/rsq
texture = np.random.rand(size,size).astype(np.float32)
plt.bone()
frame=0
if video:
kernellen = 31
for t in np.linspace(0,1,16*5):
kernel = np.sin(np.arange(kernellen)*np.pi/kernellen)*(1+np.sin(2*np.pi*5*(np.arange(kernellen)/float(kernellen)+t)))
kernel = kernel.astype(np.float32)
image = lic_internal.line_integral_convolution(vectors, texture, kernel)
plt.clf()
plt.axis('off')
plt.figimage(image)
plt.gcf().set_size_inches((size/float(dpi),size/float(dpi)))
plt.savefig("flow-%04d.png"%frame,dpi=dpi)
frame += 1
else:
kernellen=31
kernel = np.sin(np.arange(kernellen)*np.pi/kernellen)
kernel = kernel.astype(np.float32)
image = lic_internal.line_integral_convolution(vectors, texture, kernel)
plt.clf()
plt.axis('off')
plt.figimage(image)
plt.gcf().set_size_inches((size/float(dpi),size/float(dpi)))
plt.savefig("flow-image.png",dpi=dpi)

File diff suppressed because it is too large Load diff


@@ -0,0 +1,100 @@
#+
# ARES/HADES/BORG Package -- ./scripts/lic_plot/lic_internal.pyx
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import numpy as np
cimport numpy as np
cdef void _advance(float vx, float vy,
int* x, int* y, float*fx, float*fy, int w, int h):
cdef float tx, ty
if vx>=0:
tx = (1-fx[0])/vx
else:
tx = -fx[0]/vx
if vy>=0:
ty = (1-fy[0])/vy
else:
ty = -fy[0]/vy
if tx<ty:
if vx>=0:
x[0]+=1
fx[0]=0
else:
x[0]-=1
fx[0]=1
fy[0]+=tx*vy
else:
if vy>=0:
y[0]+=1
fy[0]=0
else:
y[0]-=1
fy[0]=1
fx[0]+=ty*vx
if x[0]>=w:
x[0]=w-1 # FIXME: other boundary conditions?
if x[0]<0:
x[0]=0 # FIXME: other boundary conditions?
if y[0]<0:
y[0]=0 # FIXME: other boundary conditions?
if y[0]>=h:
y[0]=h-1 # FIXME: other boundary conditions?
#np.ndarray[float, ndim=2]
def line_integral_convolution(
np.ndarray[float, ndim=3] vectors,
np.ndarray[float, ndim=2] texture,
np.ndarray[float, ndim=1] kernel):
cdef int i,j,k,x,y
cdef int h,w,kernellen
cdef int t
cdef float fx, fy, tx, ty
cdef np.ndarray[float, ndim=2] result
h = vectors.shape[0]
w = vectors.shape[1]
t = vectors.shape[2]
kernellen = kernel.shape[0]
if t!=2:
raise ValueError("Vectors must have two components (not %d)" % t)
result = np.zeros((h,w),dtype=np.float32)
for i in range(h):
for j in range(w):
x = j
y = i
fx = 0.5
fy = 0.5
k = kernellen//2
#print i, j, k, x, y
result[i,j] += kernel[k]*texture[x,y]
while k<kernellen-1:
_advance(vectors[y,x,0],vectors[y,x,1],
&x, &y, &fx, &fy, w, h)
k+=1
#print i, j, k, x, y
result[i,j] += kernel[k]*texture[x,y]
x = j
y = i
fx = 0.5
fy = 0.5
while k>0:
_advance(-vectors[y,x,0],-vectors[y,x,1],
&x, &y, &fx, &fy, w, h)
k-=1
#print i, j, k, x, y
result[i,j] += kernel[k]*texture[x,y]
return result

scripts/lic_plot/setup.py (new file, 22 lines)

@@ -0,0 +1,22 @@
#+
# ARES/HADES/BORG Package -- ./scripts/lic_plot/setup.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [
Extension("lic_internal", ["lic_internal.pyx"],
include_dirs=[numpy.get_include()])
],
)
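# Build the extension in place with the usual Cython/distutils invocation:
#   python setup.py build_ext --inplace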


@@ -0,0 +1,94 @@
#+
# ARES/HADES/BORG Package -- ./scripts/merge_mpi_restart.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import os
import errno
import numpy as np
import h5py as h5
from ares_tools import rebuild_spliced_h5
def detect_ncpus(path):
ncpu = 0
try:
while True:
with open("%s_%d" % (path,ncpu), mode= "rb") as f:
ncpu += 1
except IOError as e:
if e.errno != errno.ENOENT:
raise e
return ncpu
def detect_job_to_merge(path):
array_list=[]
other_list=[]
group_list=[]
def _handle_item(name, obj):
if isinstance(obj, h5.Group):
group_list.append(name)
if not isinstance(obj, h5.Dataset):
return
if len(obj.shape) >= 3:
array_list.append(name)
else:
other_list.append(name)
with h5.File("%s_0" % path, mode="r") as f:
f.visititems(_handle_item)
return array_list,other_list,group_list
def load_nonarray(path, objlist):
arr = {}
with h5.File("%s_0" % path, mode="r") as f:
for oname in objlist:
print("Loading %s..." % oname)
if oname == '/scalars/BORG_version':
arr[oname] = np.array([f[oname][0]], dtype='S')
else:
arr[oname] = f[oname][:]
return arr
def load_merged(path):
ncpu = detect_ncpus(path)
array_list,nonarray_list,group_list = detect_job_to_merge(path)
array_elts = ['.'.join(e.split('/')) for e in array_list]
print("Loading spliced arrays")
arr = load_nonarray(path, nonarray_list)
arr2 = rebuild_spliced_h5(path, array_elts, ncpu, verbose=True)
for k in arr2.keys():
arr['/'.join(k.split('.'))] = arr2[k]
return arr,group_list
def save_merged(outpath, omap):
omap,group_list = omap
with h5.File(outpath, mode="w") as f:
for g in group_list:
if not g in f:
f.create_group(g)
for o in omap.keys():
print("Saving object '%s'" % o)
f.create_dataset(o, data=omap[o])
if __name__=="__main__":
merged_obj = load_merged("./restart.h5")
save_merged("./merged_restart.h5", merged_obj)


@@ -0,0 +1,85 @@
#+
# ARES/HADES/BORG Package -- ./scripts/migrate_restart.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import os
import errno
import numpy as np
import h5py as h5
from ares_tools import rebuild_spliced_h5
def detect_ncpus(path):
ncpu = 0
try:
while True:
with open("%s_%d" % (path,ncpu), mode= "rb") as f:
ncpu += 1
except IOError as e:
if e.errno != errno.ENOENT:
raise e
return ncpu
def detect_job(path):
obj_list=[]
group_list=[]
def _handle_item(name, obj):
if isinstance(obj, h5.Group):
group_list.append(name)
if not isinstance(obj, h5.Dataset):
return
obj_list.append(name)
with h5.File("%s_0" % path, mode="r") as f:
f.visititems(_handle_item)
return obj_list,group_list
def load_nonarray(path, outpath, objlist, group_list):
arr = {}
with h5.File(path, mode="r") as f, h5.File(outpath, mode="w") as of:
        for g in group_list:
            if g[:4] == 'info':
                newname = g[4:]
            elif g[:6] == 'markov':
                newname = g[6:]
            else:
                continue
            if len(newname) == 0:
                continue
if newname in of:
continue
print("Create group %s" % newname)
of.create_group(newname)
for oname in objlist:
print("Loading %s..." % oname)
if oname[:4] == 'info':
newname = oname[4:]
elif oname[:6] == 'markov':
newname = oname[6:]
            else:
                raise RuntimeError("Unexpected dataset name: " + oname)
if oname == '/info/scalars/BORG_version':
of[newname] = np.array([f[oname][0]], dtype='S')
else:
of[newname] = f[oname][:]
def migrate(path, newpath):
ncpu = detect_ncpus(path)
elem_list,group_list = detect_job(path)
print("Loading spliced arrays")
for n in range(ncpu):
load_nonarray("%s_%d" % (path,n), "%s_%d" % (newpath,n), elem_list, group_list)
if __name__=="__main__":
migrate("./restart.h5", "./new/restart.h5")

scripts/misc/check_bias.py (new file, 274 lines)

@@ -0,0 +1,274 @@
#+
# ARES/HADES/BORG Package -- ./scripts/misc/check_bias.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import read_all_h5
from pylab import *
n=[]
b0=[]
b1=[]
b2=[]
b3=[]
b4=[]
b5=[]
b6=[]
b7=[]
b8=[]
b9=[]
b10=[]
b11=[]
b12=[]
b13=[]
b14=[]
b15=[]
rho_g0=[]
rho_g1=[]
rho_g2=[]
rho_g3=[]
rho_g4=[]
rho_g5=[]
rho_g6=[]
rho_g7=[]
rho_g8=[]
rho_g9=[]
rho_g10=[]
rho_g11=[]
rho_g12=[]
rho_g13=[]
rho_g14=[]
rho_g15=[]
eps_g0=[]
eps_g1=[]
eps_g2=[]
eps_g3=[]
eps_g4=[]
eps_g5=[]
eps_g6=[]
eps_g7=[]
eps_g8=[]
eps_g9=[]
eps_g10=[]
eps_g11=[]
eps_g12=[]
eps_g13=[]
eps_g14=[]
eps_g15=[]
n0=[]
n1=[]
n2=[]
n3=[]
n4=[]
n5=[]
n6=[]
n7=[]
n8=[]
n9=[]
n10=[]
n11=[]
n12=[]
n13=[]
n14=[]
n15=[]
accept=[]
i=0
#while True:
for l in range(0,440,1):
a = \
read_all_h5.read_all_h5("mcmc_%d.h5" % l)
try:
n0.append(a.scalars.galaxy_nmean_0)
n1.append(a.scalars.galaxy_nmean_1)
n2.append(a.scalars.galaxy_nmean_2)
n3.append(a.scalars.galaxy_nmean_3)
n4.append(a.scalars.galaxy_nmean_4)
n5.append(a.scalars.galaxy_nmean_5)
n6.append(a.scalars.galaxy_nmean_6)
n7.append(a.scalars.galaxy_nmean_7)
n8.append(a.scalars.galaxy_nmean_8)
n9.append(a.scalars.galaxy_nmean_9)
n10.append(a.scalars.galaxy_nmean_10)
n11.append(a.scalars.galaxy_nmean_11)
n12.append(a.scalars.galaxy_nmean_12)
n13.append(a.scalars.galaxy_nmean_13)
n14.append(a.scalars.galaxy_nmean_14)
n15.append(a.scalars.galaxy_nmean_15)
b0.append(a.scalars.galaxy_bias_0)
b1.append(a.scalars.galaxy_bias_1)
b2.append(a.scalars.galaxy_bias_2)
b3.append(a.scalars.galaxy_bias_3)
b4.append(a.scalars.galaxy_bias_4)
b5.append(a.scalars.galaxy_bias_5)
b6.append(a.scalars.galaxy_bias_6)
b7.append(a.scalars.galaxy_bias_7)
b8.append(a.scalars.galaxy_bias_8)
b9.append(a.scalars.galaxy_bias_9)
b10.append(a.scalars.galaxy_bias_10)
b11.append(a.scalars.galaxy_bias_11)
b12.append(a.scalars.galaxy_bias_12)
b13.append(a.scalars.galaxy_bias_13)
b14.append(a.scalars.galaxy_bias_14)
b15.append(a.scalars.galaxy_bias_15)
rho_g0.append(a.scalars.galaxy_rho_g_0)
rho_g1.append(a.scalars.galaxy_rho_g_1)
rho_g2.append(a.scalars.galaxy_rho_g_2)
rho_g3.append(a.scalars.galaxy_rho_g_3)
rho_g4.append(a.scalars.galaxy_rho_g_4)
rho_g5.append(a.scalars.galaxy_rho_g_5)
rho_g6.append(a.scalars.galaxy_rho_g_6)
rho_g7.append(a.scalars.galaxy_rho_g_7)
rho_g8.append(a.scalars.galaxy_rho_g_8)
rho_g9.append(a.scalars.galaxy_rho_g_9)
rho_g10.append(a.scalars.galaxy_rho_g_10)
rho_g11.append(a.scalars.galaxy_rho_g_11)
rho_g12.append(a.scalars.galaxy_rho_g_12)
rho_g13.append(a.scalars.galaxy_rho_g_13)
rho_g14.append(a.scalars.galaxy_rho_g_14)
rho_g15.append(a.scalars.galaxy_rho_g_15)
eps_g0.append(a.scalars.galaxy_eps_g_0)
eps_g1.append(a.scalars.galaxy_eps_g_1)
eps_g2.append(a.scalars.galaxy_eps_g_2)
eps_g3.append(a.scalars.galaxy_eps_g_3)
eps_g4.append(a.scalars.galaxy_eps_g_4)
eps_g5.append(a.scalars.galaxy_eps_g_5)
eps_g6.append(a.scalars.galaxy_eps_g_6)
eps_g7.append(a.scalars.galaxy_eps_g_7)
eps_g8.append(a.scalars.galaxy_eps_g_8)
eps_g9.append(a.scalars.galaxy_eps_g_9)
eps_g10.append(a.scalars.galaxy_eps_g_10)
eps_g11.append(a.scalars.galaxy_eps_g_11)
eps_g12.append(a.scalars.galaxy_eps_g_12)
eps_g13.append(a.scalars.galaxy_eps_g_13)
eps_g14.append(a.scalars.galaxy_eps_g_14)
        eps_g15.append(a.scalars.galaxy_eps_g_15)
accept.append(a.scalars.hades_accept_count)
        print(l)
except AttributeError:
break
i += 1
rate =np.cumsum(np.array(accept))
norm =np.cumsum(np.ones(len(accept)))
plt.plot(rate/norm)
plt.show()
print()
plt.plot(b0,label=str(0))
plt.plot(b1,label=str(1))
plt.plot(b2,label=str(2))
plt.plot(b3,label=str(3))
plt.plot(b4,label=str(4))
plt.plot(b5,label=str(5))
plt.plot(b6,label=str(6))
plt.plot(b7,label=str(7))
plt.plot(b8,label=str(8))
plt.plot(b9,label=str(9))
plt.plot(b10,label=str(10))
plt.plot(b11,label=str(11))
plt.plot(b12,label=str(12))
plt.plot(b13,label=str(13))
plt.plot(b14,label=str(14))
plt.plot(b15,label=str(15))
legend()
plt.savefig('check_bias.png')
plt.show()
plt.plot(np.log10(rho_g0),label=str(0))
plt.plot(np.log10(rho_g1),label=str(1))
plt.plot(np.log10(rho_g2),label=str(2))
plt.plot(np.log10(rho_g3),label=str(3))
plt.plot(np.log10(rho_g4),label=str(4))
plt.plot(np.log10(rho_g5),label=str(5))
plt.plot(np.log10(rho_g6),label=str(6))
plt.plot(np.log10(rho_g7),label=str(7))
plt.plot(np.log10(rho_g8),label=str(8))
plt.plot(np.log10(rho_g9),label=str(9))
plt.plot(np.log10(rho_g10),label=str(10))
plt.plot(np.log10(rho_g11),label=str(11))
plt.plot(np.log10(rho_g12),label=str(12))
plt.plot(np.log10(rho_g13),label=str(13))
plt.plot(np.log10(rho_g14),label=str(14))
plt.plot(np.log10(rho_g15),label=str(15))
legend()
plt.savefig('check_rho_g.png')
plt.show()
x=np.arange(600)*0.04+1e-12
y0=n0[-1]*x**b0[-1]*np.exp(-rho_g0[-1]*x**(-eps_g0[-1]))
y1=n1[-1]*x**b1[-1]*np.exp(-rho_g1[-1]*x**(-eps_g1[-1]))
y2=n2[-1]*x**b2[-1]*np.exp(-rho_g2[-1]*x**(-eps_g2[-1]))
y3=n3[-1]*x**b3[-1]*np.exp(-rho_g3[-1]*x**(-eps_g3[-1]))
y4=n4[-1]*x**b4[-1]*np.exp(-rho_g4[-1]*x**(-eps_g4[-1]))
y5=n5[-1]*x**b5[-1]*np.exp(-rho_g5[-1]*x**(-eps_g5[-1]))
y6=n6[-1]*x**b6[-1]*np.exp(-rho_g6[-1]*x**(-eps_g6[-1]))
y7=n7[-1]*x**b7[-1]*np.exp(-rho_g7[-1]*x**(-eps_g7[-1]))
y8=n8[-1]*x**b8[-1]*np.exp(-rho_g8[-1]*x**(-eps_g8[-1]))
y9=n9[-1]*x**b9[-1]*np.exp(-rho_g9[-1]*x**(-eps_g9[-1]))
y10=n10[-1]*x**b10[-1]*np.exp(-rho_g10[-1]*x**(-eps_g10[-1]))
y11=n11[-1]*x**b11[-1]*np.exp(-rho_g11[-1]*x**(-eps_g11[-1]))
y12=n12[-1]*x**b12[-1]*np.exp(-rho_g12[-1]*x**(-eps_g12[-1]))
y13=n13[-1]*x**b13[-1]*np.exp(-rho_g13[-1]*x**(-eps_g13[-1]))
y14=n14[-1]*x**b14[-1]*np.exp(-rho_g14[-1]*x**(-eps_g14[-1]))
y15=n15[-1]*x**b15[-1]*np.exp(-rho_g15[-1]*x**(-eps_g15[-1]))
plt.plot(x,np.log(y0),label=str(0),color='red')
plt.plot(x,np.log(y1),label=str(1),color='blue')
plt.plot(x,np.log(y2),label=str(2),color='green')
plt.plot(x,np.log(y3),label=str(3),color='orange')
plt.plot(x,np.log(y4),label=str(4),color='yellow')
plt.plot(x,np.log(y5),label=str(5),color='black')
plt.plot(x,np.log(y6),label=str(6),color='gray')
plt.plot(x,np.log(y7),label=str(7),color='magenta')
plt.plot(x,np.log(y8),label=str(8),color='red')
plt.plot(x,np.log(y9),label=str(9),color='blue')
plt.plot(x,np.log(y10),label=str(10),color='green')
plt.plot(x,np.log(y11),label=str(11),color='orange')
plt.plot(x,np.log(y12),label=str(12),color='yellow')
plt.plot(x,np.log(y13),label=str(13),color='black')
plt.plot(x,np.log(y14),label=str(14),color='gray')
plt.plot(x,np.log(y15),label=str(15),color='magenta')
plt.plot(x,np.log(x),label='reference')
plt.ylim([-8,5])
legend(loc='lower right', shadow=True)
#plt.show()
gcf().savefig("check_bias.png")
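The sixteen near-identical append/plot stanzas above can be collapsed into a loop over catalog indices. A minimal sketch of the same monitoring logic, assuming the attribute naming used above (galaxy_nmean_<i>, galaxy_bias_<i>, galaxy_rho_g_<i>, galaxy_eps_g_<i>) and the same read_all_h5 interface:

import read_all_h5
import matplotlib.pyplot as plt

NCAT = 16
fields = ("nmean", "bias", "rho_g", "eps_g")
trace = {nm: [[] for _ in range(NCAT)] for nm in fields}
accept = []
for l in range(440):
    a = read_all_h5.read_all_h5("mcmc_%d.h5" % l)
    try:
        for i in range(NCAT):
            for nm in fields:
                # same attribute pattern as the explicit stanzas above
                trace[nm][i].append(getattr(a.scalars, "galaxy_%s_%d" % (nm, i)))
        accept.append(a.scalars.hades_accept_count)
    except AttributeError:
        break
for i in range(NCAT):
    plt.plot(trace["bias"][i], label=str(i))
plt.legend()
plt.savefig("check_bias.png")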


@ -0,0 +1,26 @@
#+
# ARES/HADES/BORG Package -- ./scripts/misc/check_integrator.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import h5py as h5
import numpy as np
import pylab as plt
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
f = h5.File("symplectic.h5", mode="r")
for k in f.keys():
ax.semilogy(np.abs(f[k]['energy']), label=k)
f.close()
ax.legend()
fig.savefig("symplectic.png")
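The plot script only assumes that symplectic.h5 contains one group per integrator scheme, each holding an 'energy' dataset. To test the plot in isolation, a file with that layout can be mocked up as below (the group name and the dummy energy drift are illustrative only):

import h5py as h5
import numpy as np

with h5.File("symplectic.h5", mode="w") as f:
    g = f.create_group("leapfrog")  # hypothetical integrator name
    # fake energy-conservation residuals for 100 integration steps
    g.create_dataset("energy", data=1e-6*np.sin(0.1*np.arange(100)))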


@ -0,0 +1,29 @@
#+
# ARES/HADES/BORG Package -- ./scripts/misc/check_velocities.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from read_all_h5 import *
import pylab as plt
g=read_all_h5('dump_velocities.h5')
V = g.scalars.L0[0]*g.scalars.L1[0]*g.scalars.L2[0]
q = g.scalars.k_pos_test
H=100.
D=1.
a=1.
f=g.scalars.cosmology['omega_m']**(5./9)
vref = 2* q/((q**2).sum()) / V * g.scalars.A_k_test * f * H * a**2 * D
vborg = g.scalars.lpt_vel[:,::].max(axis=0)
print "vref = %r" % vref
print "vborg = %r" % vborg
print "ratio = %r" % (vborg/vref)


@ -0,0 +1,29 @@
#+
# ARES/HADES/BORG Package -- ./scripts/misc/convert_2m++.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import numpy as np
from scipy import constants as sconst
LIGHT_SPEED = sconst.c/1000. #In km/s
catalog = np.load("2m++.npy")
with open("2MPP.txt", mode="w") as f:
cond = (catalog['flag_vcmb']==1)*(catalog['flag_zoa']==0)*(catalog['best_velcmb'] > 100)
for i,c in enumerate(catalog[cond]):
M = c['K2MRS'] - 5*np.log10(c['best_velcmb']/100*1e5)
zo = c['velcmb']/LIGHT_SPEED
z = zo
f.write(
"%d %lg %lg %lg %lg %lg %lg\n" %
(i, np.radians(c['ra']), np.radians(c['dec']), zo, c['K2MRS'], M, z)
)
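The resulting 2MPP.txt is a whitespace-separated table with columns (index, ra [rad], dec [rad], observed redshift, apparent K magnitude, absolute magnitude, redshift); it can be read back with, e.g.:

import numpy as np

idx, ra, dec, zo, K, M, z = np.loadtxt("2MPP.txt", unpack=True)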


@ -0,0 +1,42 @@
#+
# ARES/HADES/BORG Package -- ./scripts/misc/plot_power.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import read_all_h5
from pylab import *
P=[]
n=[]
b=[]
n1=[]
b1=[]
i=0
while True:
a = read_all_h5.read_all_h5("mcmc_%d.h5" % i)
try:
P.append(a.scalars.powerspectrum)
n.append(a.scalars.galaxy_nmean_0[0])
b.append(a.scalars.galaxy_bias_0[0])
n1.append(a.scalars.galaxy_nmean_1[0])
b1.append(a.scalars.galaxy_bias_1[0])
except AttributeError:
break
i += 1
k = read_all_h5.read_all_h5("info.h5").scalars.k_modes
P = np.array(P)
f=figure(1)
clf()
loglog(k[:,None].repeat(P.shape[0],axis=1),P.transpose())
f=figure(2)
clf()
plot(n)

scripts/misc/plot_void.py Normal file

@ -0,0 +1,85 @@
#+
# ARES/HADES/BORG Package -- ./scripts/misc/plot_void.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from pylab import *
from read_all_h5 import explore_chain
def box2sphere(x,y,z):
#calculate radii
r=np.sqrt(x**2+y**2+z**2)
print(np.shape(r), np.shape(x))
dec=np.zeros(np.shape(r))
'''
foo= np.where(r>0)
dec[foo]=np.arcsin(z[foo]/r[foo])
'''
ra=np.arctan2(y,x)
print(np.shape(r), np.shape(ra))
return ra,dec,r
chain_path="."
N = 256
L = 677.7
Nb = 128
f = np.sqrt(3)*0.5
ix = np.arange(N)*L/N - 0.5*L
ra,dec,r=box2sphere(ix[:,None,None],ix[None,:,None],ix[None,None,:])
r = np.sqrt(ix[:,None,None]**2 + ix[None,:,None]**2 + ix[None,None,:]**2)
print(np.shape(r))
H, b = np.histogram(r, range=(0,f*L), bins=Nb)
Hw_mean=np.zeros(np.shape(H))
cnt=0
mu = np.zeros(np.shape(H))
var = np.zeros(np.shape(H))
nn=1
for i,a in explore_chain(chain_path, 400,4100, 10):
d = a['BORG_final_density'][:]
Hw, b = np.histogram(r, weights=d, range=(0,f*L), bins=Nb)
Hw /= H
mu = (nn-1.)/float(nn)*mu +1./float(nn)*Hw
if(nn>1):
aux = (mu-Hw)**2
var = (nn-1.)/nn*var+1./(nn-1)*aux
nn+=1
plot(b[1:], mu, label='average', color='red')
fill_between(b[1:], mu, mu+np.sqrt(var), interpolate=True, color='gray', alpha=0.5)
fill_between(b[1:], mu-np.sqrt(var), mu, interpolate=True, color='gray', alpha=0.5)
fill_between(b[1:], mu, mu+2*np.sqrt(var), interpolate=True, color='darkgray', alpha=0.5)
fill_between(b[1:], mu-2*np.sqrt(var), mu, interpolate=True, color='darkgray', alpha=0.5)
plt.xlabel(r'$r \left[\mathrm{Mpc/h} \right]$')
plt.ylabel(r'$\langle \delta \rangle$')
axhline(0.0,lw=1.5, color='black')
#legend()
ylim(-1,1)
gcf().savefig("void.png")
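The mu/var updates inside the chain loop above (and in the analysis classes in this import) are streaming estimators of the posterior mean and variance: for samples $x_1, \dots, x_n$,

\[
\mu_n = \tfrac{n-1}{n}\,\mu_{n-1} + \tfrac{1}{n}\,x_n,
\qquad
\sigma^2_n = \tfrac{n-1}{n}\,\sigma^2_{n-1} + \tfrac{1}{n-1}\,\bigl(\mu_n - x_n\bigr)^2,
\]

so a single pass over the MCMC files yields both moments without storing the full chain.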


@ -0,0 +1,51 @@
#+
# ARES/HADES/BORG Package -- ./scripts/mod_restart_file.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import h5py
import numpy as np
#set chain
chain_path="."
#set reference file
refdir ='/scratch/jasche/panphasia_run_lc/'
fref=refdir+'restart.h5_0'
#set target file
tardir ='/scratch/jasche/panphasia_run_h/'
ftar=tardir+'restart.h5_0'
#read density files from reference file
print(fref)
dref = h5py.File(fref, "r")
dref_final_density=dref['/scalars/BORG_final_density']
dref_s_field=dref['/scalars/s_field']
dref_s_hat_field=dref['/scalars/s_hat_field']
dtar = h5py.File(ftar, "r+")
dtar_final_density=dtar['/scalars/BORG_final_density']
dtar_s_field=dtar['/scalars/s_field']
dtar_s_hat_field=dtar['/scalars/s_hat_field']
'''
WARNING: At this point you will irretrievably
modify your restart file!!!!!!
'''
dtar_final_density[...] = dref_final_density[...]
dtar_s_field[...] = dref_s_field[...]
dtar_s_hat_field[...] = dref_s_hat_field[...]
dref.close()
dtar.close()
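Given the warning above, it is prudent to keep an untouched copy of the target restart file before opening it in read-write mode; a minimal safeguard (the backup name is arbitrary):

import shutil

shutil.copy2(ftar, ftar + ".bak")  # copy data and metadata before any in-place edit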

File diff suppressed because one or more lines are too long


@ -0,0 +1,25 @@
#+
# ARES/HADES/BORG Package -- ./scripts/notebooks/src/curl.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import numpy as np
def curl(vect, dx=1, dy=1, dz=1):
    "return the curl of a 3-D vector field"
    [P_dy, P_dz] = np.gradient(vect[0], dy, dz, axis=(1, 2))
    [Q_dx, Q_dz] = np.gradient(vect[1], dx, dz, axis=(0, 2))
    [R_dx, R_dy] = np.gradient(vect[2], dx, dy, axis=(0, 1))
    return np.array([R_dy - Q_dz, P_dz - R_dx, Q_dx - P_dy])
def div(vect, dx=1, dy=1, dz=1):
    "return the divergence of a 3-D vector field"
    # sum of the diagonal derivatives d v_x/dx + d v_y/dy + d v_z/dz
    return (np.gradient(vect[0], dx, axis=0)
            + np.gradient(vect[1], dy, axis=1)
            + np.gradient(vect[2], dz, axis=2))
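A quick consistency check of the two operators: finite-difference derivatives along different axes commute, so div(curl(v)) should vanish to machine precision for any smooth test field. A sketch:

import numpy as np

N = 32
x, y, z = np.meshgrid(np.linspace(0, 1, N), np.linspace(0, 1, N),
                      np.linspace(0, 1, N), indexing='ij')
v = np.array([np.sin(2*np.pi*y), np.cos(2*np.pi*z), np.sin(2*np.pi*x)])
print(np.abs(div(curl(v))).max())  # ~1e-15, i.e. zero up to round-off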


@ -0,0 +1,94 @@
#+
# ARES/HADES/BORG Package -- ./scripts/notebooks/src/special_regions.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import numpy as np
def dir_vec(ra,dec):
# angles in degrees
x=np.cos(dec/180.*np.pi)*np.cos(ra/180.*np.pi)
y=np.cos(dec/180.*np.pi)*np.sin(ra/180.*np.pi)
z=np.sin(dec/180.*np.pi)
return np.array([x,y,z])
#set special sites
special_coords = {}
special_coords['home']={'ra': 0.95,'dec': 0.98,'z': 0.0, 'rc' : 400., 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['coma']={'ra': 194.95,'dec': 27.98,'z': 0.0232, 'rc' : 40., 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['shapley']={'ra': 202.011,'dec': -31.493,'z': 0.0480, 'rc' : 100. *0.68 , 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['coronaborealis']={'ra': 232.0000,'dec': 28.8833,'z': 0.065, 'rc' : 10, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['bootesvoid']={'ra': 215.0000,'dec': 26.000,'z': 0.05, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['hydra']={'ra': 158.68749975,'dec': -27.23192123,'z': np.nan, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['cetus']={'ra': 217.40988269,'dec': -69.94391168,'z': np.nan, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['vela']={'ra': 141.17998324,'dec': -50.57228815,'z': np.nan, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['norma']={'ra': 243.5936928,'dec': -60.85205904,'z': np.nan, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['A3158']={'ra': 55.87672518,'dec': -53.4820204,'z': np.nan, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['hydra-cen']={'ra': 200.02772883,'dec': -53.81840285,'z': np.nan, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
special_coords['horologium']={'ra': 6.74460262,'dec': -49.70333177,'z': np.nan, 'rc' : 12, 'mu_mass' : 1, 'var_mass' : 1, 'mu_delta' : 1, 'var_delta' : 1 }
def get_objpos_range(objname,cosmolo={'omega_M_0' : 0.307, 'omega_lambda_0' : 0.693, 'h' : 0.6777}):
from astropy.cosmology import LambdaCDM
cosmo = LambdaCDM(H0=100.*cosmolo['h'], Om0=cosmolo['omega_M_0'], Ode0=cosmolo['omega_lambda_0'])
ra = special_coords[objname]['ra']
dec = special_coords[objname]['dec']
z = special_coords[objname]['z']
rc = special_coords[objname]['rc'] # units Mpc/h
dcom = np.array(cosmo.comoving_distance(z).value)*cosmolo['h']
d = dir_vec(ra,dec)
pos_SSC = dcom * d
return pos_SSC, rc
# I just want a switch....can't live without it
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # falling off the end stops the generator (raising StopIteration breaks under PEP 479)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
def sky_region(region,ra,dec):
for case in switch(region):
if case('R1'):
#SDSS-NGC Whitbourn & Shanks 2014
return np.where( (ra>150.)*(ra<220.)*(dec>0.)*(dec<50.)), 3072.38*(np.pi/180.)**2
break
if case('R2'):
#6dFGS-SGC Whitbourn & Shanks 2014
return np.where( ((ra>330.)*(ra<360.)*(dec<0)*(dec>-50.)) + ((ra>0.)*(ra<50.)*(dec<0)*(dec>-50.))), 3511.29*(np.pi/180.)**2
break
if case('R3'):
#6dFGS-NGC Whitbourn & Shanks 2014
return np.where( (ra>150.)*(ra<220.)*(dec<0)*(dec>-40.)), 2578.03*(np.pi/180.)**2
break
if case(): # default, could also just omit condition or 'if True'
print ("Case not known!")
# No need to break here, it'll stop anyway
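Example use of the lookup, assuming astropy is installed (as required by get_objpos_range):

pos, rc = get_objpos_range('coma')
print(pos, rc)  # comoving position in Mpc/h and the region radius (40 Mpc/h for Coma)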


@ -0,0 +1,533 @@
#+
# ARES/HADES/BORG Package -- ./scripts/old_analysis/analysis.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from read_all_h5 import explore_chain
from read_all_h5 import rebuild_spliced_h5
import h5py as h5
import numpy as np
import healpy as hp
import numexpr as ne
import os
import math
from pylab import *
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
#import numba
'''
@numba.jit
def _special_spectrum_builder(P, PP, tmp, alpha, N_i, Pk_i):
for i in range(Pk_i.size):
for j in range(PP.size):
v = PP[j]**(-alpha)*exp(- N_i[i]*Pk_i[i] / (2 * PP[j]))
tmp[j] = v
total += v
for j in range(PP.size):
P[j] += tmp[j] / total
'''
#ARES/HADES/BORG image scanning class
class IndexTracker:
def __init__(self, ax, X):
self.ax = ax
self.ax.set_title('use up/down keys to navigate images')
self.X = X
rows,cols,self.slices = X.shape
self.ind = self.slices//2
cmax=X.max()
cmin=X.min()
self.im = self.ax.imshow(self.X[:,self.ind,:],vmax=cmax,vmin=cmin)
self.update()
def onscroll(self, event):
#print ("%s " % (event.key))
if event.key=='up':
self.ind = np.clip(self.ind+1, 0, self.slices-1)
else:
self.ind = np.clip(self.ind-1, 0, self.slices-1)
self.update()
def update(self):
self.im.set_data(self.X[:,self.ind,:])
self.ax.set_ylabel('slice %s'%self.ind)
self.im.axes.figure.canvas.draw()
#ARES/HADES/BORG analysis class
class analysis:
def __init__(self, chain_path='.', LSS_framework='ARES'):
self.chain_path = chain_path
self.LSS_framework = LSS_framework
self.description = "This Class is part of the ARES/HADES/BORG analysis framework"
self.author = "Copyright (C) 2009-2016 Jens Jasche \n Copyright (C) 2014-2016 Guilhem Lavaux"
#get chain setup
self.L0=0
self.L1=0
self.L2=0
self.N0=0
self.N1=0
self.N2=0
self.x0=0
self.x1=0
self.x2=0
with h5.File(os.path.join(self.chain_path, "restart.h5_0"), mode="r") as f:
info=f.require_group('/info')
markov=f.require_group('/markov')
#print markov.keys()
#print info.keys()
print(f['info']['scalars'].keys())
#print f['markov']['scalars'].keys()
self.L0 = f['info']['scalars']['L0'][:]
self.L1 = f['info']['scalars']['L1'][:]
self.L2 = f['info']['scalars']['L2'][:]
self.N0 = int(f['info']['scalars']['N0'][:])
self.N1 = int(f['info']['scalars']['N1'][:])
self.N2 = int(f['info']['scalars']['N2'][:])
self.xmin0 = int(f['info']['scalars']['corner0'][:])
self.xmin1 = int(f['info']['scalars']['corner1'][:])
self.xmin2 = int(f['info']['scalars']['corner2'][:])
self.ncat = int(f['info']['scalars']['NCAT'][:])
if(LSS_framework!='BORG'):
self.kmodes = f['/info/scalars/k_modes'][:]
self.nmodes = len(self.kmodes)
#get brefs
bref=[]
for i in range(self.ncat):
bref.append(f['info']['scalars']['galaxy_bias_ref_'+str(i)][:])
self.bias_ref=np.array(bref)
def check_biasref(self):
return self.bias_ref
def get_ncat(self):
return self.ncat
def get_mask_spliced(self,msknr,ncpu=0):
if ncpu > 0:
    mskkey = "info.scalars.galaxy_sel_window_" + str(msknr)
    a = rebuild_spliced_h5(os.path.join(self.chain_path, "restart.h5"), [mskkey], ncpu)
    return np.array(a[mskkey][:,:,:,0])
else:
    print('Error: need number of processes to read files !')
def get_mask(self,msknr):
with h5.File(os.path.join(self.chain_path, "restart.h5_0"), mode="r") as f:
mskkey = "galaxy_sel_window_" + str(msknr)
mask = f['info']['scalars'][mskkey][:]
return np.array(mask[:,:,:,0])
def get_data(self,datnr):
with h5.File(os.path.join(self.chain_path, "restart.h5_0"), mode="r") as f:
datkey = "galaxy_data_" + str(datnr)
data = f['info']['scalars'][datkey][:]
return np.array(data)
def get_data_spliced(self,msknr,ncpu=0):
if ncpu > 0:
    mskkey = "info.scalars.galaxy_data_" + str(msknr)
    a = rebuild_spliced_h5(os.path.join(self.chain_path, "restart.h5"), [mskkey], ncpu)
    return np.array(a[mskkey][:])
else:
    print('Error: need number of processes to read files !')
def scan_datacube(self,data):
fig = figure()
ax = fig.add_subplot(111)
plt.jet()
tracker = IndexTracker(ax, data)
fig.canvas.mpl_connect('key_press_event', tracker.onscroll)
show()
def get_2d_marginal(self,attribute_a='s_field',attribute_b='s_field',id_a=None,id_b=None, first_sample=0,last_sample=1000):
print('-'*60)
print('Estimate 2d marginals for parameters', attribute_a, 'and', attribute_b, 'for', self.LSS_framework, 'run!')
print('-'*60)
if id_a is None or id_b is None:
    print("Error: no index chosen")
    return -1
#2) collect chain
samples_a = []
samples_b = []
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute_a][:]
e = a[attribute_b][:]
samples_a.append(d[id_a])
samples_b.append(e[id_b])
H, xedges, yedges = np.histogram2d(samples_a, samples_b)
return xedges,yedges, H
def get_cross_corcoeff(self,attribute_a='s_field',attribute_b='s_field',id_a=None,id_b=None, first_sample=0,last_sample=1000):
print('-'*60)
print('Estimate cross-correlation coefficients for parameters', attribute_a, 'and', attribute_b, 'for', self.LSS_framework, 'run!')
print('-'*60)
if id_a is None or id_b is None:
    print("Error: no index chosen")
    return -1
#2) collect chain
samples_a = []
samples_b = []
nelements_a = len(id_a[0])
nelements_b = len(id_b[0])
mu_a = np.zeros(nelements_a)
var_a = np.zeros(nelements_a)
mu_b = np.zeros(nelements_b)
var_b = np.zeros(nelements_b)
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute_a][:]
e = a[attribute_b][:]
aux_a = d[id_a]
aux_b = e[id_b]
mu_a = (nn-1.)/float(nn)*mu_a +1./float(nn)*aux_a
if(nn>1):
aux = (mu_a-aux_a)**2
var_a = (nn-1.)/nn*var_a+1./(nn-1)*aux
mu_b = (nn-1.)/float(nn)*mu_b +1./float(nn)*aux_b
if(nn>1):
aux = (mu_b-aux_b)**2
var_b = (nn-1.)/nn*var_b+1./(nn-1)*aux
samples_a.append(aux_a)
samples_b.append(aux_b)
nn+=1
pc= np.zeros((nelements_a,nelements_b))
cnt=0
for n in range(nn-1):
x=samples_a[n]
y=samples_b[n]
pc += np.multiply.outer(x-mu_a, y-mu_b)
cnt+=1
return pc/float(cnt) #/np.sqrt(var_a*var_b)
def get_trace(self,attribute='s_field',element_id=None, first_sample=0,last_sample=1000):
print('-'*60)
print('Record trace for parameter', attribute, 'for', self.LSS_framework, 'run!')
print('-'*60)
'''
if(element_id==None):
print "Error: no list of indices provided"
return -1
'''
#1) collect chain
samples = []
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute][:]
if element_id is not None:
samples.append(d[element_id])
else:
samples.append(d)
nn+=1
return samples
def get_corrlength(self,attribute='s_field',element_id=None,nlength=100, first_sample=0,last_sample=1000):
print('-'*60)
print('Estimate correlation length for parameter', attribute, 'for', self.LSS_framework, 'run!')
print('-'*60)
if element_id is None:
    print("Error: no list of indices provided")
    return -1
if nlength > last_sample - first_sample:
    print("Warning: chain not long enough; setting nlength to last_sample - first_sample - 1")
    nlength = last_sample - first_sample - 1
nelements = len(element_id[0])
#1) calculate mean and variance
mu = np.zeros(nelements)
var = np.zeros(nelements)
#2) collect chain
samples = []
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, 1):
d = a[attribute][:]
print(np.shape(d))
mu = (nn-1.)/float(nn)*mu +1./float(nn)*d[element_id]
if(nn>1):
aux = (mu-d[element_id])**2
var = (nn-1.)/nn*var+1./(nn-1)*aux
samples.append(d[element_id])
nn+=1
cl = np.zeros((nlength,nelements))
cl_count= np.zeros(nlength)
for i in range(nlength):
for j in range(len(samples)-i):
cl[i]+= (samples[j]-mu)*(samples[j+i]-mu)/var
cl_count[i] +=1.;
for i in range(nlength):
cl[i]/=cl_count[i]
return np.array(range(nlength)), cl
def print_job(self,msg):
print('-'*60)
print(msg)
print('-'*60)
def spectrum_pdf(self, first_sample=0, last_sample=-1, sample_steps=10, gridsize=1000, Pmin=None, Pmax=None):
P = np.zeros((gridsize, self.nmodes), dtype=np.float64)  # one PDF per k-mode (Npk was undefined)
if Pmin is None or Pmax is None:
    Pb_m, Pb_M = np.inf, 0
    for i, a in explore_chain(self.chain_path, first_sample, last_sample, sample_steps):
        P0m = a['/scalars/powerspectrum'].min()
        P0M = a['/scalars/powerspectrum'].max()
        Pb_m, Pb_M = min(P0m, Pb_m), max(P0M, Pb_M)
    if Pmin is None:
        Pmin = Pb_m
    if Pmax is None:
        Pmax = Pb_M
# log-spaced grid of gridsize spectrum amplitudes between Pmin and Pmax
PP = Pmin*np.exp(np.arange(gridsize)/(gridsize-1.)*np.log(Pmax/Pmin))
N = 0
prior = 0
# self.Nk (number of modes per k-bin) is assumed to be set elsewhere
N_ib = 0.5*(self.Nk + prior)[None, :]
tmp_PP = np.zeros(gridsize)  # scratch buffer for _special_spectrum_builder (was undefined)
for i, a in explore_chain(self.chain_path, first_sample, last_sample, sample_steps):
    Pk_i = a['/scalars/powerspectrum'][:]
    N_i = self.Nk
    _special_spectrum_builder(P, PP, tmp_PP, N_ib, N_i, Pk_i)
N += 1
P /= N
return P
def get_spherical_slice(self,vdata,nside=32, observer=np.array([0,0,0]),rslice = 150.):
def RenderSphere(VolumeData3D,image,rslice,observer,Larr,Narr):
print "Rendering Sphere..."
NSIDE=hp.npix2nside(len(image))
idx=Larr[0]/Narr[0]
idy=Larr[1]/Narr[1]
idz=Larr[2]/Narr[2]
for ipix in range(len(image)):
#get direction of pixel and calculate unit vectors
dx,dy,dz=hp.pix2vec(NSIDE, ipix)
d = math.sqrt(dx * dx + dy * dy + dz * dz)
dx = dx / d; dy = dy / d; dz = dz / d # ray unit vector
rayX = observer[0]+rslice*dx; rayY = observer[1]+rslice*dy; rayZ = observer[2]+rslice*dz
rayX /= idx; rayY /= idy; rayZ /= idz
#find voxel inside box
ix = int(round(rayX))
iy = int(round(rayY))
iz = int(round(rayZ))
image[ipix]=np.nan
# all three conditions must hold for the voxel to lie inside the box
if ix > -1 and ix < Narr[0] \
   and iy > -1 and iy < Narr[1] \
   and iz > -1 and iz < Narr[2]:
jx = (ix+1) % Narr[0];
jy = (iy+1) % Narr[1];
jz = (iz+1) % Narr[2];
rx = (rayX - ix);
ry = (rayY - iy);
rz = (rayZ - iz);
qx = 1.-rx;
qy = 1.-ry;
qz = 1.-rz;
val = (VolumeData3D[ix,iy,iz] * qx * qy * qz
       + VolumeData3D[ix,iy,jz] * qx * qy * rz
       + VolumeData3D[ix,jy,iz] * qx * ry * qz
       + VolumeData3D[ix,jy,jz] * qx * ry * rz
       + VolumeData3D[jx,iy,iz] * rx * qy * qz
       + VolumeData3D[jx,iy,jz] * rx * qy * rz
       + VolumeData3D[jx,jy,iz] * rx * ry * qz
       + VolumeData3D[jx,jy,jz] * rx * ry * rz)
image[ipix]=val
print('\r' + str((100 * ipix) // (len(image) - 1)).zfill(3) + "%", end='')
obs = np.array([observer[0]-self.xmin0,observer[1]-self.xmin1,observer[2]-self.xmin2])
Larr=np.array([self.L0,self.L1,self.L2])
Narr=np.array([self.N0,self.N1,self.N2])
image = np.zeros(hp.nside2npix(nside))
RenderSphere(vdata,image,rslice,obs,Larr,Narr)
return image
def mean_var_density(self, first_sample=0,last_sample=-1,sample_steps=10):
self.print_job('Estimate mean and variance of density fields for %s run!' % self.LSS_framework)
if(self.LSS_framework=='ARES'):
mu_i = np.zeros((self.N0,self.N1,self.N2))
var_i = np.zeros((self.N0,self.N1,self.N2))
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
d = a['s_field'][:]
mu_i = (nn-1.)/float(nn)*mu_i +1./float(nn)*d
if(nn>1):
aux = (mu_i-d)**2
var_i = (nn-1.)/nn*var_i+1./(nn-1)*aux
nn+=1
return mu_i, var_i
elif(self.LSS_framework=='HADES'):
mu_i = np.zeros((self.N0,self.N1,self.N2))
var_i = np.zeros((self.N0,self.N1,self.N2))
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
d = a['s_field'][:]
mu_i = (nn-1.)/float(nn)*mu_i +1./float(nn)*d
if(nn>1):
aux = (mu_i-d)**2
var_i = (nn-1.)/nn*var_i+1./(nn-1)*aux
nn+=1
return mu_i, var_i
else:
mu_i = np.zeros((self.N0,self.N1,self.N2))
mu_f = np.zeros((self.N0,self.N1,self.N2))
var_i = np.zeros((self.N0,self.N1,self.N2))
var_f = np.zeros((self.N0,self.N1,self.N2))
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
d = a['s_field'][:]
mu_i = (nn-1.)/float(nn)*mu_i +1./float(nn)*d
if(nn>1):
aux = (mu_i-d)**2
var_i = (nn-1.)/nn*var_i+1./(nn-1)*aux
d = a['BORG_final_density'][:]
mu_f = (nn-1.)/float(nn)*mu_f +1./float(nn)*d
if(nn>1):
aux = (mu_f-d)**2
var_f = (nn-1.)/nn*var_f+1./(nn-1)*aux
nn+=1
return mu_i, var_i, mu_f, var_f
def mean_var_spec(self, first_sample=0,last_sample=-1,sample_steps=10):
self.print_job('Estimate mean and variance of density fields for %s run!' % self.LSS_framework)
if(self.LSS_framework=='ARES'):
mu = np.zeros(self.nmodes)
var = np.zeros(self.nmodes)
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
d = a['/scalars/powerspectrum'][:]
mu = (nn-1.)/float(nn)*mu +1./float(nn)*d
if(nn>1):
aux = (mu-d)**2
var = (nn-1.)/nn*var+1./(nn-1)*aux
nn+=1
return self.kmodes,mu, var
elif(self.LSS_framework=='HADES'):
mu = np.zeros(self.nmodes)
var = np.zeros(self.nmodes)
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
d = a['/scalars/powerspectrum'][:]
mu = (nn-1.)/float(nn)*mu +1./float(nn)*d
if(nn>1):
aux = (mu-d)**2
var = (nn-1.)/nn*var+1./(nn-1)*aux
nn+=1
return self.kmodes,mu, var
else:
mu = np.zeros(self.nmodes)
var = np.zeros(self.nmodes)
nn=1
for i,a in explore_chain(self.chain_path, first_sample,last_sample, sample_steps):
d = a['/scalars/powerspectrum'][:]
mu = (nn-1.)/float(nn)*mu +1./float(nn)*d
if(nn>1):
aux = (mu-d)**2
var = (nn-1.)/nn*var+1./(nn-1)*aux
nn+=1
return self.kmodes,mu, var


scripts/quasar/Quasar.ini Normal file

@ -0,0 +1,76 @@
[system]
console_output=logares.txt
VERBOSE_LEVEL = 2
N0 = 64
N1 = 64
N2 = 64
L0 = 15000
L1 = 15000
L2 = 15000
corner0=-7500
corner1=-7500
corner2=-7500
NUM_MODES=30
N_MC=1000
# If true, the data is discarded; the metadata is kept and used to generate mock
# realizations of the data, assuming a cosmological power spectrum as determined
# from the [cosmology] section
test_mode=true
# If true, the initial power spectrum of the chain is set to the cosmological one
seed_cpower=true
# Indicate which samplers should be blocked for testing purposes
messenger_signal_blocked=false
power_sampler_a_blocked=false
power_sampler_b_blocked=false
bias_sampler_blocked=false
[run]
NCAT = 2
[cosmology]
omega_r = 0
omega_k = 0
omega_m = 0.30
omega_b = 0.045
omega_q = 0.70
w = -1
wprime = 0
n_s = 1
sigma8 = 0.80
h100 = 0.65
beta = 0.51
z0 = 0
[catalog_1]
datafile = QUASAR.txt
maskdata = one.fits
radial_selection = file
radial_file = quasar_selection.txt
refbias = true
nmean=20000
bias=3
[catalog_0]
datafile=QUASAR.txt
maskdata=one.fits
radial_selection = schechter
refbias=true
nmean=1000
bias=1
schechter_alpha=-0.94
schechter_mstar=-23.28
schechter_sampling_rate = 1000
schechter_dmax = 6000
galaxy_bright_apparent_magnitude_cut = 5
galaxy_faint_apparent_magnitude_cut = 20.5
galaxy_bright_absolute_magnitude_cut = -27
galaxy_faint_absolute_magnitude_cut = -20


@ -0,0 +1,51 @@
50 1500
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
6.673907226393414107e-06
1.734337076802830175e-04
1.647804654638367495e-04
1.592090793589784543e-04
1.565669552298505868e-04
1.624398458001042786e-04
1.725257855230777424e-04
1.980533338931222802e-04
2.237396632301090775e-04
2.070433955770229232e-04
2.047457554473321780e-04
1.978205375085790956e-04
1.885770893536208503e-04
1.928008703640204360e-04
2.063115299378451631e-04
2.026780570325601523e-04
2.098433958446920163e-04
2.224598655709381317e-04
2.010470068082567186e-04
1.883110442296301919e-04
1.741405501226736617e-04
1.512519934395997138e-04
1.359556689075110006e-04
1.174436351257825535e-04
1.011449443557470632e-04
1.003995284387548706e-05
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00


@ -0,0 +1,44 @@
#+
# ARES/HADES/BORG Package -- ./scripts/quasar/gen_selection_qso.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import cosmolopy as cpy
import numpy as np
import pyfits as pf
cosmo={'omega_M_0':0.30,'omega_lambda_0':0.70,'omega_k_0':0,'h':0.65,'sigma8':0.80}
f = pf.open("DR12Q.fits")
c = f[1].data
Z = c['Z_PIPE']
d = cpy.distance.comoving_distance(Z, **cosmo) * cosmo['h']
Dmax = 8000
Nb = 100
delta = 0.5*Dmax/Nb
d = d[d>100]
H,b = np.histogram(d, range=(0-delta,Dmax-delta),bins=Nb)
b0 = 0.5*(b[1:] + b[0:b.size-1])
H = H.astype(np.float64) / (b[1:]**3 - b[0:b.size-1]**3)
b0max = Dmax
H /= H.max()
with open("quasar_selection.txt", mode="wt") as f:
f.write("%d %lg\n" % (H.size, b0max))
for r in H:
f.write("%lg\n" % r)

scripts/quasar/one.fits Normal file

Binary file not shown.


@ -0,0 +1,231 @@
230 9200
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
4.464256056972410467e-07
4.920217452309941548e-07
5.825344815425159915e-07
7.008903535579486308e-07
1.077480445647259972e-06
1.526666626082827528e-06
1.818815122479636500e-06
2.018677949544820690e-06
2.005543949398441098e-06
1.925904845383709055e-06
1.828709639491850945e-06
1.756264149072702208e-06
1.588094063832900675e-06
1.405091172847078112e-06
1.339373166135644391e-06
1.226769448563412834e-06
1.092605119806316037e-06
1.002979573645545923e-06
8.829936736012149635e-07
7.663851835305657866e-07
6.891610749489451356e-07
6.515506267990144133e-07
6.075563991776231622e-07
5.980036671071462753e-07
5.777078931125058108e-07
5.677421646413206615e-07
5.484818119363407165e-07
4.898276062850167770e-07
4.723789439670749620e-07
4.302917765212324781e-07
3.675181531518500531e-07
3.180582666704701342e-07
2.208067257960157904e-07
1.428135441397378421e-07
1.115902311634798465e-07
1.068686393453370296e-07
1.235690453454264588e-07
1.293630566075979715e-07
1.220736219495483258e-07
1.084159012912398193e-07
8.875775927054741121e-08
6.470422805962073154e-08
5.475890130818397029e-08
4.462003253544039184e-08
3.528368369328123565e-08
2.228615668601604571e-08
2.125720618622087093e-08
1.814268244042811866e-08
1.354162258586397821e-08
1.162502853211510599e-08
1.077068164570427032e-08
1.003672815208152326e-08
1.011299662590278025e-08
1.038258223880551272e-08
7.838559543718416206e-09
5.067693751139185135e-09
4.913061835489195437e-09
4.761724140949369054e-09
3.044973918760326512e-09
2.279897290627057168e-09
1.802771197349926254e-09
1.514691673929204962e-09
2.290036414881933966e-09
2.176864824857619630e-09
2.324373912131133400e-09
1.957712349281418461e-09
2.609091315565912616e-09
2.663247445337106263e-09
1.646086301218361054e-09
1.953537352499027030e-09
2.576171919222921799e-09
2.548094030641825980e-09
1.575295429427995549e-10
8.570712026510770418e-10
7.708001628315344194e-10
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0.000000000000000000e+00
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0

scripts/regraft_run.py Normal file

@ -0,0 +1,107 @@
#+
# ARES/HADES/BORG Package -- ./scripts/regraft_run.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
import warnings
warnings.simplefilter("ignore", FutureWarning)
import h5py as h5
import argparse
import os
class NotCompatible(Exception):
    def __init__(self, msg_):
        self.msg = msg_
    def __repr__(self):
        return "NotCompatible: " + self.msg
def restart_name(prefix, cpu):
return os.path.join(prefix, "restart.h5_%d" % cpu)
def detect_ncpu(prefix):
ncpu = 0
while True:
try:
os.stat(os.path.join(prefix, "restart.h5_%d" % ncpu))
except OSError:
break
ncpu += 1
return ncpu
def bias_name(bid):
return "scalars/galaxy_bias_%d" % bid
def nmean_name(nid):
return "scalars/galaxy_nmean_%d" % nid
def check_compat(restart_prefix, mcmc):
print("Checking compatibility of MCMC and restart")
with h5.File(restart_name(restart_prefix, 0), mode="r") as f_r, \
h5.File(mcmc, mode="r") as f_m:
bias_num = 0
bname = bias_name(bias_num)
while bname in f_m:
    if bname not in f_r:
        raise NotCompatible("Not enough catalogs")
    if f_m[bname].size != f_r[bname].size:
        raise NotCompatible("Incompatible bias model")
    bias_num += 1
    bname = bias_name(bias_num)
def transfer(src, dest, name, start):
if (not name in src) or (not name in dest):
return False
sz = dest[name].shape[0]
dest[name][...] = src[name][start:(start+sz),...]
return True
def checked_transfer(*args):
if not transfer(*args):
raise NotCompatible("Problems in grafting")
def graft(restart_prefix, ncpus, mcmc):
with h5.File(mcmc, mode="r") as f_m:
plane_start = 0
plane_len = 0
for cpu in range(ncpus):
print("Transplanting to restart CPU %d" % cpu)
with h5.File(restart_name(restart_prefix, cpu), mode="r+") as f_r:
plane_len = f_r['scalars/BORG_final_density'].shape[0]
checked_transfer(f_m, f_r, 'scalars/BORG_final_density', plane_start)
checked_transfer(f_m, f_r, 'scalars/s_field', plane_start)
checked_transfer(f_m, f_r, 'scalars/s_hat_field', plane_start)
bias_num = 0
while True:
if not transfer(f_m, f_r, bias_name(bias_num), 0):
break
if not transfer(f_m, f_r, nmean_name(bias_num), 0):
break
bias_num += 1
plane_start += plane_len
p = argparse.ArgumentParser(description="Graft the state of a previous run on a given restart.")
p.add_argument('mcmc', type=str, help="MCMC state to import")
p.add_argument('restart', type=str, help="restart prefix directory")
args = p.parse_args()
ncpu = detect_ncpu(args.restart)
print("Found %d CPU restart file")
check_compat(args.restart, args.mcmc)
graft(args.restart, ncpu, args.mcmc)
print("Done")


@ -0,0 +1,57 @@
import h5py as h5
import cosmotool as ct
import numpy as np
import ares_tools as at
#result = at.read_chain_avg_dev(".",["final_density", "v_field"],step=20*4,prefix="output",do_dev=True,start=20000)
if False:
mean_p,dev_p = at.read_chain_complex_avg_dev(".",lambda ff: (1+ff['final_density'][...])*ff['v_field'][...],step=10,prefix="output",do_dev=True,start=200, pattern="%s_%04d.h5")
mean_d,dev_d= at.read_chain_complex_avg_dev(".",lambda ff: ff['final_density'][...],step=10,prefix="output",do_dev=True,start=200,pattern="%s_%04d.h5")
np.savez("means.npz", mean_p=mean_p, dev_p=dev_p, mean_d=mean_d,dev_d=dev_d)
else:
x=np.load("means.npz")
mean_d = x['mean_d']
mean_p = x['mean_p']
with h5.File("output_2200.h5", mode="r") as ff:
one_d = ff['final_density'][...]
one_p = ff['v_field'][...]#(ff['final_density'][...]+1)*ff['v_field'][...]
Nside=256
L=4000
N=256
Dmin=10
Dmax=128
#ix=np.arange(N)*L/N - 0.5*L
ix=np.arange(N)*L/N
# These are the box corners
corner_x=-2200
corner_y=-2000
corner_z=-300
shifter=-np.array([corner_x,corner_y,corner_z])*N/L - 0.5*N
shifter2=np.array([corner_x,corner_y,corner_z])
mm=ct.spherical_projection(Nside, mean_d, Dmin, Dmax,shifter=shifter,integrator_id=1,booster=100)
one_mm=ct.spherical_projection(Nside, one_d, Dmin, Dmax,shifter=shifter,integrator_id=1,booster=100)
x = ix[:,None,None].repeat(N,axis=1).repeat(N,axis=2) + shifter2[0]
y = ix[None,:,None].repeat(N,axis=0).repeat(N,axis=2) + shifter2[1]
z = ix[None,None,:].repeat(N,axis=0).repeat(N,axis=1) + shifter2[2]
r = np.sqrt(x**2+y**2+z**2)
cond = r>0
pr = np.where(cond, (mean_p[0,...] * x + mean_p[1,...] * y + mean_p[2,...]*z)/r,0)
one_pr = np.where(cond,(one_p[0,...] * x + one_p[1,...] * y + one_p[2,...]*z)/r,0)
one = np.ones(pr.shape)
mpr=ct.spherical_projection(Nside, pr, Dmin, Dmax,shifter=shifter,integrator_id=1,booster=100)
one_mpr=ct.spherical_projection(Nside, one_pr, Dmin, Dmax,shifter=shifter,integrator_id=1,booster=100)
mdist=ct.spherical_projection(Nside, one, Dmin, Dmax,shifter=shifter,integrator_id=1,booster=100)
np.savez("sky.npz", d=mm,pr=mpr,r=r,dist=mdist,one_d=one_mm,one_pr=one_mpr)


@ -0,0 +1,117 @@
from tqdm import tqdm
import math
import os
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
mpl.use('Agg')
import ares_tools as at
import matplotlib.pyplot as plt
import h5py as h5
import numba as nb
import numexpr as ne  # needed by bias_func in handle_likelihood (was missing)
startMC=0
suffix="ref"
ss = at.analysis(".")
opts=dict(Nbins=256,range=(0,ss.kmodes.max()))
names=[]
PP=[]
Fmax=0
while True:
try:
os.stat("mcmc_%d.h5" % Fmax)
except OSError:
break
names.append(Fmax)
Fmax += 1
print(Fmax)
def handle_likelihood():
def bias_func(rho, bias):
a=ne.evaluate('exp(b0*log(rho) - b1*rho**(-b2))', dict(b0=bias[0],b1=bias[1],b2=bias[2],rho=rho))
return a
@nb.jit(parallel=True,nopython=True)
def compute_likelihood(S,nmean,bias,density,data):
N0,N1,N2 = density.shape
alpha,r0,eps = bias
L = 0
for p in nb.prange(N0*N1*N2):
k = p % N2
j = (p//N2) % N1
i = (p//N2//N1)
if S[i,j,k] <= 0:
continue
rho = 1+density[i,j,k]+1e-6
x = r0*rho**(-eps)
lrho = math.log(rho)
rho_g = nmean * rho**alpha * math.exp(-x)
log_rho_g = math.log(nmean) + lrho*alpha - x
lam = S[i,j,k] * rho_g
log_lam = math.log(S[i,j,k]) + log_rho_g
L += data[i,j,k]*log_lam - lam
return L
try:
Likelihood = list(np.load("Like_%s.npy" % suffix))
loc_names = names[len(Likelihood):]
except IOError:
Likelihood = []
loc_names = list(names)
print(loc_names)
if len(loc_names) == 0:
return
data = []
selection = []
for d_no in range(16):
print("Load data %d" % (d_no,))
data.append(ss.get_data(d_no))
selection.append(ss.get_mask(d_no))
for mc_id in tqdm(loc_names):
with h5.File("mcmc_%d.h5" % mc_id, mode="r") as f:
density = f['/scalars/BORG_final_density'][...]
L=[]
for i,(D,S) in enumerate(zip(data,selection)):
nmean = f['/scalars/galaxy_nmean_%d' % i][0]
bias = f['/scalars/galaxy_bias_%d' % i][...]
L.append( compute_likelihood(S, nmean, bias, density, D) )
Likelihood.append(L)
np.save("Like.npy", Likelihood)
def handle_power():
Pref = ss.rebin_power_spectrum(startMC, **opts)
try:
data = np.load("power_%s.npz" % suffix,allow_pickle=True)
print("Found previous run")
loc_names = names[len(data['P']):]
print(loc_names)
PP = list(data['P'])
except Exception as exc:
print(exc)
print("no previous run")
PP = []
loc_names = list(names)
print(loc_names)
if len(loc_names) == 0:
return
for i in tqdm(loc_names):
PP.append(ss.compute_power_shat_spectrum(i, **opts))
bins = 0.5*(Pref[2][1:]+Pref[2][:-1])
np.savez("power_%s.npz" % suffix, bins=bins, P=PP, startMC=startMC, Fmax=Fmax, Pref=Pref)
#handle_likelihood()
handle_power()
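The spectra accumulated by handle_power can then be plotted directly; a minimal sketch, assuming each entry of P is a 1-d spectrum sampled at bins (same suffix as above):

import numpy as np
import matplotlib.pyplot as plt

d = np.load("power_ref.npz", allow_pickle=True)
P = np.array(list(d['P']))
plt.loglog(d['bins'], P.T, color='gray', alpha=0.2)
plt.xlabel("k [h/Mpc]")
plt.ylabel("P(k)")
plt.savefig("power_chain.png")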

scripts/test_analysis.py Normal file

@ -0,0 +1,44 @@
#+
# ARES/HADES/BORG Package -- ./scripts/test_analysis.py
# Copyright (C) 2014-2020 Guilhem Lavaux <guilhem.lavaux@iap.fr>
# Copyright (C) 2009-2020 Jens Jasche <jens.jasche@fysik.su.se>
#
# Additional contributions from:
# Guilhem Lavaux <guilhem.lavaux@iap.fr> (2023)
#
#+
from analysis.analysis import *
import pylab as plt
import numpy as np
import healpy as hp
#chain_path="/scratch/jasche/panphasia_run_pm/"
chain_path="/scratch/jasche/2mpp_highres_pm/"
ares=analysis(chain_path=chain_path,LSS_framework='BORG')
mu_i,var_i,mu_f,var_f=ares.mean_var_density(first_sample=299,last_sample=300,sample_steps=2)
plt.imshow(np.log(2+mu_f[:,:,64]))
plt.show()
'''
image=ares.get_spherical_slice(mu,nside=256,rslice=50)
hp.mollview(image)
plt.show()
'''
'''
k,mu,var=ares.mean_var_spec(first_sample=0,last_sample=100000,sample_steps=1)
#set loglog scale
plt.xscale('log')
plt.yscale('log')
plt.errorbar(k, mu, yerr=np.sqrt(var), fmt='-')
#plt.plot(k,mu,color='red')
plt.show()
'''