P.M. Sutter 2014-05-22 20:47:46 -05:00
commit 1ec0d26b20
29 changed files with 2187 additions and 5963 deletions

@@ -1,161 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "header.h"
void output_matter_power_spectrum()
{
int k;
double dlogk,xk,plin,pnl;
FILE *fp;
char aa[100];
fprintf(stderr,"\n\nCALCULATING MATTER POWER SPECTRA.\n");
fprintf(stderr, "---------------------------------\n\n");
sprintf(aa,"%s.matter_pk",Task.root_filename);
fp = fopen(aa,"w");
for(k=-300;k<=100;++k)
{
xk=pow(10.0,k/100.0);
plin = linear_power_spectrum(xk)/(xk*xk*xk)*TWOPI*PI;
pnl = nonlinear_power_spectrum(xk)/(xk*xk*xk)*TWOPI*PI;
fprintf(fp,"%e %e %e\n",xk,plin,pnl);
}
fclose(fp);
}
void output_matter_correlation_function()
{
int k,nr=50;
double dlogr,r,xlin,xnl,rmin = 0.05,rmax = 80.0;
FILE *fp;
char aa[100];
fprintf(stderr,"\n\nCALCULATING MATTER CORRELATION FUNCTIONIONS.\n");
fprintf(stderr, "--------------------------------------------\n\n");
sprintf(aa,"%s.matter_xi",Task.root_filename);
fp = fopen(aa,"w");
dlogr = (log(rmax) - log(rmin))/(nr-1);
for(k=0;k<nr;++k)
{
r = exp(k*dlogr)*rmin;
xlin = xi_linear_interp(r);
xnl = xi_interp(r);
fprintf(fp,"%e %e %e\n",r,xlin,xnl);
}
fclose(fp);
}
void output_halo_correlation_function(double mass)
{
int k,nr=50;
double dlogr,r,xlin,xnl,rmin = 0.15,rmax = 40.0;
FILE *fp;
char aa[100];
fprintf(stderr,"\n\nCALCULATING HALO CORRELATION FUNCTIONION (M=%.2e)\n",mass);
fprintf(stderr, "-------------------------------------------------\n\n");
sprintf(aa,"%s.halo_xi",Task.root_filename);
fp = fopen(aa,"w");
rmin = pow(3*mass/(4*DELTA_HALO*PI*RHO_CRIT*OMEGA_M),THIRD);
dlogr = (log(rmax) - log(rmin))/(nr-1);
for(k=0;k<nr;++k)
{
r = exp(k*dlogr)*rmin;
xlin = bias_interp(mass,r);
xnl = xi_interp(r);
fprintf(fp,"%e %e\n",r,xnl*xlin*xlin);
}
fclose(fp);
}
void output_matter_variance()
{
int k,nr=50;
double dlogr,r,slin,snl,mass,rmin = 0.05,rmax = 80.0,pnorm,pnorm_nl;
FILE *fp;
char aa[100];
fprintf(stderr,"\n\nCALCULATING MATTER VARIANCE.\n");
fprintf(stderr, "----------------------------\n\n");
sprintf(aa,"%s.sigma_r",Task.root_filename);
fp = fopen(aa,"w");
dlogr = (log(rmax) - log(rmin))/(nr-1);
pnorm = SIGMA_8/sigmac(8.0);
pnorm_nl = pnorm*sigmac(80.0)/nonlinear_sigmac(80.0);
for(k=0;k<nr;++k)
{
r = exp(k*dlogr)*rmin;
mass = 4./3.*PI*r*r*r*RHO_CRIT*OMEGA_M;
slin = sigmac(r)*pnorm;
snl = nonlinear_sigmac(r)*pnorm_nl;
fprintf(fp,"%e %e %e %e\n",r,slin,snl,mass);
}
fclose(fp);
}
void output_halo_concentrations()
{
int k,nr=100;
double x,dlogm,m,mvir,cdelta,mmin=1e8,mmax=1e16,delta_vir;
FILE *fp;
char aa[100];
fprintf(stderr,"\n\nCALCULATING HALO CONCENTRATIONS.\n");
fprintf(stderr, "--------------------------------\n\n");
sprintf(aa,"%s.cvir",Task.root_filename);
fp = fopen(aa,"w");
dlogm = (log(mmax) - log(mmin))/(nr-1);
x=OMEGA_M-1;
delta_vir=(18*PI*PI+82*x-39*x*x)/(1+x);
for(k=0;k<nr;++k)
{
m = exp(k*dlogm)*mmin;
cdelta = halo_concentration(m);
fprintf(fp,"%e %e\n",m,cdelta);
}
fclose(fp);
}
void output_halo_mass_function()
{
int k,nr=100;
double x,dlogm,m,mvir,cdelta,mmin=1e8,mmax=1e16,delta_vir;
FILE *fp;
char aa[100];
fprintf(stderr,"\n\nCALCULATING HALO MASS FUNCTION.\n");
fprintf(stderr, "-------------------------------\n\n");
sprintf(aa,"%s.massfunc",Task.root_filename);
fp = fopen(aa,"w");
dlogm = (log(mmax) - log(mmin))/(nr-1);
for(k=0;k<nr;++k)
{
m = exp(k*dlogm)*mmin;
x = dndM_interp(m);
fprintf(fp,"%e %e\n",m,x);
}
fclose(fp);
}

@@ -1,73 +0,0 @@
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include "header.h"
/* This calculates the linear growth factor at redshift z.
* Currently, the default is a flat LCDM universe, but this can easily
* be changed.
*/
double x03_g1,
xi3_g1;
double func_D0(double);
double func_Di(double);
double growthfactor(double z)
{
int i;
double zp1,x03,xi3,bi,b0,yy,sqx,sqx1,dbdx,x0,xi,lambda_i,htemp,omega_L,omega_i,hubble_i,
astart,fac,redshift;
redshift=z;
astart=1.0/(z+1);
zp1=redshift+1;
omega_L=1-OMEGA_M;
hubble_i = sqrt(OMEGA_M/(astart*astart*astart) +
(1.0-OMEGA_M-omega_L)/(astart*astart) +omega_L);
if(omega_L>0)
{
lambda_i=omega_L/(omega_L+(1.0-OMEGA_M-omega_L)*zp1*zp1+OMEGA_M*pow(zp1,3.0));
omega_i=OMEGA_M*pow(zp1,3.0)*lambda_i/omega_L;
}
else
{
lambda_i=0;
omega_i=OMEGA_M*zp1/(1.0+OMEGA_M*redshift);
}
fac=astart;
if((OMEGA_M < 0.99) && (omega_L > 0.001))
{
x03_g1=x03=1.0/OMEGA_M-1.0;
xi3_g1=xi3=1.0/omega_i-1.0;
b0 = qromo(func_D0,0.0,1.0,midpnt);
bi = qromo(func_Di,0.0,1.0,midpnt);
b0=b0*sqrt(x03+1.0);
bi=bi*sqrt(xi3+1.0)*astart;
fac=bi/b0;
}
if((OMEGA_M < 0.99) && (omega_L == 0))
{
x0=1.0/OMEGA_M-1.0;
xi=x0*astart;
b0 = 1.+3./x0+3.*sqrt(1+x0)*log(sqrt(1.+x0)-sqrt(x0))/pow(x0,1.5);
bi = 1.+3./xi+3.*sqrt(1+xi)*log(sqrt(1.+xi)-sqrt(xi))/pow(xi,1.5);
fac = bi/b0;
}
return(fac);
}
double func_Di(double y)
{
return(pow(1.0+xi3_g1*pow(y,1.2),-1.5));
}
double func_D0(double y)
{
return(pow(1.0+x03_g1*pow(y,1.2),-1.5));
}
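/* A minimal usage sketch, not from the original file: growthfactor(z) as
* implemented above is normalized to unity at z=0 (i.e. it returns D(z)/D(0)),
* so a quantity like the rms fluctuation amplitude can be scaled to redshift z
* from its z=0 value. SIGMA_8 is the global normalization from header.h; the
* helper name below is hypothetical.
*/
double sigma8_at_redshift(double z)
{
  return SIGMA_8*growthfactor(z);
}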

@@ -1,300 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* This is the bias factor for halos. Mass (in h^{-1} M_sol) is the input.
* This is not the scale-dependent bias factor, which can be calculated later.
*
* There are many variants of the bias in this function.
* The one it is set up to use is from the Tinker et al. (in prep, currently)
* bias paper, calibrated on the SO halo catalogs from the Tinker et al. mass function
* paper. The parameters of the bias functions are calibrated such that they will
* work with any chosen halo overdensity.
*
* Other variants:
*
* The parameterization of b(M) is taken from Sheth, Mo & Tormen (2001, MNRAS, 323, 1),
* but the actual parameters have been replaced by the values of Appendix A in
* Tinker, Weinberg, Zheng, & Zehavi 2005, ApJ (M/L paper).
*
* See also Seljak & Warren (2004).
* See also Sheth & Tormen (1999)
* See also Mo & White (1996)
*/
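/* Hedged note (restating the expression evaluated below, not an external
* reference): the SO-calibrated form used here can be written as
*
*   b(sigma) = 1 - A*sigma^(-a)/(sigma^(-a) + 1) + B*sigma^(-b),
*
* where (A,a,B,b) = (bias_A, bias_a, bias_B, bias_b) are set from DELTA_HALO
* in bias() below.
*/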
double bias_from_file(double m, double r);
double bias(double mass)
{
double rm,sig,k,neff,b,logk,khi,klo,phi,plo,nu,psp,x;
static int flag=0;
static double pnorm, prev_delta=-1;
// variables for the SO(DELTA) bias functions
static double bias_A, bias_a, bias_B, bias_b;
/* Original SMT parameters */
double a=0.707,bb=0.5,c=0.6;
pnorm=SIGMA_8/sigmac(8.0);
rm=pow(3.0*mass/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
sig=pnorm*sigmac(rm);
/* Tinker et al parameters */
a=0.707;
bb=0.35;
c=0.8;
/* Fitting to Mike Warren's simulations. */
bb=0.28;
/* Use the global parameters. */
a=BIAS_A; bb=BIAS_B; c=BIAS_C;
/* First normalize the power spectrum
*/
pnorm=SIGMA_8/sigmac(8.0);
rm=pow(3.0*mass/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
sig=pnorm*sigmac(rm);
/* This is from Tinker etal in prep for SO halos
*/
if(DELTA_HALO != prev_delta)
{
bias_A = pow(fabs(log10(DELTA_HALO)-2.69),2)*0.16 + 0.785;
bias_a = (log10(DELTA_HALO) - 2.28)*0.45;
bias_B = 0.4*(log10(DELTA_HALO)-2.3)+0.7*log10(DELTA_HALO)*exp(-pow(4/log10(DELTA_HALO),4.0))+1.23;
bias_b = 2.4;
fprintf(stderr,"BIAS PARAMS: %f %f %f %f\n",bias_A, bias_a, bias_B, bias_b);
prev_delta = DELTA_HALO;
}
a = pow(sig,-bias_a);
b = 1 - bias_A*a/(a+1) + bias_B*pow(sig,-bias_b);
return(b);
/* This is the Seljak & Warren (2004) bias.
* There's an alpha_s in the correction term, which here I set to zero. (RUNNING.)
* See App. A of Tinker et al (M/L) for a comparison of this bias with above bias.
*/
x=mass/MSTAR;
b=(0.53+0.39*pow(x,0.45)+0.13/(40*x+1) + 5.0e-4*pow(x,1.5));
b=b+log10(x)*(0.4*(OMEGA_M-0.3+SPECTRAL_INDX-1)+0.3*(SIGMA_8-0.9+HUBBLE-0.7)+0.8*0.0);
return(b);
/* This is the Sheth Mo Tormen bias.
* (possible that parameter values have been changed to better fit simulations,
* ie from Tinker etal 2005 ML paper).
*/
nu=DELTA_CRIT/sig;
b=1+1.0/(sqrt(a)*DELTA_CRIT)*(sqrt(a)*a*nu*nu + sqrt(a)*bb*pow(a*nu*nu,1-c) -
(pow(a*nu*nu,c)/(pow(a*nu*nu,c)+bb*(1-c)*(1-c/2.))));
return(b);
/* This is Sheth & Tormen
*/
nu = DELTA_CRIT/sig;
nu = nu*nu;
return(1 + (0.707*nu - 1)/DELTA_CRIT + 2*0.3/DELTA_CRIT/(1+pow(0.707*nu,0.3)));
/* This is the old Mo & White (1996) formula
*/
return(1+DELTA_CRIT/sig/sig-1/DELTA_CRIT);
}
/* Just like the halo mass function, we'll set this up such that
* you just need to interpolate.
*
* Now this has been changed to calculate the spatial-scale dependence
* of the bias factor. If you don't want the scale dep. b, then just
* input r<0.
*
* If the global flag LINEAR_PSP==0, uses the scale dependence calculated
* for halo bias relative to the non-linear matter \xi_m(r):
*
* f^2(r) = (1.0+xi*1.17)^1.49/(1.0+xi*0.69)^2.09 --> b(r) = b0*f(r)
*
* For LINEAR_PSP==1, use scale dependence determined for the linear P(k):
*
* f(r) = 1 + exp[-(r/A)^0.7] --> where A is a parameter that we're gonna have to
* determine in more detail, but now A \approx 1
*/
double bias_interp(double m, double r)
{
static int flag=0,prev_cosmo=0;
static double *x,*y,*y2, pnorm;
int i,n=100;
double dm,max=16.3,min=9,a,b,m1,m2,dm1,xi,power,rm,sig,b1,b2,mass,rvir,a1;
if(!flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(!ThisTask && OUTPUT)
fprintf(stdout,"RESET: resetting bias for %f %f\n",OMEGA_M,SIGMA_8);
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
dm=(double)(max-min)/n;
for(i=1;i<=n;++i)
{
x[i]=pow(10.0,min+i*dm);
y[i]=log(bias(x[i]));
x[i] = log(x[i]);
//printf("BIAS %f %f\n",x[i]/2.302, y[i]/2.302);
continue;
// no longer need to do this part, since we're taking into account
// halo overdensity in the bias formula.
if(DELTA_HALO!=200)
{
x[i]=log(halo_mass_conversion2(x[i],halo_c200(x[i]),200.0,DELTA_HALO));
}
else
{
x[i]=log(x[i]);
}
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
prev_cosmo=RESET_COSMOLOGY;
pnorm=SIGMA_8/sigmac(8.0);
}
m=log(m);
splint(x,y,y2,n,m,&a);
a = exp(a);
// if we're using systematic errors in an MCMC, adjust parameter a1 (amplitude)
if(USE_ERRORS)
a *= M2N.bias_amp;
// if no scale-dependence required, return the large-scale value
if(r<0) return a;
/*
* SCALE DEPENDENT BIAS!!!
*----------------------------------------------------------------------
*/
/* first, calculate the standard Zheng-like, mass-independent scale bias
* based on the non-linear correlation function
* (re-calibrated using the SO catalogs)
*/
xi = xi_interp(r);
b = pow(1.0+xi*0.92,2.08)*pow(1.0+xi*0.74,-2.37);
if(b<0.6)b=0.6;
/* Now the mass-dependent term.
* Where the mass-dependence comes in the form of the large-scale bias itself
*/
/*
sig = sigmac_radius_interp(r);
if(a<0)
b *= pow(1 + 0.028*pow(a,1.53)*pow(sig,1.57),2.59)/
pow(1 + 0.253*pow(a,1.24)*pow(sig,1.71),0.397);
*/
// if we're using systematic errors in an MCMC, adjust parameter a1 (amplitude)
if(USE_ERRORS) {
b = (b-1)*M2N.scalebias_amp + 1;
if(b<0)b=0;
}
return b*a;
}
/* This is the integrand which qromo or qtrap would call
* to calculate the large-scale galaxy bias.
* The integral is a number-weighted average of the halo
* bias function, integrated over the halo mass function.
*/
double func_galaxy_bias(double m)
{
m=exp(m);
return(dndM_interp(m)*N_avg(m)*bias_interp(m,-1.)*m);
}
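/* Hedged usage sketch (call pattern inferred from similar integrals elsewhere
* in this commit): since the integrand above includes the factor m for a
* log-mass integration, the number-weighted galaxy bias would be
*
*   bg = qromo(func_galaxy_bias,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY;
*/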
/*
* This is to get the bias from a series of files.
*
*/
double bias_from_file(double m, double r)
{
FILE *fp;
static int nfiles=10, *n, flag =1;
static double *mass, **rad, **bias;
float x1,x2,x3;
char fname[1000];
int i,j,i1,i2;
double b;
if(flag)
{
n = ivector(0,nfiles-1);
mass = dvector(0,nfiles-1);
rad = dmatrix(0,nfiles-1,1,30);
bias = dmatrix(0,nfiles-1,1,30);
fp = openfile("/home/tinker/cosmo/SDSS/zspace_analysis/SO_calibration/halofiles/sigma.dat");
for(i=0;i<nfiles;++i)
fscanf(fp,"%lf %f",&mass[i],&x1);
for(i=0;i<nfiles;++i)
mass[i] = log(mass[i]);
fclose(fp);
for(i=0;i<nfiles;++i)
{
sprintf(fname,"/home/tinker/cosmo/SDSS/zspace_analysis/SO_calibration/halofiles/bias_nl.200.m%d",i);
fp = openfile(fname);
n[i] = filesize(fp);
for(j=1;j<=n[i];++j)
{
fscanf(fp,"%lf %lf %f %f %f",&rad[i][j],&bias[i][j],&x1,&x2,&x3);
rad[i][j] = log(rad[i][j]);
}
fclose(fp);
}
flag = 0;
}
r = log(r);
if(m>mass[nfiles-1])return -1;
for(i=1;i<nfiles;++i)
if(mass[i]>=m)break;
i2 = i;
i1 = i-1;
for(j=2;j<=n[i1];++j)
if(rad[i1][j]>r)break;
x1 = (bias[i1][j] - bias[i1][j-1])/(rad[i1][j] - rad[i1][j-1])*(r-rad[i1][j-1]) + bias[i1][j-1];
for(j=2;j<=n[i2];++j)
if(rad[i2][j]>r)break;
x2 = (bias[i2][j] - bias[i2][j-1])/(rad[i2][j] - rad[i2][j-1])*(r-rad[i2][j-1]) + bias[i2][j-1];
b = (x2 - x1)/(mass[i2] - mass[i1])*(m - mass[i1]) + x1;
// if(r>log(4))
// printf("%e %e %f %f %f %f %f\n",m,r,b,x2,x1,mass[i2],mass[i1]);
return b;
}

@@ -1,178 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* This is the bias factor for halos. Mass (in h^{-1} M_sol) is the input.
* This is not the scale-dependent bias factor, which can be calculated later.
*
* The parameterization of b(M) is taken from Sheth, Mo & Tormen (2001, MNRAS, 323, 1),
* but the actual parameters have been replaced by the values of Appendix A in
* Tinker, Weinberg, Zheng, & Zehavi 2005, ApJ (submitted)
*/
double bias(double mass)
{
static int prev_cosmo=-1;
static long IDUM=-3333;
double rm,sig,k,neff,b,logk,khi,klo,phi,plo,nu,psp,x,fac,fac1,fac2;
static int flag=0,na=4;
static double pnorm,*xa,*ya,*za;
int i;
/* Original SMT parameters */
double a=0.707,bb=0.5,c=0.6;
/* Tinker et al parameters */
a=0.707;
bb=0.35;
c=0.8;
/* Fitting to Mike Warren's simulations. */
bb=0.28;
/* Use the global parameters. */
a=BIAS_A; bb=BIAS_B; c=BIAS_C;
if(!flag)
{
xa=dvector(1,na);
ya=dvector(1,na);
za=dvector(1,na);
flag=1;
xa[1] = log(1/2.51);
xa[2] = log(1/1.49);
xa[3] = log(1/0.905);
xa[4] = log(1/0.501);
}
if(prev_cosmo != RESET_COSMOLOGY)
{
prev_cosmo=RESET_COSMOLOGY;
for(i=1;i<=4;++i)
ya[i] = gasdev(&IDUM);
spline(xa,ya,na,1.0E+30,1.0E+30,za);
}
/* First normalize the power spectrum
*/
pnorm=SIGMA_8/sigmac(8.0);
rm=pow(3.0*mass/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
sig=pnorm*sigmac(rm);
nu=DELTA_CRIT/sig;
b=1+1.0/(sqrt(a)*DELTA_CRIT)*(sqrt(a)*a*nu*nu + sqrt(a)*bb*pow(a*nu*nu,1-c) -
(pow(a*nu*nu,c)/(pow(a*nu*nu,c)+bb*(1-c)*(1-c/2.))));
splint(xa,ya,za,na,log(1/sig),&fac1);
fac2 = 0.0158*pow(sig,-2.7) + 0.00251*pow(sig,2.2);
fac = 1 + fac1*fac2;
if(fac<0.01)fac=0.01;
b*=fac;
return(b);
/* This is the Seljak & Warren (2004) bias.
* There's an alpha_s in the correction term, which here I set to zero. (RUNNING.)
* See App. A of Tinker et al for a comparison of this bias with above bias.
*/
x=mass/MSTAR;
b=(0.53+0.39*pow(x,0.45)+0.13/(40*x+1) + 5.0e-4*pow(x,1.5));
b=b+log10(x)*(0.4*(OMEGA_M-0.3+SPECTRAL_INDX-1)+0.3*(SIGMA_8-0.9+HUBBLE-0.7)+0.8*0.0);
return(b);
/* This is the old Mo & White (1996) formula
*/
return(1+DELTA_CRIT/sig/sig-1/DELTA_CRIT);
}
/* Just like the halo mass function, we'll set this up such that
* you just need to interpolate.
*
* Now this has been changed to calculate the spatial-scale dependence
* of the bias factor. If you don't want the scale dep. b, then just
* input r<0.
*
* If the global flag LINEAR_PSP==0, uses the scale dependence calculated
* for halo bias relative to the non-linear matter \xi_m(r):
*
* f^2(r) = (1.0+xi*1.17)^1.49/(1.0+xi*0.69)^2.09 --> b(r) = b0*f(r)
*
* For LINEAR_PSP==1, use scale dependence determined for the linear P(k):
*
* f(r) = 1 + exp[-(r/A)^0.7] --> where A is a parameter that we're gonna have to
* determine in more detail, but now A \approx 1
*/
double bias_interp(double m, double r)
{
static int flag=0,prev_cosmo=0;
static double *x,*y,*y2;
int i,n=100;
double dm,max=16.3,min=9,a,b,m1,m2,dm1,xi;
if(!flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(!ThisTask && OUTPUT)
fprintf(stdout,"RESET: resetting bias for %f %f\n",OMEGA_M,SIGMA_8);
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
dm=(double)(max-min)/n;
for(i=1;i<=n;++i)
{
x[i]=pow(10.0,min+i*dm);
y[i]=log(bias(x[i]));
if(DELTA_HALO!=200)
{
m1 = halo_mass_conversion2(x[i]*1.001,halo_c200(x[i]*1.001),DELTA_HALO,200.0);
m2 = halo_mass_conversion2(x[i]*0.999,halo_c200(x[i]*0.999),DELTA_HALO,200.0);
dm1 = (x[i]*1.001 - x[i]*0.999)/(m1-m2);
y[i] = y[i] + log(dm1);
x[i]=log(halo_mass_conversion2(x[i],halo_c200(x[i]),DELTA_HALO,200.0));
}
else
x[i]=log(x[i]);
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
prev_cosmo=RESET_COSMOLOGY;
}
m=log(m);
splint(x,y,y2,n,m,&a);
if(r<0 || r>10)return(exp(a));
if(LINEAR_PSP)
{
b=exp(-pow(r,0.7)) + 1;
}
else
{
xi=xi_interp(r);
b=pow(1.0+xi*1.17,1.49*0.5)*pow(1.0+xi*0.69,-2.09*0.5);
}
a=exp(a)*b;
return(a);
}
/* This is the integrand which qromo or qtrap would call
* to calculate the large-scale galaxy bias.
* The integral is a number-weighted average of the halo
* bias function, integrated over the halo mass function.
*/
double func_galaxy_bias(double m)
{
return(dndM_interp(m)*N_avg(m)*bias_interp(m,-1.));
}

@@ -1,155 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
double collapse_redshift(double z);
double cvir_pnorm_g1,
cvir_sigma_g1;
/* This calculates and tabulates the halo concentrations
* as a function of halo mass. Uses the "Bullock model",
* described in a little more detail below.
*/
double halo_concentration(double m)
{
static int flag=1,n,prev_cosmo=0;
static double *x,*y,*y2;
int i;
float x1,x2,cfac;
double a,dm,x3,x4;
FILE *fp;
char fname[1000];
if(flag || RESET_COSMOLOGY!=prev_cosmo)
{
MSTAR = mstar();
if(OUTPUT)
fprintf(stdout,"Calc cvir with DELTA_HALO= %f\n",DELTA_HALO);
prev_cosmo=RESET_COSMOLOGY;
n=50;
if(flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
cvir_pnorm_g1=SIGMA_8/sigmac(8.0);
flag=0;
dm=(log(HOD.M_max)-log(1.0E8))/(n-1);
for(i=1;i<=n;++i)
{
x[i]=exp((i-1)*dm+log(1.0E8));
y[i]=cvir_model(x[i]);
x[i]=log(halo_mass_conversion(x[i],&y[i],DELTA_HALO));
y[i]=log(y[i]);
//y[i] = log(10);
}
spline(x,y,n,1.0E+30,1.0E+30,y2);
}
cfac = 0.13*log10(m/1.0e12) + 0.125;
cfac = 1;
if(m>80*MSTAR) m=80*MSTAR;
m=log(m);
splint(x,y,y2,n,m,&a);
return(exp(a)*cfac);
}
/* This is the cvir model, which should reproduce the results
* of cvir3.f, which can be obtained off James Bullock's web site.
*
* Basic idea; For a halo of mass M, find the collapse redshift
* of that halo be finding the redshift at which rms mass variance
* sigma(Mf) = DELTA_CRIT (using linear growth factor), where Mf is some
* fraction of the redshift zero halo mass.
*
* cvir = k*(1+z_coll(M*f))
*
* Model params:
* - k = 3.12 - fitting parameter (taken from cvir3.f)
* - f = 0.001 - fraction of halo mass that collapsed at z_coll
*/
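/* Worked example (illustrative arithmetic only): with the defaults k=3.12 and
* f=0.001, a halo whose f*M progenitor scale collapsed at z_coll = 2 gets
* cvir = 3.12*(1+2) ~ 9.4.
*/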
double cvir_model(double mass)
{
double cvir,z_coll,zbrent(),rad;
double k=3.12;
double f=0.001;
rad=pow(3.0*f*mass/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
cvir_sigma_g1=cvir_pnorm_g1*sigmac(rad);
if(collapse_redshift(0.0)<0)return(k);
z_coll=zbrent(collapse_redshift,0.0,200.0,1.0E-3);
cvir=k*(1+z_coll);
return(cvir);
}
/* This is the input function to find where sig(M)*D(z)-DELTA_CRIT = 0
* which is the redshift at which a halo of mass M collapsed.
*/
double collapse_redshift(double z)
{
double D;
D=growthfactor(z);
return(cvir_sigma_g1*D-DELTA_CRIT);
}
/* Some quantities are specific to an overdensity of 200 (i.e., the Jenkins mass
* function and the halo bias relation of Tinker et al. )
* Specifically, these are actually for FOF 0.2 halos, for which 200 is the
* current best approximation. (Although Warren et al quotes 250, which is the most recent
* value.)
*
* Therefore, the halo concentrations for Delta=200 halos need to be easily
* accessible for halo mass conversion of these quantities. The values are
* tabulated here, as opposed to the above routine, which tabulates the
* concentrations for a user-specified overdensity.
*/
double halo_c200(double m)
{
static int flag=1,n,prev_cosmo=0;
static double *x,*y,*y2;
int i;
float x1,x2;
double a,dm,x3,x4;
FILE *fp;
char fname[1000];
if(flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(OUTPUT)
fprintf(stdout,"Calc cvir with DELTA_HALO= %f\n",200.0);
prev_cosmo=RESET_COSMOLOGY;
n=50;
if(flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
cvir_pnorm_g1=SIGMA_8/sigmac(8.0);
flag=0;
dm=(log(HOD.M_max)-log(1.0E8))/(n-1);
for(i=1;i<=n;++i)
{
x[i]=exp((i-1)*dm+log(1.0E8));
y[i]=cvir_model(x[i]);
x[i]=log(halo_mass_conversion(x[i],&y[i],200.0));
y[i]=log(y[i]);
}
spline(x,y,n,1.0E+30,1.0E+30,y2);
}
m=log(m);
splint(x,y,y2,n,m,&a);
return(exp(a));
}

@@ -1,86 +0,0 @@
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include "header.h"
/* This is a routine which takes a virial mass (as defined by the virial
* overdensity relation; see the fitting formula of Bryan & Norman)
* and converts it to a mass at some given overdensity. This is all
* taken from the appendix of Hu & Kravtsov, 2003ApJ...584..702H.
*/
/* Constants from conversion formula in Hu & Kravtsov (2003)
*/
#define a1 0.5116
#define a2 -0.4283
#define a3 -3.13E-3
#define a4 -3.52E-5
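/* Hedged restatement of the conversion performed below: with
*   f_HK(x) = x^3*[ln(1+1/x) - 1/(1+x)]   (HK_func below),
* define f = (Delta/Delta_vir)*f_HK(1/cvir). The approximate inverse used here is
*   x(f) = [a1*f^(2p) + (3/4)^2]^(-1/2) + 2f,  p = a2 + a3*ln(f) + a4*(ln f)^2,
* and then M_Delta = M_vir*(Delta/Delta_vir)/(x(f)*cvir)^3, with
* c_Delta = cvir*R_Delta/R_vir.
*/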
double HK_func(double x);
double halo_mass_conversion(double mvir, double *cvir1, double delta_halo)
{
double x,y,z,vx,vy,vz,x1,x2,x3,x4,omega_m,delta=180,mstar,delta_vir,delta_fof,p,f,
mp,v,mass,lambda,error,tol=1.0E-3,cprev,m_delta,delta_fac,cvir,rdelta,rvir;
cvir=*cvir1;
omega_m=OMEGA_M;
lambda=1-OMEGA_M;
mstar=MSTAR;
delta=delta_halo;
x=omega_m-1;
delta_vir=(18*PI*PI+82*x-39*x*x)/(1+x);
/* Now convert from virial mass to m_delta.
*/
f=delta/delta_vir*HK_func(1.0/cvir);
p=a2+a3*log(f)+a4*log(f)*log(f);
x1=1.0/sqrt(a1*pow(f,2*p)+0.5625)+2*f;
m_delta=mvir*(delta/delta_vir*pow(1.0/(x1*cvir),3.0));
/* Now convert the cvir to c_delta.
*/
rvir=pow(3*mvir/(4*delta_vir*PI*RHO_CRIT*OMEGA_M),1.0/3.0);
rdelta=pow(3*m_delta/(4*delta*PI*RHO_CRIT*OMEGA_M),1.0/3.0);
*cvir1=cvir*rdelta/rvir;
return(m_delta);
}
/* This is a slight modification to the above routine-- instead of converting from
* the virial mass, it converts from a specified Delta to the other Delta.
* (so here the "_vir" quantities are any arbitrary input overdensity.)
*/
double halo_mass_conversion2(double mvir, double cvir1, double delta_vir, double delta_halo)
{
double x,y,z,vx,vy,vz,x1,x2,x3,x4,omega_m,delta=180,mstar,delta_fof,p,f,
mp,v,mass,lambda,error,tol=1.0E-3,cprev,m_delta,delta_fac,cvir,rdelta,rvir;
cvir=cvir1;
omega_m=OMEGA_M;
lambda=1-OMEGA_M;
mstar=MSTAR;
delta=delta_halo;
/* Now convert from virial mass to m_delta.
*/
f=delta/delta_vir*HK_func(1.0/cvir);
p=a2+a3*log(f)+a4*log(f)*log(f);
x1=1.0/sqrt(a1*pow(f,2*p)+0.5625)+2*f;
m_delta=mvir*(delta/delta_vir*pow(1.0/(x1*cvir),3.0));
/* Now convert the cvir to c_delta.
*/
rvir=pow(3*mvir/(4*delta_vir*PI*RHO_CRIT*OMEGA_M),1.0/3.0);
rdelta=pow(3*m_delta/(4*delta*PI*RHO_CRIT*OMEGA_M),1.0/3.0);
return(m_delta);
}
double HK_func(double x)
{
return(x*x*x*(log(1+1.0/x)-1.0/(1+x)));
}
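/* Hedged usage sketch (helper name and input values are hypothetical): convert
* an assumed (M_vir, c_vir) pair to the overdensity DELTA_HALO used elsewhere
* in the code; the input concentration is overwritten with the concentration
* at the new overdensity, as implemented above.
*/
void example_halo_mass_conversion(void)
{
  double cvir = 8.0;                 /* assumed NFW concentration */
  double mvir = 1.0e14;              /* h^-1 M_sol */
  double m_delta = halo_mass_conversion(mvir,&cvir,DELTA_HALO);
  printf("M_delta= %e c_delta= %f\n",m_delta,cvir);
}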

@@ -1,338 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* This is a calculation of the differential halo mass function of
* Jenkins et al 2001 (MNRAS 321, 372).
*
* Also included is the mass function from my own analysis, which is
* a variant of the Sheth-Tormen mass function (leaving all 3 parameters
* as free parameters), with a 2-parameter high-mass cutoff of the form
* suggested by Reed et al.
*
*/
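/* Hedged restatement of the SO mass function evaluated below:
*
*   f(sigma) = A*[ (sigma/b)^(-a) + 1 ]*exp(-c/sigma^2)
*   dn/dM    = f(sigma)*(OMEGA_M*RHO_CRIT/M)*(-1/sigma)*dsigma/dM,
*
* where (A,a,b,c) = (a1,a2,a3,a4) are interpolated to DELTA_HALO in
* initialize_mass_function() at the bottom of this file.
*/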
void initialize_mass_function(double *a1, double *a2, double *a3, double *a4);
double halo_mass_function(double mass)
{
double sig,logm,a,slo,shi,rm,rlo,rhi,mlo,mhi,dsdM,n,nuprime,nufnu,p,A;
static int flag=0,SO180=0,SO324=0,WARREN=0,ST=0,JENKINS=0;
static double pnorm, prev_delta;
double btemp = -1;
static double
a1 = 0.325277,
a2 = 0.492785,
a3 = 0.310289,
a4 = 1.317104,
a5 = 2.425681;
/* Jenkins et al. SO 180 best-fit
*/
if(SO180)
{
JENKINS_A = 0.301;
JENKINS_B = 0.64;
JENKINS_C = 3.82;
}
if(SO324)
{
JENKINS_A = 0.316;
JENKINS_B = 0.67;
JENKINS_C = 3.82;
}
if(JENKINS) //their .2 fof function
{
JENKINS_A = 0.315;
JENKINS_B = 0.61;
JENKINS_C = 3.8;
}
/* First normalize the power spectrum
*/
pnorm=SIGMA_8/sigmac(8.0);
rm=pow(3.0*mass/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
sig=pnorm*sigmac(rm);
logm=log10(mass);
mlo=0.99*mass;
mhi=1.01*mass;
rlo=pow(3.0*mlo/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
rhi=pow(3.0*mhi/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
slo=pnorm*sigmac(rlo);
shi=pnorm*sigmac(rhi);
dsdM=(shi-slo)/(mhi-mlo);
if(SO324)goto JENKINS_FUNCTION;
if(SO180)goto JENKINS_FUNCTION;
if(WARREN)goto WARREN_FUNCTION;
if(ST)goto ST_FUNCTION;
if(JENKINS)goto JENKINS_FUNCTION;
/* Tinker et al. (in prep) for SO 200
*/
if(DELTA_HALO != prev_delta)
{
initialize_mass_function(&a1,&a2,&a3,&a4);
prev_delta = DELTA_HALO;
// if we're using systematic errors in an MCMC, adjust parameter a1 (amplitude)
if(USE_ERRORS)
a1 *= M2N.mf_amp;
fprintf(stderr,"MF PARAMS for DELTA=%f %f %f %f %f\n",DELTA_HALO,a1,a2,a3,a4);
}
n = -a1*(pow(sig/a3,-a2)+1)*exp(-a4/sig/sig)*OMEGA_M*RHO_CRIT/mass/sig*dsdM;
return(n);
/* Jenkins et al. FOF .2 best-fit (unless SO180==1)
*/
JENKINS_FUNCTION:
a=-JENKINS_A*OMEGA_M*RHO_CRIT/mass/sig;
n=a*dsdM*exp(-pow(fabs(JENKINS_B-log(sig)),JENKINS_C));
return(n);
/* Warren et al. (calibrated only on concordance cosmology, FOF.2)
*/
WARREN_FUNCTION:
n = -0.7234*(pow(sig,-1.625)+0.2538)*exp(-1.198/sig/sig)*OMEGA_M*RHO_CRIT/mass/sig*dsdM;
return(n);
/* Need to find the derivative dlog(sig)/dlog(M)
*/
mlo=0.99*logm;
mhi=1.01*logm;
rlo=pow(3.0*pow(10.0,mlo)/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
rhi=pow(3.0*pow(10.0,mhi)/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
slo=log10(pnorm*sigmac(rlo));
shi=log10(pnorm*sigmac(rhi));
dsdM=(shi-slo)/(mhi-mlo);
ST_FUNCTION:
/* This is a bunch of Sheth-Tormen stuff.
* NB! because I'm skipping the above derivative (dlogs/dlogM), i'm using the lower
*/
nuprime=0.841*DELTA_CRIT/sig;
nufnu=0.644*(1+1.0/pow(nuprime,0.6))*(sqrt(nuprime*nuprime/2/PI))*exp(-nuprime*nuprime/2);
//n=RHO_CRIT*OMEGA_M/mass*mass*nufnu*fabs(dsdM);
n=RHO_CRIT*OMEGA_M/mass*nufnu*fabs(dsdM)/sig;
return(n);
}
/* It may be a bit costly to run the above function every time you need
* dn/dM, so here we put the values into an array and then interpolate.
*
* The current range of masses calculated is 10^8 to 10^16.7. The tabulation is
* done in log(M), so the spline interpolation will perform a power-law fit
* to masses outside this range.
*/
double dndM_interp(double m)
{
static int flag=0,prev_cosmo=0;
static double *x,*y,*y2;
int i,n=2000;
double dm,max=16.7,min=8,a,m1,m2,dm1;
if(!flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(!ThisTask && OUTPUT)
fprintf(stdout,"RESET: resetting mass function for %f %f\n",OMEGA_M,SIGMA_8);
fflush(stdout);
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
dm=(double)(max-min)/n;
for(i=1;i<=n;++i)
{
x[i]=pow(10.0,min+i*dm);
y[i]=log(halo_mass_function(x[i]));
x[i]=log(x[i]);
continue;
/*
m1 = halo_mass_conversion2(x[i]*1.001,halo_concentration(x[i]*1.001),DELTA_HALO,200);
m2 = halo_mass_conversion2(x[i]*0.999,halo_concentration(x[i]*0.999),DELTA_HALO,200.0);
dm1 = (x[i]*1.001 - x[i]*0.999)/(m1-m2);
y[i] = y[i] + log(dm1);
x[i]=log(halo_mass_conversion2(x[i],halo_concentration(x[i]),DELTA_HALO,200.0));
printf("%e %e %e %e\n",exp(x[i]),exp(y[i]),0.1*exp(y[i]),0.1);
continue;
*/
if(DELTA_HALO!=200)
{
m1 = halo_mass_conversion2(x[i]*1.001,halo_c200(x[i]*1.001),200.0,DELTA_HALO);
m2 = halo_mass_conversion2(x[i]*0.999,halo_c200(x[i]*0.999),200.0,DELTA_HALO);
dm1 = (x[i]*1.001 - x[i]*0.999)/(m1-m2);
y[i] = y[i] + log(dm1);
x[i]=log(halo_mass_conversion2(x[i],halo_c200(x[i]),200.0,DELTA_HALO));
}
else
x[i]=log(x[i]);
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
prev_cosmo=RESET_COSMOLOGY;
}
//exit(0);
m=log(m);
splint(x,y,y2,n,m,&a);
return(exp(a));
}
/* Reads mass function in from a file and spline interpolates.
*/
double nbody_massfunction(double m)
{
static int flag=0,n;
static double *x,*y,*y2,log10_2,normhi,normlo;
float x1,x2,x3;
int i;
double a,dx;
char aa[1000];
FILE *fp;
if(!flag)
{
log10_2=log10(2.0);
flag++;
if(!(fp=fopen(Files.MassFuncFile,"r")))
endrun("ERROR opening MassFuncFile");
i=0;
n = filesize(fp);
fprintf(stderr,"Read %d lines from [%s]\n",n,Files.MassFuncFile);
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
for(i=1;i<=n;++i)
{
fscanf(fp,"%f %f",&x1,&x2);
x[i]=log(x1);
y[i]=log(x2);
fgets(aa,1000,fp);
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
fclose(fp);
fprintf(stderr,"Minimum halo mass in N-body dndM= %e\n",exp(x[1]));
normhi = exp(y[n])/halo_mass_function(exp(x[n]));
normlo = exp(y[1])/halo_mass_function(exp(x[1]));
}
m=log(m);
// if(m>x[n])return(0);
/*if(m<x[1])return(0);*/
if(m>x[n])
return(halo_mass_function(exp(m))*normhi);
if(m<x[1])
return(halo_mass_function(exp(m))*normlo);
splint(x,y,y2,n,m,&a);
/*if(m<x[1])printf("LESS %e %e %e\n",pow(10.0,m),pow(10.0,a),pow(10.0,y[1]));*/
return(exp(a));
}
void initialize_mass_function(double *a1, double *a2, double *a3, double *a4)
{
int n = 9, i;
double *x, *y, *z, at, ztemp;
x = dvector(1,n);
y = dvector(1,n);
z = dvector(1,n);
// initialize the overdensities
for(i=1;i<=9;i+=2)
x[i] = log(200*pow(2.0,(i-1.0)/2.0));
for(i=2;i<=9;i+=2)
x[i] = log(300*pow(2.0,(i-2.0)/2.0));
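// i.e. the nine tabulated overdensities are Delta = 200, 300, 400, 600, 800, 1200, 1600, 2400, 3200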
//first parameter
y[1] = 1.858659e-01 ;
y[2] = 1.995973e-01 ;
y[3] = 2.115659e-01 ;
y[4] = 2.184113e-01 ;
y[5] = 2.480968e-01 ;
y[6] = 2.546053e-01 ;
y[7] = 2.600000e-01 ;
y[8] = 2.600000e-01 ;
y[9] = 2.600000e-01 ;
spline(x,y,n,1.0E+30,1.0E+30,z);
splint(x,y,z,n,log(DELTA_HALO),a1);
if(DELTA_HALO>=1600) *a1 = 0.26;
//second parameter
y[1] = 1.466904e+00 ;
y[2] = 1.521782e+00 ;
y[3] = 1.559186e+00 ;
y[4] = 1.614585e+00 ;
y[5] = 1.869936e+00 ;
y[6] = 2.128056e+00 ;
y[7] = 2.301275e+00 ;
y[8] = 2.529241e+00 ;
y[9] = 2.661983e+00 ;
spline(x,y,n,1.0E+30,1.0E+30,z);
splint(x,y,z,n,log(DELTA_HALO),a2);
//third parameter
y[1] = 2.571104e+00 ;
y[2] = 2.254217e+00 ;
y[3] = 2.048674e+00 ;
y[4] = 1.869559e+00 ;
y[5] = 1.588649e+00 ;
y[6] = 1.507134e+00 ;
y[7] = 1.464374e+00 ;
y[8] = 1.436827e+00 ;
y[9] = 1.405210e+00 ;
spline(x,y,n,1.0E+30,1.0E+30,z);
splint(x,y,z,n,log(DELTA_HALO),a3);
//fourth parameter
y[1] = 1.193958e+00;
y[2] = 1.270316e+00;
y[3] = 1.335191e+00;
y[4] = 1.446266e+00;
y[5] = 1.581345e+00;
y[6] = 1.795050e+00;
y[7] = 1.965613e+00;
y[8] = 2.237466e+00;
y[9] = 2.439729e+00;
spline(x,y,n,1.0E+30,1.0E+30,z);
splint(x,y,z,n,log(DELTA_HALO),a4);
// now adjust for redshift
if(!(REDSHIFT>0))return;
ztemp = REDSHIFT;
if(REDSHIFT>3) ztemp = 3.0;
*a1 *= pow(1+ztemp,-0.14);
*a2 *= pow(1+ztemp,-0.14);
at = -pow(0.75/log10(DELTA_HALO/75),1.2);
at = pow(10.0,at);
*a3 *= pow(1+ztemp,-at);
}

@@ -1,146 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* This is a calculation of the differential halo mass function of
* Jenkins et al 2001 (MNRAS 321, 372).
*
* Also included is the mass function from my own analysis, which is
* a variant of the Sheth-Tormen mass function (leaving all 3 parameters
* as free parameters), with a 2-parameter high-mass cutoff of the form
* suggested by Reed et al.
*
*/
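/* Hedged restatement of the BEST_FIT multiplicity function evaluated below:
*
*   f(sigma) = a1*sqrt(2*a2/PI)*[1 + (sigma^2/(a2*delta_c^2))^a3]
*              *(delta_c/sigma)*exp(-a2*delta_c^2/(2*sigma^2))
*              *exp(-a4/(sigma*cosh(2*sigma)^a5)),
*
* i.e. a Sheth-Tormen-like form with the high-mass cutoff mentioned above.
*/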
double halo_mass_function(double mass)
{
double sig,logm,a,slo,shi,rm,rlo,rhi,mlo,mhi,dsdM,n,nuprime,nufnu,p,A;
static int flag=0, prev_cosmo=-1,na=4;
static double pnorm,*xa,*ya,*za;
double fac1,fac2,fac;
static long IDUM=-555;
int i;
static double
a1 = 0.325277,
a2 = 0.492785,
a3 = 0.310289,
a4 = 1.317104,
a5 = 2.425681;
if(!flag)
{
xa=dvector(1,na);
ya=dvector(1,na);
za=dvector(1,na);
flag=1;
xa[1] = log(1/2.51);
xa[2] = log(1/1.49);
xa[3] = log(1/0.905);
xa[4] = log(1/0.501);
}
if(prev_cosmo != RESET_COSMOLOGY)
{
prev_cosmo=RESET_COSMOLOGY;
for(i=1;i<=4;++i)
ya[i] = gasdev(&IDUM);
spline(xa,ya,na,1.0E+30,1.0E+30,za);
}
/* First normalize the power spectrum
*/
pnorm=SIGMA_8/sigmac(8.0);
rm=pow(3.0*mass/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
sig=pnorm*sigmac(rm);
logm=log10(mass);
mlo=0.99*mass;
mhi=1.01*mass;
rlo=pow(3.0*mlo/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
rhi=pow(3.0*mhi/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
slo=pnorm*sigmac(rlo);
shi=pnorm*sigmac(rhi);
dsdM=(shi-slo)/(mhi-mlo);
if(BEST_FIT)
{
n = a1*sqrt(2*a2/PI)*(1+pow(sig*sig/(a2*DELTA_CRIT*DELTA_CRIT),a3))
*DELTA_CRIT/sig*exp(-a2*DELTA_CRIT*DELTA_CRIT/(2*sig*sig))
*exp(-a4/sig/pow(fabs(cosh(2*sig)),a5));
splint(xa,ya,za,na,log(1/sig),&fac1);
fac2 = 0.00562*pow(sig,-2.9) + 0.00158*pow(sig,2.2);
fac = 1 + fac1*fac2;
if(fac<0.01)fac=0.01;
n = n*fac;
/* printf("%e %e %e %e\n",sig,n,n/fac,fac); */
n = -n*OMEGA_M*RHO_CRIT/mass/sig*dsdM;
return(n);
}
/* Jenkins et al.
*/
a=-JENKINS_A*OMEGA_M*RHO_CRIT/mass/sig;
n=a*dsdM*exp(-pow(fabs(JENKINS_B-log(sig)),JENKINS_C));
return(n);
}
/* It may be a bit costly to run the above function every time you need
* dn/dM, so here we put the values into an array and then interpolate.
*/
double dndM_interp(double m)
{
static int flag=0,prev_cosmo=0;
static double *x,*y,*y2;
int i,n=1000;
double dm,max=16.7,min=9,a,m1,m2,dm1;
if(!flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(!ThisTask && OUTPUT)
fprintf(stdout,"RESET: resetting mass function for %f %f\n",OMEGA_M,SIGMA_8);
fflush(stdout);
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
dm=(double)(max-min)/n;
for(i=1;i<=n;++i)
{
x[i]=pow(10.0,min+i*dm);
y[i]=log(halo_mass_function(x[i]));
if(DELTA_HALO!=200)
{
m1 = halo_mass_conversion2(x[i]*1.001,halo_c200(x[i]*1.001),DELTA_HALO,200.0);
m2 = halo_mass_conversion2(x[i]*0.999,halo_c200(x[i]*0.999),DELTA_HALO,200.0);
dm1 = (x[i]*1.001 - x[i]*0.999)/(m1-m2);
y[i] = y[i] + log(dm1);
x[i]=log(halo_mass_conversion2(x[i],halo_c200(x[i]),DELTA_HALO,200.0));
}
else
x[i]=log(x[i]);
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
prev_cosmo=RESET_COSMOLOGY;
}
m=log(m);
splint(x,y,y2,n,m,&a);
return(exp(a));
}

@@ -1,95 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "header.h"
double *mu_x,*mu_y,*mu_z;
int mu_n;
/* Equation (10) of vdB et al, assuming alpha = 1 (standard NFW)
*/
double func_jeans1(double x)
{
return(x/(1+x)/(1+x));
}
/* Equation (11) of vdB et al.
*/
double func_jeans2(double x)
{
double y;
splint(mu_x,mu_y,mu_z,mu_n,x,&y);
return(y/(x*x*x*(1+x)*(1+x)));
}
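/* Hedged note: together these give the isotropic Jeans solution for an NFW
* halo used in jeans_dispersion() below,
*
*   sigma_r^2(x) = [cvir*Vvir^2/mu(cvir)] * x*(1+x)^2
*                  * \int_x^infty mu(y)/[y^3*(1+y)^2] dy,
*
* where x = r/r_s, mu(x) = \int_0^x y/(1+y)^2 dy (func_jeans1, tabulated in
* mu_x/mu_y), and Vvir^2 = G*M/R_vir.
*/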
double jeans_dispersion(double mass, double rx, double v[])
{
int i,j,k,n,nc,nr;
double rmin,rmax,dlogr;
static double cmin,cmax,dlogc;
double rvir,rs,s1,sig,mu1,vcirc,cvir;
static long IDUM2 = -4555;
static double fac=-1;
static int prev_cosmo=-1;
if(fac<0)
{
mu_n = nc = 1000;
cmin = 0.1;
cmax = 100.0;
dlogc = (log(cmax) - log(cmin))/(nc-1);
mu_x = dvector(1,nc);
mu_y = dvector(1,nc);
mu_z = dvector(1,nc);
for(i=1;i<=nc;++i)
{
mu_x[i] = exp((i-1)*dlogc)*cmin;
mu_y[i] = qromo(func_jeans1,0,mu_x[i],midpnt);
}
spline(mu_x,mu_y,nc,1.0E+30,1.0E+30,mu_z);
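/* (hedged) fac^2 is G in units such that fac^2*M/R_vir, with M in M_sol and
* R in Mpc, gives a circular velocity squared in (km/s)^2: 4.499E-48 is
* roughly G in Mpc^3 M_sol^-1 s^-2 and 3.09E19 is km per Mpc. */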
fac=sqrt(4.499E-48)*3.09E19;
}
cvir = halo_concentration(mass);
rvir = pow(3*mass/(4*PI*DELTA_HALO*OMEGA_M*RHO_CRIT),THIRD);
rs = rvir/cvir;
rx = rx/rs;
vcirc = fac*fac*mass/rvir;
splint(mu_x,mu_y,mu_z,mu_n,cvir,&mu1);
s1 = qromo(func_jeans2,rx,cmax,midpnt);
sig = sqrt(cvir*vcirc/mu1*rx*(1+rx)*(1+rx)*s1);
if(isnan(sig))
sig = sqrt(vcirc/2);
for(i=0;i<3;++i)
v[i]=gasdev(&IDUM2)*sig*VBIAS;
return sig;
nr = 100;
rmin = rvir*0.01;
rmax = rvir*0.99;
dlogr = (log(rmax) - log(rmin))/(nr-1);
rs = rvir/cvir;
vcirc = fac*fac*mass/rvir;
splint(mu_x,mu_y,mu_z,mu_n,cvir,&mu1);
for(i=0;i<nr;++i)
{
rx = exp(i*dlogr)*rmin/rs;
s1 = qromo(func_jeans2,rx,cmax,midpnt);
sig = cvir/mu1*rx*(1+rx)*(1+rx)*s1*2;
printf("%f %f\n",rx*rs/rvir,sqrt(sig));
}
exit(0);
}

@@ -1,432 +0,0 @@
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include "header.h"
/* Local variables for the coordinate in redshift space.
*/
double rs_g8,rp_g8;
/* Internal functions.
*/
double func_kaiser(double z);
double xi_bar(double r);
double f_xibar(double r);
double xi_prime(double r, double mu);
double xi_t(double r);
double Lpoly(double x, int i);
double f_xi2bar(double r);
double xi_harmonic(double r, int i);
double scale_dependent_dispersion(double r);
double scale_dependent_skewness(double r);
void initial_dispersion_model_values(double *a, double **pp, double *yy);
void dispersion_model_input(void);
double chi2_dispersion_model(double *a);
/* Internal variables.
*/
struct DISPERION_MODEL_STRUCT {
int n;
double **rs,**rp;
double **x,**e;
} XX;
double *rr_g8, *aa_g8;
int nparams_g8, niter_g8=0;
void fit_dispersion_model()
{
int n,niter,i,j;
double *a,**pp,*yy,FTOL=1.0E-3,chi2min,s1,dlogm,m;
FILE *fp;
char aa[1000];
fprintf(stderr,"\n\nCHI2 MINIMIZATION OF xi(SIGMA,PI) WITH DISPERSION MODEL.\n");
fprintf(stderr, "--------------------------------------------------------\n\n");
if(POWELL)
FTOL=1.0E-4;
else
FTOL=1.0E-4;
nparams_g8 = n = 8;
dispersion_model_input();
wp.ncf=n;
a=dvector(1,n);
rr_g8 = dvector(1,n);
aa_g8 = dvector(1,n);
for(i=1;i<=n;++i)
rr_g8[i] = (log(10)-log(0.1))/(n-1.)*(i-1) + log(0.1);
BETA = 0.5;
if(POWELL)
pp=dmatrix(1,n,1,n);
else
pp=dmatrix(1,n+1,1,n);
yy=dvector(1,n+1);
initial_dispersion_model_values(a,pp,yy);
if(POWELL)
{
if(OUTPUT)printf("dispersion_model> starting powell.\n");
powell(a,pp,n,FTOL,&niter,&chi2min,chi2_dispersion_model);
chi2min = chi2_dispersion_model(a);
}
else
{
if(OUTPUT)printf("dispersion_model> starting amoeba.\n");
amoeba(pp,yy,n,FTOL,chi2_dispersion_model,&niter);
for(i=1;i<=n;++i)a[i]=pp[1][i];
chi2min = chi2_dispersion_model(a);
}
}
void initial_dispersion_model_values(double *a, double **pp, double *yy)
{
static int flag=0;
int i,j;
double d[100];
if(!flag) {
for(i=1;i<=nparams_g8;++i)
a[i] = 500;
printf("INITIAL VALUES: ");
for(i=1;i<=wp.ncf;++i)printf("%e ",a[i]);
printf("\n");
}
flag++;
/* Make the starting stepsize 10% of the initial values.
*/
for(i=1;i<=wp.ncf;++i)
d[i]=a[i]*0.1/flag;
if(POWELL)
{
for(i=1;i<=wp.ncf;++i)
{
for(j=1;j<=wp.ncf;++j)
{
pp[i][j]=0;
if(i==j)pp[i][j]+=d[j];
}
}
}
else
{
for(j=1;j<=wp.ncf;++j)
pp[1][j]=a[j];
yy[1]=chi2_dispersion_model(a);
for(i=1;i<=wp.ncf;++i)
{
a[i]+=d[i];
if(i>1)a[i-1]-=d[i-1];
yy[i+1]=chi2_dispersion_model(a);
for(j=1;j<=wp.ncf;++j)
pp[i+1][j]=a[j];
}
a[wp.ncf]-=d[wp.ncf];
}
}
void dispersion_model_input()
{
FILE *fp;
int i,j;
fp = openfile("xi2d.data");
XX.n = (int)sqrt(filesize(fp)*0.99)+1;
XX.rs = dmatrix(1,XX.n,1,XX.n);
XX.rp = dmatrix(1,XX.n,1,XX.n);
XX.x = dmatrix(1,XX.n,1,XX.n);
XX.e = dmatrix(1,XX.n,1,XX.n);
for(i=1;i<=XX.n;++i)
for(j=1;j<=XX.n;++j)
fscanf(fp,"%lf %lf %lf %lf",&XX.rs[i][j],&XX.rp[i][j],&XX.x[i][j],&XX.e[i][j]);
fclose(fp);
}
double chi2_dispersion_model(double *a)
{
int i,j;
double xx,r,chi2=0;
for(i=1;i<=nparams_g8;++i)
{
aa_g8[i] = a[i];
if(a[i]<=0)return(1.0E7);
}
for(i=2;i<=XX.n;++i)
for(j=2;j<=XX.n;++j)
{
xx = kaiser_distortion(XX.rs[i][j],XX.rp[i][j]);
/* printf("%f %f %f %f %f\n",XX.rs[i][j],XX.rp[i][j],XX.x[i][j],xx,xx); */
chi2 += (xx-XX.x[i][j])*(xx-XX.x[i][j])/(XX.e[i][j]*XX.e[i][j]);
}
niter_g8++;
printf("ITER %d %e ",niter_g8,chi2);
for(i=1;i<=nparams_g8;++i)
printf("%e ",a[i]);
printf("\n");
fflush(stdout);
if(niter_g8%100==0)
{
for(i=1;i<=100;++i)
{
r = exp((log(40)-log(0.1))/(100-1.)*(i-1) + log(0.1));
xx = scale_dependent_dispersion(r);
printf("PVD %d %f %f\n",niter_g8,r,xx);
}
fflush(stdout);
}
return(chi2);
}
double kaiser_distortion(double rs, double rp)
{
double s1,rlim=60,rr;
static int prev=-1;
if(prev==-1 || prev!=RESET_COSMOLOGY)
{
prev=RESET_COSMOLOGY;
/*
BETA = pow(OMEGA_M,0.6)/qromo(func_galaxy_bias,HOD.M_low,HOD.M_max,midpnt)*
GALAXY_DENSITY;
*/
printf("kaiser> BETA= %f SIGV= %f\n",BETA,SIGV);
}
rs_g8=rs;
rp_g8=rp;
rr=sqrt(rs*rs+rp*rp);
/* if(rr*1.5>10)rlim=rr*5.5; */
/* s1=qromo(func_kaiser,-rlim,rlim,midpnt); */
s1=qtrap(func_kaiser,-rlim,rlim,1.0E-3);
return(s1-1);
}
double linear_kaiser_distortion(double r, double z)
{
double mu;
mu= sqrt(1 - z/r*z/r);
mu = z/r;
return xi_prime(r,mu);
}
double func_kaiser(double z)
{
double r,mu,v,x,sigv;
r=sqrt(z*z+rs_g8*rs_g8);
mu=z/r;
v=(rp_g8-z)*100.0;
sigv = scale_dependent_dispersion(r);
/*
if(v*z<0)
sigv*=scale_dependent_skewness(r);
*/
/*
x=(1+one_halo_real_space(r)+two_halo_real_space(r))*
exp(-ROOT2*fabs(v)/sigv)/ROOT2/sigv*100.0;
*/
x=(1+xi_prime(r,mu)+one_halo_real_space(r))*exp(-ROOT2*fabs(v)/sigv)/ROOT2/sigv*100.0;
return(x);
}
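/* Hedged note: func_kaiser is the integrand of the streaming model evaluated
* in kaiser_distortion(),
*
*   1 + xi_s(r_sigma,r_pi) = \int [1 + xi'(r,mu) + xi_1h(r)] f(v) dv,
*
* with an exponential pairwise-velocity distribution
*   f(v) = exp(-sqrt(2)*|v|/sigma_12)/(sqrt(2)*sigma_12),
* where sigma_12 = scale_dependent_dispersion(r), v = 100*(r_pi - z) converts
* the line-of-sight offset to km/s, and the trailing factor of 100 is the
* Jacobian dv/dz.
*/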
double scale_dependent_skewness(double r)
{
return 1.5;
}
double scale_dependent_dispersion(double r)
{
static int niter=-1,flag=0;
static double *z,*x,*y;
double a;
int i;
if(niter!=niter_g8)
{
niter = niter_g8;
if(!flag)
{
flag++;
x=dvector(1,nparams_g8+1);
y=dvector(1,nparams_g8+1);
z=dvector(1,nparams_g8+1);
y[1] = 0;
x[1] = log(0.01);
for(i=2;i<=nparams_g8+1;++i)
x[i] = rr_g8[i-1];
}
for(i=2;i<=nparams_g8+1;++i)
y[i] = aa_g8[i-1];
spline(x,y,nparams_g8+1,1.0E+30,1.0E+30,z);
}
r=log(r);
if(r>rr_g8[nparams_g8])return(aa_g8[nparams_g8]);
splint(x,y,z,nparams_g8+1,r,&a);
return(a);
}
/* This function tabulates the spherically averaged correlation function.
* Equation (A7) in Madgwick et al.
*/
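/* i.e. (hedged restatement of the tabulation below)
*
*   xi_bar(r) = (3/r^3) * \int_0^r xi(s) s^2 ds,
*
* computed with xi(s) = two_halo_real_space(s) via f_xibar.
*/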
double xi_bar(double r)
{
static int flag=0,prev=-1;
static double *x,*y,*y2;
int n=100,i;
static double rmin,rmax,lgr,a;
if(!flag || prev!=RESET_KAISER)
{
if(OUTPUT>1)
printf("Tabulating xi_bar...\n");
prev=RESET_KAISER;
if(!flag)
{
x=malloc(n*sizeof(double));
x--;
y=malloc(n*sizeof(double));
y--;
y2=malloc(n*sizeof(double));
y2--;
}
flag=1;
rmin=-1.9;
rmin=log10(R_MIN_2HALO);
rmax=log10(80.0);
for(i=1;i<=n;++i)
{
lgr=(rmax-rmin)*(i-1.0)/(n-1.0)+rmin;
x[i]=pow(10.0,lgr);
y[i]=qtrap(f_xibar,0.0,x[i],1.0E-4)*3/(x[i]*x[i]*x[i]);
//printf("XIBAR %f %e\n",x[i],y[i]);
}
spline(x,y,n,1.0E30,1.0E30,y2);
}
if(r<x[1])return(-1);
splint(x,y,y2,n,r,&a);
//if(a<xi_t(r))return(xi_t(r));
return(a);
}
double f_xibar(double r)
{
double x;
x=two_halo_real_space(r);
/* x=one_halo_real_space(r)+two_halo_real_space(r); */
return(x*r*r);
}
/* This function tabulates the xi(r)*r^4 dr function.
* Equation (A8)
*/
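/* i.e. (hedged) xi_2bar(r) = (5/r^5) * \int_0^r xi(s) s^4 ds, again using the
* two-halo term for xi(s).
*/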
double xi_2bar(double r)
{
static int flag=0,prev=-1;
static double *x,*y,*y2,rmin;
int n=100,i;
double rmax,lgr,a;
if(!flag || prev!=RESET_KAISER)
{
if(OUTPUT>1)
printf("Tabulating xi_2bar...\n");
prev=RESET_KAISER;
if(!flag)
{
x=malloc(n*sizeof(double));
x--;
y=malloc(n*sizeof(double));
y--;
y2=malloc(n*sizeof(double));
y2--;
}
flag=1;
rmin=-1.9;
rmin = log10(R_MIN_2HALO);
rmax=log10(80.0);
for(i=1;i<=n;++i)
{
lgr=(rmax-rmin)*(i-1.0)/(n-1.0)+rmin;
x[i]=pow(10.0,lgr);
y[i]=qtrap(f_xi2bar,0.0,x[i],1.0E-4)*5/(x[i]*x[i]*x[i]*x[i]*x[i]);
//printf("XI2BAR %f %e\n",x[i],y[i]);
}
spline(x,y,n,1.0E30,1.0E30,y2);
}
if(r<x[1])return(-1);
splint(x,y,y2,n,r,&a);
// if(a<xi_t(r))return(xi_t(r));
return(a);
}
double f_xi2bar(double r)
{
double x;
x=two_halo_real_space(r);
/* x=one_halo_real_space(r)+two_halo_real_space(r); */
return(x*r*r*r*r);
}
/* These are Legendre polynomials
*/
double Lpoly(double x, int i)
{
switch(i) {
case 0: return(1.0);
case 2: return(0.5*(3*x*x-1));
case 4: return((35*x*x*x*x-30*x*x+3)/8.0);
/*case 4: return((384*x*x*x*x+1152*x*x*(x*x-1)+144*(x*x-1)*(x*x-1))/(16.*24.));*/
default: return(0.0);
}
}
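/* Hedged restatement of the multipoles evaluated in xi_harmonic/xi_prime below:
*
*   xi_0(r) = (1 + 2*BETA/3 + BETA^2/5) * xi(r)
*   xi_2(r) = (4*BETA/3 + 4*BETA^2/7) * [xi(r) - xi_bar(r)]
*   xi_4(r) = (8*BETA^2/35) * [xi(r) + 2.5*xi_bar(r) - 3.5*xi_2bar(r)]
*
* and xi'(r,mu) = xi_0*P_0(mu) + xi_2*P_2(mu) + xi_4*P_4(mu).
*/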
double xi_harmonic(double r, int i)
{
switch(i){
case 0: return((1+2.*BETA/3.+BETA*BETA/5.)*xi_t(r));
case 2: return((4.*BETA/3.+4.*BETA*BETA/7.)*(xi_t(r)-xi_bar(r)));
case 4: return(8.*BETA*BETA/35.*(xi_t(r)+2.5*xi_bar(r)-3.5*xi_2bar(r)));
default: return(0);
}
}
double xi_prime(double r, double mu)
{
double x0,x2,x4;
x0=xi_harmonic(r,0)*Lpoly(mu,0);
x2=xi_harmonic(r,2)*Lpoly(mu,2);
x4=xi_harmonic(r,4)*Lpoly(mu,4);
return(x0+x2+x4);
}
double xi_t(double r)
{
return(two_halo_real_space(r));
return(one_halo_real_space(r)+two_halo_real_space(r));
}

@@ -1,469 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef PARALLEL
#include <mpi.h>
#endif
#include "header.h"
/* External functions from wp_minimization.c
*/
void wp_input(void);
double mcmc_initialize(double *a, double **cov1, double *avg1, double *start_dev);
/* Internal functions.
*/
double chi2_wp_wrapper(double *a);
int USE_IWEIGHT = 0;
/******************************************************************
*
* HOD.free[] also controls which variables will be held constant/vary
* during MCMC minimization. Since this routine will also so z-space
* minimization if requested, indices>6 are cosmological.
*
* i variable
* --- --------
* [1] -> M_min
* [2] -> M1
* [3] -> alpha
* [4] -> M_cut
* [5] -> sigmaM
* [6] -> CVIR_FAC
* [7] -> MaxCen (or M_cen_max)
* [8] -> M_sat_break
* [9] -> alpha1
*
* [10]-> OMEGA_M
* [11]-> SIGMA_8
* [12]-> VBIAS
* [13]-> VBIAS_C
* [14]-> GAMMA
* [15]-> SPECTRAL_INDX
*
* [0] -> The galaxy_density will be considered data with errors on it,
* and therefore no variable will be fixed by the galaxy density.
*
*/
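/* Hedged example of the mapping: with MCMC==1 and only
* HOD.free[1]=HOD.free[2]=HOD.free[3]=1, the chain vector is
* a[1]=log10(HOD.M_min), a[2]=log10(HOD.M1), a[3]=HOD.alpha
* (see mcmc_initialize() and chi2_wp_wrapper() below for the mapping and the
* conversion back to linear masses).
*/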
void mcmc_minimization()
{
double stepfac=1;
double error=1,tolerance=0,**cov1,**tmp,*a,*avg1,chi2,chi2prev,
**evect,*eval,*aprev,*atemp,**tmp1,*opar,x1,fsat,**chain,*start_dev,*eval_prev;
int n,i,j,k,nrot,niter=0,count=0,imax_chain=100000,NSTEP=50,NSTEP_MAX=10000,convergence=0;
long IDUM=-555;
int *pcheck,pcnt,ptot=20,firstflag=1,*iweight,total_weight,icvir;
double t0,tprev,temp,chi2a,chi2b;
FILE *fpmcmc;
char fname[1000];
sprintf(fname,"%s.MCMC",Task.root_filename);
fpmcmc = fopen(fname,"w");
opar=dvector(1,100);
MCMC=Task.MCMC;
pcheck=calloc(ptot,sizeof(int));
wp_input();
Work.imodel=2;
Work.chi2=1;
OUTPUT=0;
srand48(32498793);
/* Find the number of free parameters in the minimization
* for the real-space correlation function.
*/
for(n=0,i=1;i<100;++i)
{
n+=HOD.free[i];
if(OUTPUT)
printf("mcmc_min> free[%i] = %d\n",i,HOD.free[i]);
}
wp.ncf=n;
/* Find out which free parameter is for CVIR_FAC
*/
j=0;
if(HOD.free[6])
for(i=0;i<6;++i)
if(HOD.free[i])j++;
icvir=j+1;
if(HOD.free[0])
{
wp.ngal = GALAXY_DENSITY;
wp.ngal_err = 0.1*wp.ngal;
FIX_PARAM = 0;
}
if(OUTPUT)
printf("mcmc_min> %d free parameters\n",n);
a=dvector(1,n);
start_dev=dvector(1,n);
aprev=dvector(1,n);
atemp=dvector(1,n);
cov1=dmatrix(1,n,1,n);
avg1=dvector(1,n);
tmp=dmatrix(1,n,1,n);
tmp1=dmatrix(1,n,1,1);
evect=dmatrix(1,n,1,n);
eval=dvector(1,n);
eval_prev=dvector(1,n);
chain=dmatrix(1,imax_chain,1,n);
iweight = ivector(1,imax_chain);
for(i=1;i<=imax_chain;++i)
iweight[i] = 0;
IDUM=IDUM_MCMC;
chi2prev=mcmc_initialize(a,cov1,avg1,start_dev);
niter++;
for(i=1;i<=n;++i)
{
aprev[i] = a[i];
chain[1][i] = a[i];
}
pcnt=0;
pcheck[pcnt]=1;
stepfac=1;
while(niter<NSTEP)
{
pcnt++;
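/* (hedged) Adaptive proposal width: every ptot trials, rescale stepfac by
* 0.9^(5-j), where j is the number of the last ptot proposals that were
* accepted: shrink the steps when the acceptance rate is low, grow them
* when it is high. */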
if(pcnt==ptot)
{
for(j=i=0;i<ptot;++i)j+=pcheck[i];
stepfac = stepfac*pow(0.9,5-j);
if(!ThisTask)printf("STEPFAC %f %d %d\n",stepfac,j,count);
pcnt=0;
}
/* stepfac=0.7; */
for(i=1;i<=n;++i)
a[i] = (1+gasdev(&IDUM)*start_dev[i]*stepfac)*aprev[i];
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i])OMEGA_M = a[++j];
if(HOD.free[++i])SIGMA_8 = a[++j];
if(HOD.free[++i])VBIAS = a[++j];
if(HOD.free[++i])VBIAS_C = a[++j];
if(HOD.free[++i])GAMMA = a[++j];
if(HOD.free[++i])SPECTRAL_INDX = a[++j];
}
if(VBIAS_C<0)continue;
/* Hard-wire CVIR variation
*/
if(HOD.free[6])
CVIR_FAC = a[icvir];
chi2=chi2_wp_wrapper(a);
pcheck[pcnt]=1;
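/* Metropolis accept/reject: always keep a lower chi^2; otherwise accept with
* probability exp(-delta_chi2/2). */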
if(!(chi2<chi2prev || drand48() <= exp(-(chi2-chi2prev)/2)))
{
if(USE_IWEIGHT)
iweight[niter+1]++;
pcheck[pcnt]=0;
continue;
}
niter++;
iweight[niter]++;
for(i=1;i<=n;++i)
chain[niter][i]=a[i];
for(i=1;i<=n;++i)
avg1[i] += a[i];
for(i=1;i<=n;++i)
aprev[i] = a[i];
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
cov1[i][j] += a[i]*a[j];
chi2prev=chi2;
if(!ThisTask){
fprintf(fpmcmc,"%d %d ",niter,count);
for(i=1;i<=n;++i)
printf("%e ",a[i]);
printf("%e\n",chi2);fflush(fpmcmc);
}
}
stepfac=1.6/sqrt(n);
pcnt=-1;
t0 = second();
NSTEP = niter;
while(niter<imax_chain)
{
stepfac=1.6/sqrt(n);
if(convergence)goto SKIP_MATRIX;
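/* (hedged) Build the iweight-weighted mean and covariance of the chain so far;
* jacobi() then supplies its eigenvalues/eigenvectors, which define the
* adaptive multivariate Gaussian proposal constructed after SKIP_MATRIX. */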
for(j=1;j<=n;++j)
{
avg1[j]=0;
for(k=1;k<=n;++k)
cov1[j][k]=0;
}
total_weight = 0;
for(i=1;i<=niter;++i)
{
for(j=1;j<=n;++j)
{
avg1[j]+=chain[i][j]*iweight[i];
for(k=1;k<=n;++k)
cov1[j][k]+=chain[i][j]*chain[i][k]*iweight[i];
}
total_weight+=iweight[i];
}
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
tmp[i][j] = cov1[i][j]/total_weight - avg1[i]*avg1[j]/(total_weight*total_weight);
jacobi(tmp,n,eval,evect,&nrot);
gaussj(evect,n,tmp1,1);
SKIP_MATRIX:
for(i=1;i<=n;++i)
atemp[i] = gasdev(&IDUM)*sqrt(eval[i])*stepfac;
for(i=1;i<=n;++i)
for(a[i]=0,j=1;j<=n;++j)
a[i] += atemp[j]*evect[j][i];
for(i=1;i<=n;++i)
a[i] += aprev[i];
/* We seem to be having a problem with this.
* So, broadcast the model params from the root processor.
*/
#ifdef PARALLEL
MPI_Bcast(&a[1],n,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD);
#endif
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i])OMEGA_M = a[++j];
if(HOD.free[++i])SIGMA_8 = a[++j];
if(HOD.free[++i])VBIAS = a[++j];
if(HOD.free[++i])VBIAS_C = a[++j];
if(HOD.free[++i])GAMMA = a[++j];
if(HOD.free[++i])SPECTRAL_INDX = a[++j];
/* if(HOD.free[++i])SIGV = a[++j]; */
}
if(VBIAS_C<0)continue;
/* Hard-wire CVIR variation
*/
if(HOD.free[6])
CVIR_FAC = a[icvir];
chi2=chi2_wp_wrapper(a);
tprev = t0;
t0 = second();
++count;
pcheck[pcnt]=0;
if(!(chi2<chi2prev || drand48() <= exp(-(chi2-chi2prev)/2)))
{
if(USE_IWEIGHT)
iweight[niter+1]++;
continue;
}
pcheck[pcnt]=1;
niter++;
if(!convergence)NSTEP = niter;
iweight[niter]++;
if(niter%NSTEP_MAX==0 && !convergence && niter>NSTEP_MAX)
{
convergence = 1;
for(i=1;i<=n;++i)
{
x1=fabs(eval[i]-eval_prev[i])/eval_prev[i];
if(x1>0.01)convergence = 0;
printf("CONVERGENCE CHECK %d %d %e %e %e\n",niter/NSTEP_MAX,i,x1,eval[i],eval_prev[i]);
}
for(i=1;i<=n;++i)
eval_prev[i] = eval[i];
convergence = 0;
if(convergence)
printf("CONVERGENCE ACCOMPLISHED %d %d \n",niter,count);
}
if(niter==NSTEP_MAX)
{
for(i=1;i<=n;++i)
eval_prev[i] = eval[i];
}
for(i=1;i<=n;++i)
chain[niter][i]=a[i];
for(i=1;i<=n;++i)
avg1[i] += a[i];
for(i=1;i<=n;++i)
aprev[i] = a[i];
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
cov1[i][j] += a[i]*a[j];
chi2prev=chi2;
if(!ThisTask) {
fprintf(fpmcmc,"%d %d ",niter,count);
for(i=1;i<=n;++i)
printf("%e ",a[i]);
printf("%e\n",chi2);fflush(fpmcmc);
}
}
}
double chi2_wp_wrapper(double *a)
{
static int flag=1;
static double *b;
int i,j;
if(flag)
{
b=dvector(1,100);
flag=0;
}
for(j=0,i=1;i<=N_HOD_PARAMS;++i) {
if(HOD.free[i] && i!=5) {
if(a[++j]<=0) { printf("NEG %d %d %e\n",i,j,a[j]); return(1.0E7); } }
if(HOD.free[i] && i==5) {
++j; }
}
i=0;j=0;
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_min */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M1 */
if(HOD.free[++i]){j++;b[j]=a[j];} /* alpha */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_cut */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* sigma_logM */
if(HOD.free[++i]){j++;b[j]=a[j];} /* cvir_fac */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* MaxCen */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_sat_break */
if(HOD.free[++i]){j++;b[j]=a[j];} /* alpha1 */
return(chi2_wp(b));
}
double mcmc_initialize(double *a, double **cov1, double *avg1, double *start_dev)
{
int i,j=0;
double x1,x2,omega_m;
long IDUM = -556;
omega_m = 1;
if(MCMC>1)
omega_m = OMEGA_M;
i=0;j=0;
if(HOD.free[++i]){ a[++j]=log10(HOD.M_min/omega_m);start_dev[j]=0.001; }
if(HOD.free[++i]){ a[++j]=log10(HOD.M1/omega_m);start_dev[j]=0.001; } //.0005
if(HOD.free[++i]){ a[++j]=HOD.alpha;start_dev[j]=0.03; } //.005
if(HOD.free[++i]){ a[++j]=log10(HOD.M_cut/omega_m);start_dev[j]=0.01; } //.001
if(HOD.free[++i]){ a[++j]=log10(HOD.sigma_logM);start_dev[j]=0.01; }
if(HOD.free[++i]){ a[++j]=CVIR_FAC;start_dev[j]=0.02; }
if(HOD.pdfc==7) {
if(HOD.free[++i])a[++j]=log10(HOD.M_cen_max/omega_m); start_dev[j]=0.001; }
else {
if(HOD.free[++i])a[++j]=HOD.MaxCen; start_dev[j]=0.02; }
if(HOD.free[++i]){ a[++j]=log10(HOD.M_sat_break/omega_m);start_dev[j]=0.001; }
if(HOD.free[++i]){ a[++j]=HOD.alpha1;start_dev[j]=0.02; }
if(MCMC>1)
{
if(HOD.free[++i])a[++j]=OMEGA_M;
if(HOD.free[++i])a[++j]=SIGMA_8;
if(HOD.free[++i])a[++j]=VBIAS;
if(HOD.free[++i])a[++j]=VBIAS_C;
if(HOD.free[++i])a[++j]=GAMMA;
if(HOD.free[++i])a[++j]=SPECTRAL_INDX;
}
if(!ThisTask)
{
printf("INITIAL VALUES: ");
for(i=1;i<=wp.ncf;++i)printf("%e ",a[i]);
printf("\n");
}
for(i=1;i<=wp.ncf;++i)
{
avg1[i]=a[i];
for(j=1;j<=wp.ncf;++j)
cov1[i][j]=a[i]*a[j];
}
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i]){ OMEGA_M = a[++j]; start_dev[j] = 0.01; }
if(HOD.free[++i]){ SIGMA_8 = a[++j]; start_dev[j] = 0.01; }
if(HOD.free[++i]){ VBIAS = a[++j]; start_dev[j] = 0.01; }
if(HOD.free[++i]){ VBIAS_C = a[++j]; start_dev[j] = 0.02; }
if(HOD.free[++i]){ GAMMA = a[++j]; start_dev[j] = 0.015; }
if(HOD.free[++i]){ SPECTRAL_INDX = a[++j]; start_dev[j] = 0.02; }
}
x1=chi2_wp_wrapper(a);
x2=0;
if(!ThisTask) {
printf("TRY 0 ");
for(i=1;i<=wp.ncf;++i)
printf("%.4e ",a[i]);
printf("%e\n",x1+x2);fflush(stdout);
printf("INITIAL CHI2: %e %e\n",x1,x2);
fflush(stdout);
}
return(x1+x2);
}

@@ -1,539 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef PARALLEL
#include <mpi.h>
#endif
#include "header.h"
/* External functions from wp_minimization.c
*/
void wp_input(void);
void wp_color_input(void);
double chi2_wp_color(double *a);
double chi2_wp_color_wrapper(double *a);
/* internal functions
*/
double mcmc_color_initialize(double *a, double **cov1, double *avg1, double *start_dev);
void mcmc_color_minimization()
{
double stepfac=1;
double error=1,tolerance=0,**cov1,**tmp,*a,*avg1,chi2,chi2prev,
**evect,*eval,*aprev,*atemp,**tmp1,*opar,x1,fsat,**chain,*start_dev,*eval_prev;
int n,i,j,k,nrot,niter=0,count=0,imax_chain=100000,NSTEP=50,NSTEP_MAX=10000,convergence=0;
long IDUM=-555;
int *pcheck,pcnt,ptot=20,firstflag=1,*iweight,total_weight;
double t0,tprev,temp,chi2a,chi2b;
opar=dvector(1,100);
MCMC=Task.MCMC;
pcheck=calloc(ptot,sizeof(int));
Work.imodel=2;
Work.chi2=1;
OUTPUT=1;
fprintf(stderr,"\n\nMCMC OF W_P(R_P) COLOR DATA..........\n");
fprintf(stderr, "--------------------------------------------\n\n");
HOD.blue_fraction = 0.5857; /* <- Millennium fraction (sloan); SDSS fraction -> 0.565; */
HOD.blue_fraction = 0.6555; /* <- Millennium fraction (B-V>0.8) */
HOD.blue_fraction = 0.565; /* SDSS -19,-20 */
HOD.blue_fraction = 0.492; /* SDSS -20,-21 */
HOD.blue_fraction = 0.379; /* SDSS -21 */
wp_color.ON = 1;
wp_color_input();
srand48(32498793);
/* Find the number of free parameters in the minimization
* for the real-space correlation function.
*/
for(n=0,i=1;i<12;++i)
{
n+=HOD.free[i];
/* if(i>N_HOD_PARAMS && HOD.free[i])MCMC=3;*/
if(OUTPUT)
printf("mcmc_min> free[%i] = %d\n",i,HOD.free[i]);
}
// add 3 parameters for the color stuff
n+=3;
wp.ncf=n;
if(HOD.free[0])
{
wp.ngal = GALAXY_DENSITY;
wp.ngal_err = 0.1*wp.ngal;
FIX_PARAM = 0;
}
if(OUTPUT)
printf("mcmc_min> %d free parameters\n",n);
a=dvector(1,n);
start_dev=dvector(1,n);
aprev=dvector(1,n);
atemp=dvector(1,n);
cov1=dmatrix(1,n,1,n);
avg1=dvector(1,n);
tmp=dmatrix(1,n,1,n);
tmp1=dmatrix(1,n,1,1);
evect=dmatrix(1,n,1,n);
eval=dvector(1,n);
eval_prev=dvector(1,n);
chain=dmatrix(1,imax_chain,1,n);
iweight = ivector(1,imax_chain);
for(i=1;i<=imax_chain;++i)
iweight[i] = 0;
IDUM=IDUM_MCMC;
//chi2prev=mcmc_initialize(a,cov1,avg1,start_dev);
chi2prev=mcmc_color_initialize(a,cov1,avg1,start_dev);
//initial_color_values(a,pp,yy);
niter++;
for(i=1;i<=n;++i)
{
aprev[i] = a[i];
chain[1][i] = a[i];
}
pcnt=0;
pcheck[pcnt]=1;
stepfac=1;
while(niter<NSTEP)
{
pcnt++;
if(pcnt==ptot)
{
for(j=i=0;i<ptot;++i)j+=pcheck[i];
stepfac = stepfac*pow(0.9,5-j);
if(!ThisTask)printf("STEPFAC %f %d %d\n",stepfac,j,count);
pcnt=0;
}
/* stepfac=0.7; */
for(i=1;i<=n;++i)
a[i] = (1+gasdev(&IDUM)*start_dev[i]*stepfac)*aprev[i];
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i])OMEGA_M = a[++j];
if(HOD.free[++i])SIGMA_8 = a[++j];
if(HOD.free[++i])VBIAS = a[++j];
if(HOD.free[++i])VBIAS_C = a[++j];
if(HOD.free[++i])GAMMA = a[++j];
if(HOD.free[++i])SPECTRAL_INDX = a[++j];
/* if(HOD.free[++i])SIGV = a[++j]; */
}
if(VBIAS_C<0)continue;
/* Hard-wire CVIR variation
*/
if(HOD.free[6])
CVIR_FAC = a[3];
chi2=chi2_wp_color_wrapper(a);
if(!ThisTask){
printf("TRY %d ",++count);
for(i=1;i<=n;++i)
printf("%.4e ",a[i]);
printf("%e\n",chi2);fflush(stdout);
}
pcheck[pcnt]=1;
if(!(chi2<chi2prev || drand48() <= exp(-(chi2-chi2prev)/2)))
{
/* This for loop puts the prev element in the chain if
* the current trial point is rejected.
*/
/* For the initialization, don't use this: we need
* separate elements for estimating the covariance matrix.
*/
/*
for(i=1;i<=n;++i)
a[i] = aprev[i];
chi2 = chi2prev;
*/
pcheck[pcnt]=0;
continue;
}
niter++;
iweight[niter]++;
for(i=1;i<=n;++i)
chain[niter][i]=a[i];
for(i=1;i<=n;++i)
avg1[i] += a[i];
for(i=1;i<=n;++i)
aprev[i] = a[i];
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
cov1[i][j] += a[i]*a[j];
chi2prev=chi2;
if(!ThisTask){
printf("ACCEPT %d %d ",niter,count);
for(i=1;i<=n;++i)
printf("%e ",a[i]);
printf("%e\n",chi2);fflush(stdout);
printf("HSTATS %d %e %e %e %e\n",niter,HOD.M_min,number_weighted_halo_mass(),
number_weighted_central_mass(),
qromo(func_satellite_density,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY);
printf("FSAT %d %e %e %e %e\n",niter,HOD.M_min,wp.fsat_red,wp.fsat_blue,wp.fsat_all);
}
}
RESTART_POINT:
stepfac=1.6/sqrt(n);
pcnt=-1;
t0 = second();
NSTEP = niter;
while(niter<imax_chain)
{
pcnt++;
if(pcnt==ptot)
{
for(j=i=0;i<ptot;++i)j+=pcheck[i];
//stepfac=1.6/sqrt(n);
//stepfac=0.6/sqrt(n);
// stepfac = stepfac*pow(0.9,6-j);
stepfac = 0.25;
stepfac = 0.5;
stepfac=1.6/sqrt(n);
if(!ThisTask)printf("STEPFAC %f %d %d\n",stepfac,j,count);
pcnt=0;
}
stepfac=1.6/sqrt(n);
//stepfac = 0;
if(convergence)goto SKIP_MATRIX;
// if(niter>NSTEP_MAX && niter%NSTEP_MAX!=0)goto SKIP_MATRIX;
for(j=1;j<=n;++j)
{
avg1[j]=0;
for(k=1;k<=n;++k)
cov1[j][k]=0;
}
total_weight = 0;
for(i=1;i<=niter;++i)
{
for(j=1;j<=n;++j)
{
avg1[j]+=chain[i][j]*iweight[i];
for(k=1;k<=n;++k)
cov1[j][k]+=chain[i][j]*chain[i][k]*iweight[i];
}
total_weight+=iweight[i];
}
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
tmp[i][j] = cov1[i][j]/total_weight - avg1[i]*avg1[j]/(total_weight*total_weight);
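/* tmp[][] now holds the weighted covariance of the chain. jacobi() gives its
* eigenvalues (eval) and eigenvectors (evect); the proposal below draws
* independent Gaussian steps of width sqrt(eval[i])*stepfac along each
* eigenvector and rotates them back into parameter space.
*/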
jacobi(tmp,n,eval,evect,&nrot);
gaussj(evect,n,tmp1,1);
SKIP_MATRIX:
if(RESTART==4)convergence = 1;
if(RESTART && count==0)stepfac=0;
for(i=1;i<=n;++i)
atemp[i] = gasdev(&IDUM)*sqrt(eval[i])*stepfac;
for(i=1;i<=n;++i)
for(a[i]=0,j=1;j<=n;++j)
a[i] += atemp[j]*evect[j][i];
for(i=1;i<=n;++i)
a[i] += aprev[i];
/* We seem to be having a problem with this.
* So, broadcast the model params from the root processor.
*/
#ifdef PARALLEL
MPI_Bcast(&a[1],n,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD);
#endif
// Check that the chi2 for the last point in the restart chain
// is recovered.
/*
if(RESTART && !count)
for(i=1;i<=n;++i)
a[i] = aprev[i];
*/
if(RESTART==5)
{
j = count+1;
if(j>4000)exit(0);
for(i=1;i<=n;++i)
a[i] = chain[j][i];
}
/*
if(!ThisTask)
for(i=1;i<=n;++i)
{
printf("COV %d %d %e ",count,i,sqrt(eval[i]));
for(j=1;j<=n;++j)
printf("%e ",evect[j][i]);
printf("\n");
}
*/
/* Using only variances
*/
//for(i=1;i<=n;++i)
// a[i] = aprev[i] + gasdev(&IDUM)*sqrt(tmp[i][i])*stepfac;
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i])OMEGA_M = a[++j];
if(HOD.free[++i])SIGMA_8 = a[++j];
if(HOD.free[++i])VBIAS = a[++j];
if(HOD.free[++i])VBIAS_C = a[++j];
if(HOD.free[++i])GAMMA = a[++j];
if(HOD.free[++i])SPECTRAL_INDX = a[++j];
/* if(HOD.free[++i])SIGV = a[++j]; */
}
if(VBIAS_C<0)continue;
/* Hard-wire CVIR variation
*/
if(HOD.free[6])
CVIR_FAC = a[3];
chi2=chi2_wp_color_wrapper(a);
tprev = t0;
t0 = second();
++count;
if(!ThisTask) {
printf("TRY %d ",count);
for(i=1;i<=n;++i)
printf("%.4e ",a[i]);
if(RESTART==2) {
printf("%e %e %.2f\n",chi2,chi2/(1+exp(-count/100.0)),
timediff(tprev,t0));fflush(stdout); }
else {
printf("%e %.2f\n",chi2,
timediff(tprev,t0));fflush(stdout); }
}
if(0) {
printf("CPU%02d %d ",ThisTask,count);
for(i=1;i<=n;++i)
printf("%.4e ",a[i]);
if(RESTART==2) {
printf("%e %e %.2f\n",chi2,chi2/(1+exp(-count/100.0)),
timediff(tprev,t0));fflush(stdout); }
else {
printf("%e %.2f\n",chi2,
timediff(tprev,t0));fflush(stdout); }
}
pcheck[pcnt]=0;
if(!(chi2<chi2prev || drand48() <= exp(-(chi2-chi2prev)/2)))
{
/*
for(i=1;i<=n;++i)
a[i] = aprev[i];
chi2 = chi2prev;
*/
continue;
}
pcheck[pcnt]=1;
// if(NSTEP<NSTEP_MAX)NSTEP++;
niter++;
if(!convergence)NSTEP = niter;
iweight[niter]++;
if(niter%NSTEP_MAX==0 && !convergence && niter>NSTEP_MAX)
{
convergence = 1;
for(i=1;i<=n;++i)
{
x1=fabs(eval[i]-eval_prev[i])/eval_prev[i];
if(x1>0.01)convergence = 0;
printf("CONVERGENCE CHECK %d %d %e %e %e\n",niter/NSTEP_MAX,i,x1,eval[i],eval_prev[i]);
}
for(i=1;i<=n;++i)
eval_prev[i] = eval[i];
convergence = 0;
if(convergence)
printf("CONVERGENCE ACCOMPLISHED %d %d \n",niter,count);
}
if(niter==NSTEP_MAX)
{
for(i=1;i<=n;++i)
eval_prev[i] = eval[i];
}
for(i=1;i<=n;++i)
chain[niter][i]=a[i];
for(i=1;i<=n;++i)
avg1[i] += a[i];
for(i=1;i<=n;++i)
aprev[i] = a[i];
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
cov1[i][j] += a[i]*a[j];
chi2prev=chi2;
if(!ThisTask) {
printf("ACCEPT %d %d ",niter,count);
for(i=1;i<=n;++i)
printf("%e ",a[i]);
printf("%e\n",chi2);fflush(stdout);
printf("FSAT %d %e %e %e %e\n",niter,HOD.M_min,wp.fsat_red,wp.fsat_blue,wp.fsat_all);
if(MCMC==1)
{
printf("HSTATS %d %e %e %e %e\n",niter,HOD.M_min,number_weighted_halo_mass(),
number_weighted_central_mass(),
qromo(func_satellite_density,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY);
fsat = qromo(func_satfrac,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY;
}
}
}
}
double chi2_wp_color_wrapper(double *a)
{
static int flag=1;
static double *b;
int i,j;
if(flag)
{
b=dvector(1,100);
flag=0;
}
for(j=0,i=1;i<=N_HOD_PARAMS;++i) {
if(HOD.free[i] && i!=5) {
if(a[++j]<=0) { printf("NEG %d %d %e\n",i,j,a[j]); return(1.0E7); } }
if(HOD.free[i] && i==5) {
++j; }
}
i=0;j=0;
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_min */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M1 */
if(HOD.free[++i]){j++;b[j]=a[j];} /* alpha */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_cut */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* sigma_logM */
if(HOD.free[++i]){j++;b[j]=a[j];} /* cvir_fac */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* MaxCen */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_sat_break */
if(HOD.free[++i]){j++;b[j]=a[j];} /* alpha1 */
// now the 3 color params
++j;
b[j] = a[j];
++j;
b[j] = a[j];
++j;
b[j] = a[j];
muh(1);
return(chi2_wp_color(b));
}
double mcmc_color_initialize(double *a, double **cov1, double *avg1, double *start_dev)
{
int i,j=0;
double x1,x2,omega_m;
long IDUM = -556;
i=0;j=0;
if(HOD.free[++i]){ a[++j]=log10(HOD.M_min); start_dev[j]=0.001; }
if(HOD.free[++i]){ a[++j]=log10(HOD.M1); start_dev[j] = 0.001; }
if(HOD.free[++i]){ a[++j]=HOD.alpha; start_dev[j] = 0.03; }
if(HOD.free[++i]){ a[++j]=log10(HOD.M_cut); start_dev[j] = 0.01; }
if(HOD.free[++i]){ a[++j]=log10(HOD.sigma_logM); start_dev[j] = 0.01; }
if(HOD.free[++i]){ a[++j]=CVIR_FAC; start_dev[j] = 0.02; }
if(HOD.pdfc>=7){
if(HOD.free[++i]){ a[++j]=log10(HOD.M_cen_max); start_dev[j] = 0.001; }}
else {
if(HOD.free[++i]) { a[++j]=log10(HOD.MaxCen); start_dev[j] = 0.001; }}
a[++j] = HOD.fblue0_sat;
start_dev[j] = 0.01;
a[++j] = HOD.sigma_fblue_sat;
start_dev[j] = 0.01;
a[++j] = HOD.sigma_fblue_cen;
start_dev[j] = 0.01;
if(!ThisTask)
{
printf("INITIAL VALUES: ");
for(i=1;i<=wp.ncf;++i)printf("%e ",a[i]);
printf("\n");
}
for(i=1;i<=wp.ncf;++i)
{
avg1[i]=a[i];
for(j=1;j<=wp.ncf;++j)
cov1[i][j]=a[i]*a[j];
}
x1=chi2_wp_color_wrapper(a);
x2 = 0;
if(!ThisTask) {
printf("TRY 0 ");
for(i=1;i<=wp.ncf;++i)
printf("%.4e ",a[i]);
printf("%e\n",x1+x2);fflush(stdout);
printf("INITIAL CHI2: %e %e\n",x1,x2);
fflush(stdout);
}
return(x1+x2);
}

View file

@@ -1,632 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef PARALLEL
#include <mpi.h>
#endif
#include "header.h"
/* External functions from wp_minimization.c
*/
void wp_input(void);
double mcmc_initialize(double *a, double **cov1, double *avg1, double *start_dev);
/* Internal functions.
*/
double chi2_wp_wrapper(double *a);
void mcmc_restart2(double *start_dev, int np);
int mcmc_restart3(double **chain, int n, double *chi2_prev, int *iweight);
int USE_IWEIGHT = 0;
/******************************************************************
*
* HOD.free[] also controls which variables will be held constant/vary
* during MCMC minimization. Since this routine will also do z-space
* minimization if requested, indices above N_HOD_PARAMS (10-15) are cosmological.
*
* i variable
* --- --------
* [1] -> M_min
* [2] -> M1
* [3] -> alpha
* [4] -> M_cut
* [5] -> sigmaM
* [6] -> CVIR_FAC
* [7] -> MaxCen (or M_cen_max)
* [8] -> M_sat_break
* [9] -> alpha1
*
* [10]-> OMEGA_M
* [11]-> SIGMA_8
* [12]-> VBIAS
* [13]-> VBIAS_C
* [14]-> GAMMA
* [15]-> SPECTRAL_INDX
*
* [0] -> The galaxy_density will be considered data with errors on it,
* and therefore no variable will be fixed by the galaxy density.
*
*/
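/* Illustrative example (hypothetical values, not from any batch file): if
* HOD.free[1]=HOD.free[2]=HOD.free[3]=1, HOD.free[10]=HOD.free[11]=1, and all
* other entries are 0, the free parameters pack into a[] in index order,
*
*   a[1]=log10(M_min), a[2]=log10(M1), a[3]=alpha, a[4]=OMEGA_M, a[5]=SIGMA_8,
*
* with wp.ncf=5. The same ordering is assumed wherever a[] is unpacked below.
*/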
void mcmc_minimization()
{
double stepfac=1;
double error=1,tolerance=0,**cov1,**tmp,*a,*avg1,chi2,chi2prev,
**evect,*eval,*aprev,*atemp,**tmp1,*opar,x1,fsat,**chain,*start_dev,*eval_prev;
int n,i,j,k,nrot,niter=0,count=0,imax_chain=100000,NSTEP=150,NSTEP_MAX=1000,convergence=0;
long IDUM=-555;
int *pcheck,pcnt,ptot=20,firstflag=1,*iweight,total_weight;
double t0,tprev,temp;
opar=dvector(1,100);
MCMC=Task.MCMC;
pcheck=calloc(ptot,sizeof(int));
if(MCMC>1 && !COVAR)
wp.esys=0.05;
if(!ThisTask)printf("ESYS %f %d\n",wp.esys,MCMC);
wp_input();
Work.imodel=2;
Work.chi2=1;
/*
OUTPUT=0;
*/
srand48(32498793);
/* Find the number of free parameters in the minimization
* for the real-space correlation function.
*/
for(n=0,i=1;i<100;++i)
{
n+=HOD.free[i];
/* if(i>N_HOD_PARAMS && HOD.free[i])MCMC=3;*/
if(OUTPUT)
printf("mcmc_min> free[%i] = %d\n",i,HOD.free[i]);
}
wp.ncf=n;
if(HOD.free[0])
{
wp.ngal = GALAXY_DENSITY;
wp.ngal_err = 0.1*wp.ngal;
FIX_PARAM = 0;
}
if(OUTPUT)
printf("mcmc_min> %d free parameters\n",n);
a=dvector(1,n);
start_dev=dvector(1,n);
aprev=dvector(1,n);
atemp=dvector(1,n);
cov1=dmatrix(1,n,1,n);
avg1=dvector(1,n);
tmp=dmatrix(1,n,1,n);
tmp1=dmatrix(1,n,1,1);
evect=dmatrix(1,n,1,n);
eval=dvector(1,n);
eval_prev=dvector(1,n);
chain=dmatrix(1,imax_chain,1,n);
iweight = ivector(1,imax_chain);
for(i=1;i<=imax_chain;++i)
iweight[i] = 0;
IDUM=IDUM_MCMC;
if(RESTART)
{
niter = mcmc_restart3(chain,n,&chi2prev,iweight);
if(niter < NSTEP)
{
if(ThisTask==0)
fprintf(stderr,"Not enough points in restart chain: %d<=%d\n",niter,NSTEP);
exit(0);
}
for(i=1;i<=n;++i)
aprev[i] = chain[niter][i];
goto RESTART_POINT;
}
chi2prev=mcmc_initialize(a,cov1,avg1,start_dev);
niter++;
for(i=1;i<=n;++i)
{
aprev[i] = a[i];
chain[1][i] = a[i];
}
pcnt=0;
pcheck[pcnt]=1;
if(RESTART==2)
{
mcmc_restart2(start_dev,n);
}
stepfac=1;
while(niter<NSTEP)
{
pcnt++;
if(pcnt==ptot)
{
for(j=i=0;i<ptot;++i)j+=pcheck[i];
stepfac = stepfac*pow(0.9,5-j);
if(!ThisTask)printf("STEPFAC %f %d %d\n",stepfac,j,count);
pcnt=0;
}
/* stepfac=0.7; */
for(i=1;i<=n;++i)
a[i] = (1+gasdev(&IDUM)*start_dev[i]*stepfac)*aprev[i];
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i])OMEGA_M = a[++j];
if(HOD.free[++i])SIGMA_8 = a[++j];
if(HOD.free[++i])VBIAS = a[++j];
if(HOD.free[++i])VBIAS_C = a[++j];
if(HOD.free[++i])GAMMA = a[++j];
if(HOD.free[++i])SPECTRAL_INDX = a[++j];
/* if(HOD.free[++i])SIGV = a[++j]; */
}
if(VBIAS_C<0)continue;
/* Hard-wire CVIR variation
*/
if(HOD.free[6])
CVIR_FAC = a[3];
/* Draw random value of cvir from prior.
*/
/* if(CVIR_FAC<0.3 || CVIR_FAC>1.2)continue; */
/* CVIR_FAC = 0.9*drand48()+0.3; */
/* GAMMA = gasdev(&IDUM)*0.02 + 0.15; */
chi2=chi2_wp_wrapper(a);
if(MCMC>2 && chi2<1.0E7)chi2+=chi2_zspace(a);
if(!ThisTask){
printf("TRY %d ",++count);
for(i=1;i<=n;++i)
printf("%.4e ",a[i]);
printf("%e\n",chi2);fflush(stdout);
}
pcheck[pcnt]=1;
if(!(chi2<chi2prev || drand48() <= exp(-(chi2-chi2prev)/2)))
{
/* This for loop puts the prev element in the chain if
* the current trial point is rejected.
*/
/* For the initialization, don't use this: we need
* separate elements for estimating the covariance matrix.
*/
/*
for(i=1;i<=n;++i)
a[i] = aprev[i];
chi2 = chi2prev;
*/
if(USE_IWEIGHT)
iweight[niter+1]++;
pcheck[pcnt]=0;
continue;
}
niter++;
iweight[niter]++;
for(i=1;i<=n;++i)
chain[niter][i]=a[i];
for(i=1;i<=n;++i)
avg1[i] += a[i];
for(i=1;i<=n;++i)
aprev[i] = a[i];
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
cov1[i][j] += a[i]*a[j];
chi2prev=chi2;
if(!ThisTask){
printf("ACCEPT %d %d ",niter,count);
for(i=1;i<=n;++i)
printf("%e ",a[i]);
printf("%e\n",chi2);fflush(stdout);
printf("HSTATS %d %e %e %e %e\n",niter,HOD.M_min,number_weighted_halo_mass(),
number_weighted_central_mass(),
qromo(func_satellite_density,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY);
fsat = qromo(func_satfrac,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY;
printf("FSAT %d %e %e %e %e\n",niter,fsat,HOD.M_min,HOD.sigma_logM,CVIR_FAC);
}
}
RESTART_POINT:
stepfac=1.6/sqrt(n);
pcnt=-1;
t0 = second();
NSTEP = niter;
while(niter<imax_chain)
{
pcnt++;
if(pcnt==ptot)
{
for(j=i=0;i<ptot;++i)j+=pcheck[i];
//stepfac=1.6/sqrt(n);
//stepfac=0.6/sqrt(n);
// stepfac = stepfac*pow(0.9,6-j);
stepfac = 0.25;
stepfac = 0.5;
stepfac=1.6/sqrt(n);
if(!ThisTask)printf("STEPFAC %f %d %d\n",stepfac,j,count);
pcnt=0;
}
//stepfac = 0;
if(convergence)goto SKIP_MATRIX;
// if(niter>NSTEP_MAX && niter%NSTEP_MAX!=0)goto SKIP_MATRIX;
for(j=1;j<=n;++j)
{
avg1[j]=0;
for(k=1;k<=n;++k)
cov1[j][k]=0;
}
total_weight = 0;
for(i=1;i<=niter;++i)
{
for(j=1;j<=n;++j)
{
avg1[j]+=chain[i][j]*iweight[i];
for(k=1;k<=n;++k)
cov1[j][k]+=chain[i][j]*chain[i][k]*iweight[i];
}
total_weight+=iweight[i];
}
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
tmp[i][j] = cov1[i][j]/total_weight - avg1[i]*avg1[j]/(total_weight*total_weight);
jacobi(tmp,n,eval,evect,&nrot);
gaussj(evect,n,tmp1,1);
SKIP_MATRIX:
if(RESTART==4)convergence = 1;
for(i=1;i<=n;++i)
atemp[i] = gasdev(&IDUM)*sqrt(eval[i])*stepfac;
for(i=1;i<=n;++i)
for(a[i]=0,j=1;j<=n;++j)
a[i] += atemp[j]*evect[j][i];
for(i=1;i<=n;++i)
a[i] += aprev[i];
if(!ThisTask)
for(i=1;i<=n;++i)
{
printf("COV %d %d %e ",count,i,sqrt(eval[i]));
for(j=1;j<=n;++j)
printf("%e ",evect[j][i]);
printf("\n");
}
/* Using only variances
*/
//for(i=1;i<=n;++i)
// a[i] = aprev[i] + gasdev(&IDUM)*sqrt(tmp[i][i])*stepfac;
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i])OMEGA_M = a[++j];
if(HOD.free[++i])SIGMA_8 = a[++j];
if(HOD.free[++i])VBIAS = a[++j];
if(HOD.free[++i])VBIAS_C = a[++j];
if(HOD.free[++i])GAMMA = a[++j];
if(HOD.free[++i])SPECTRAL_INDX = a[++j];
/* if(HOD.free[++i])SIGV = a[++j]; */
}
if(VBIAS_C<0)continue;
/* Hard-wire CVIR variation
*/
if(HOD.free[6])
CVIR_FAC = a[3];
/* Draw random value of cvir from prior.
*/
/* CVIR_FAC = a[n]; */
/* if(CVIR_FAC<0.3 || CVIR_FAC>1.2)continue; */
/* CVIR_FAC = 0.7*drand48()+0.3; */
/* GAMMA = gasdev(&IDUM)*0.02 + 0.15; */
// printf("GAMMA %d %f %f\n",count+1,GAMMA,CVIR_FAC);
chi2=chi2_wp_wrapper(a);
if(MCMC>2 && chi2<1.0E7)chi2+=chi2_zspace(a);
tprev = t0;
t0 = second();
if(!ThisTask) {
printf("TRY %d ",++count);
for(i=1;i<=n;++i)
printf("%.4e ",a[i]);
printf("%e %.2f\n",chi2,timediff(tprev,t0));fflush(stdout);
}
pcheck[pcnt]=0;
if(!(chi2<chi2prev || drand48() <= exp(-(chi2-chi2prev)/2)))
{
/*
for(i=1;i<=n;++i)
a[i] = aprev[i];
chi2 = chi2prev;
*/
if(USE_IWEIGHT)
iweight[niter+1]++;
continue;
}
pcheck[pcnt]=1;
// if(NSTEP<NSTEP_MAX)NSTEP++;
niter++;
if(!convergence)NSTEP = niter;
iweight[niter]++;
if(niter%NSTEP_MAX==0 && !convergence && niter>NSTEP_MAX)
{
convergence = 1;
for(i=1;i<=n;++i)
{
x1=fabs(eval[i]-eval_prev[i])/eval_prev[i];
if(x1>0.01)convergence = 0;
printf("CONVERGENCE CHECK %d %d %e %e %e\n",niter/NSTEP_MAX,i,x1,eval[i],eval_prev[i]);
}
for(i=1;i<=n;++i)
eval_prev[i] = eval[i];
convergence = 0;
if(convergence)
printf("CONVERGENCE ACCOMPLISHED %d %d \n",niter,count);
}
if(niter==NSTEP_MAX)
{
for(i=1;i<=n;++i)
eval_prev[i] = eval[i];
}
for(i=1;i<=n;++i)
chain[niter][i]=a[i];
for(i=1;i<=n;++i)
avg1[i] += a[i];
for(i=1;i<=n;++i)
aprev[i] = a[i];
for(i=1;i<=n;++i)
for(j=1;j<=n;++j)
cov1[i][j] += a[i]*a[j];
chi2prev=chi2;
if(!ThisTask) {
printf("ACCEPT %d %d ",niter,count);
for(i=1;i<=n;++i)
printf("%e ",a[i]);
printf("%e\n",chi2);fflush(stdout);
printf("HSTATS %d %e %e %e %e\n",niter,HOD.M_min,number_weighted_halo_mass(),
number_weighted_central_mass(),
qromo(func_satellite_density,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY);
fsat = qromo(func_satfrac,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY;
printf("FSAT %d %e %e %e %e\n",niter,fsat,HOD.M_min,HOD.sigma_logM,CVIR_FAC);
}
}
}
double chi2_wp_wrapper(double *a)
{
static int flag=1;
static double *b;
int i,j;
if(flag)
{
b=dvector(1,100);
flag=0;
}
for(j=0,i=1;i<=N_HOD_PARAMS;++i) {
if(HOD.free[i] && i!=5) {
if(a[++j]<=0) { printf("NEG %d %d %e\n",i,j,a[j]); return(1.0E7); } }
if(HOD.free[i] && i==5) {
++j; }
}
i=0;j=0;
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_min */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M1 */
if(HOD.free[++i]){j++;b[j]=a[j];} /* alpha */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_cut */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* sigma_logM */
if(HOD.free[++i]){j++;b[j]=a[j];} /* cvir_fac */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* MaxCen */
if(HOD.free[++i]){j++;b[j]=pow(10.0,a[j]);} /* M_sat_break */
if(HOD.free[++i]){j++;b[j]=a[j];} /* alpha1 */
return(chi2_wp(b));
}
double mcmc_initialize(double *a, double **cov1, double *avg1, double *start_dev)
{
int i,j=0;
double x1,x2;
long IDUM = -556;
i=0;j=0;
if(HOD.free[++i]){ a[++j]=log10(HOD.M_min);start_dev[j]=0.001; }
if(HOD.free[++i]){ a[++j]=log10(HOD.M1);start_dev[j]=0.001; } //.0005
if(HOD.free[++i]){ a[++j]=HOD.alpha;start_dev[j]=0.03; } //.005
if(HOD.free[++i]){ a[++j]=log10(HOD.M_cut);start_dev[j]=0.01; } //.001
if(HOD.free[++i]){ a[++j]=log10(HOD.sigma_logM);start_dev[j]=0.01; }
if(HOD.free[++i]){ a[++j]=CVIR_FAC;start_dev[j]=0.02; }
if(HOD.pdfc==7) {
if(HOD.free[++i])a[++j]=log10(HOD.M_cen_max); start_dev[j]=0.001; }
else {
if(HOD.free[++i])a[++j]=HOD.MaxCen; start_dev[j]=0.02; }
if(HOD.free[++i]){ a[++j]=log10(HOD.M_sat_break);start_dev[j]=0.001; }
if(HOD.free[++i]){ a[++j]=HOD.alpha1;start_dev[j]=0.02; }
if(MCMC>1)
{
if(HOD.free[++i])a[++j]=OMEGA_M;
if(HOD.free[++i])a[++j]=SIGMA_8;
if(HOD.free[++i])a[++j]=VBIAS;
if(HOD.free[++i])a[++j]=VBIAS_C;
if(HOD.free[++i])a[++j]=GAMMA;
if(HOD.free[++i])a[++j]=SPECTRAL_INDX;
}
printf("INITIAL VALUES: ");
for(i=1;i<=wp.ncf;++i)printf("%e ",a[i]);
printf("\n");
for(i=1;i<=wp.ncf;++i)
{
avg1[i]=a[i];
for(j=1;j<=wp.ncf;++j)
cov1[i][j]=a[i]*a[j];
}
if(MCMC>1)
{
RESET_COSMOLOGY++;
j=0;
for(i=1;i<=N_HOD_PARAMS;++i)if(HOD.free[i])j++;
i=N_HOD_PARAMS;
if(HOD.free[++i]){ OMEGA_M = a[++j]; start_dev[j] = 0.01; }
if(HOD.free[++i]){ SIGMA_8 = a[++j]; start_dev[j] = 0.01; }
if(HOD.free[++i]){ VBIAS = a[++j]; start_dev[j] = 0.01; }
if(HOD.free[++i]){ VBIAS_C = a[++j]; start_dev[j] = 0.02; }
if(HOD.free[++i]){ GAMMA = a[++j]; start_dev[j] = 0.015; }
if(HOD.free[++i]){ SPECTRAL_INDX = a[++j]; start_dev[j] = 0.02; }
}
x1=chi2_wp_wrapper(a);
if(MCMC>2)
x2=chi2_zspace(a);
else
x2=0;
if(!ThisTask) {
printf("TRY 0 ");
for(i=1;i<=wp.ncf;++i)
printf("%.4e ",a[i]);
printf("%e\n",x1+x2);fflush(stdout);
printf("INITIAL CHI2: %e %e\n",x1,x2);
fflush(stdout);
}
return(x1+x2);
}
/* This is to look at a chain and get the variances in each parameter.
*/
void mcmc_restart2(double *start_dev, int np)
{
int n,i,j,k,i1,i2;
FILE *fp;
char aa[100];
float xbar[10],xsqr[10],x;
fp = openfile(RESTART_FILE);
n = filesize(fp);
for(i=0;i<np;++i)
xbar[i] = xsqr[i] = 0;
for(i=1;i<=n;++i)
{
fscanf(fp,"%s %d %d",aa,&i1,&i2);
for(j=0;j<np;++j)
{
fscanf(fp,"%f",&x);
xbar[j] += x;
xsqr[j] += x*x;
}
fgets(aa,100,fp);
}
for(i=0;i<np;++i)
{
xbar[i]/=n;
xsqr[i]/=n;
xsqr[i] = sqrt(xsqr[i] - xbar[i]*xbar[i]);
start_dev[i+1] = xsqr[i];
if(!ThisTask)
fprintf(stdout,"RESTART_DEV %f %f\n",xbar[i],xsqr[i]);
}
}
int mcmc_restart3(double **chain, int n, double *chi2_prev, int *iweight)
{
FILE *fp;
char aa[100];
int niter,i,j,i1,i2,iprev;
double x,*a,chi2;
fp = openfile(RESTART_FILE);
niter = filesize(fp);
a = dvector(1,n);
fscanf(fp,"%s %d %d",aa,&i1,&i2);
rewind(fp);
iprev = i2 - 1;
for(i=1;i<=niter;++i)
{
fscanf(fp,"%s %d %d",aa,&i1,&i2);
if(USE_IWEIGHT)
iweight[i] = i2 - iprev;
else
iweight[i] = 1;
iprev = i2;
for(j=1;j<=n;++j)
fscanf(fp,"%lf",&chain[i][j]);
fscanf(fp,"%lf",&x);
}
*chi2_prev = 20000*x; // set it to automatically take the first element
//*chi2_prev = x;
/* Normalize all the masses by OMEGA_M
*/
for(i=1;i<=niter;++i)
{
chain[i][1] -= log10(chain[i][4]);
chain[i][3] -= log10(chain[i][4]);
}
return niter;
}

View file

@@ -1,39 +0,0 @@
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include "header.h"
/* This function calculates M_star (the non-linear mass scale) for the given
* cosmological parameters.
*/
double sigma_Mmdelta_c(double lnM);
double pnorm1;
double mstar()
{
double sig,lnMmin,lnMmax,M_star;
sig=sigmac(8.0);
pnorm1 = SIGMA_8/sig;
lnMmin=log(1e7);
lnMmax=log(1e18);
M_star=zbrent(sigma_Mmdelta_c,lnMmin,lnMmax,1e-5);
M_star=exp(M_star);
if(!ThisTask)
fprintf(stderr,"M_star = %e h^{-1}M_sol\n",M_star);
return(M_star);
}
/*** solve for M_* ***/
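/* In equation form: M_star is the mass at which the normalized rms
* fluctuation equals the collapse threshold,
*
*   pnorm1 * sigmac(R(M)) = DELTA_CRIT,  R(M) = [3M/(4 pi OMEGA_M RHO_CRIT)]^(1/3),
*
* which is the root that zbrent finds in ln M via the function below.
*/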
double sigma_Mmdelta_c(double lnM)
{
double sig,M,rm;
M=exp(lnM);
rm=pow(3.0*M/(4.0*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
sig=pnorm1*sigmac(rm);
return sig-DELTA_CRIT;
}

View file

@@ -1,204 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* This is my implementation of the Halo Model fitting function
* described in Appendix C of Smith, Peacock, et al. 2003 MNRAS 341, 1311
*
* Comparison to Smith et al. program halofit.f output is successful.
*/
/* Internal functions
*/
double func_nl(double r);
void tabulate_pk_nl(double *kk, double *pknl, int nk);
/* This returns the linear power
* Delta = 4pi*k^3 P(k)/(2pi)^3
*/
double linear_power_spectrum(double xk)
{
static double *kk,*pknl,*y2,pnorm=-1,ahi,bhi;
static int flag=1,nk=1000,prev_cosmology=0;
double a,psp,x1[4],y1[4];
int i;
if(pnorm<0 || prev_cosmology!=RESET_COSMOLOGY)
{
pnorm=SIGMA_8/sigmac(8.0);
pnorm*=pnorm;
prev_cosmology=RESET_COSMOLOGY;
}
if(ITRANS>0)
psp=pow(xk,SPECTRAL_INDX)*pow(transfnc(xk),2.);
else
psp=pow(xk,SPECTRAL_INDX);
psp=psp*pnorm*xk*xk*xk/(2*PI*PI);
return(psp);
}
double nonlinear_power_spectrum(double xk)
{
static double *kk,*pknl,*y2,pnorm=-1,ahi,bhi;
static int flag=1,nk=1000,prev_cosmology=0;
double a,psp,x1[4],y1[4],xklog;
int i;
if(flag || RESET_COSMOLOGY!=prev_cosmology)
{
prev_cosmology=RESET_COSMOLOGY;
pnorm=SIGMA_8/sigmac(8.0);
flag=0;
kk=dvector(1,nk);
pknl=dvector(1,nk);
y2=dvector(1,nk);
tabulate_pk_nl(kk,pknl,nk);
spline(kk,pknl,nk,1.0E+30,1.0E+30,y2);
/* This takes the last four points in the power spectrum at high k
* and fits a power law to them for extrapolation.
*/
for(i=0;i<4;++i)
{
x1[i]=(kk[nk-3+i]);
y1[i]=(pknl[nk-3+i]);
}
least_squares(x1,y1,4,&ahi,&bhi);
}
xklog=log(xk);
/* If xk is less than the smallest tabulated k value, return linear power.
*/
if(xklog<kk[1])
return(linear_power_spectrum(xk));
/* If xk larger than highest k, return extrapolation.
*/
if(xklog>kk[nk])
return((exp((ahi+bhi*xklog))));
splint(kk,pknl,y2,nk,xklog,&a);
return(exp(a));
}
void tabulate_pk_nl(double *kk, double *pknl, int nk)
{
double r_nl,pnorm,DeltaQ,DeltaLin,DeltaH,psp,neff,s1,s2,n1,n2,ncurve,
lnk_lo,lnk_hi,dlnk,xk1,y,xk,fy;
double an,bn,cn,f1b,f2b,f3b,alpha,beta,gamma,nu,mu;
int i,j,n=10000;
/* First normalize the linear power spectrum.
*/
pnorm=SIGMA_8/sigmac(8.0);
/* Calculate the non-linear scale.
*/
r_nl = exp(zbrent(func_nl,log(0.01),log(10.0),1.0E-4));
if(OUTPUT)
fprintf(stdout,"R_NL= %f\n",r_nl);
/* Calculate the effective spectral index at the non-linear scale.
*/
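/* This is a finite-difference estimate of
*
*   n_eff = -3 - d ln sigma^2 / d ln R   at R = r_nl,
*
* where sigma(r_nl)=1 by construction (see func_nl), so d ln sigma^2/d ln R
* is approximated by 2*(s2-s1)/0.002 with sigma evaluated at 0.999*r_nl and
* 1.001*r_nl.
*/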
s1=pnorm*sigmac(-r_nl*0.999);
s2=pnorm*sigmac(-r_nl*1.001);
neff=-(3+2*(s2-s1)/0.002);
if(OUTPUT)
fprintf(stderr,"neff= %f\n",neff);
/* Spectral curvature.
*/
lnk_hi=10.0;
lnk_lo=-10.0;
dlnk=(lnk_hi-lnk_lo)/n;
s1=0;
for(i=1;i<=n;++i)
{
xk1=exp(lnk_lo+(i-0.5)*dlnk);
y=xk1*r_nl;
DeltaLin=linear_power_spectrum(xk1);
s1+=DeltaLin*y*y*(1-y*y)*exp(-y*y)*dlnk;
}
ncurve=(3+neff)*(3+neff)+4*s1;
if(OUTPUT)
fprintf(stderr,"ncurve= %f\n",ncurve);
/* Coefficients of the model.
*/
an=pow(10.0,1.4861 + 1.8369*neff + 1.6762*neff*neff + 0.7940*neff*neff*neff +
0.1670*pow(neff,4.0) - 0.6202*ncurve);
bn=pow(10.0,0.9463 + 0.9466*neff + 0.3084*neff*neff - 0.9400*ncurve);
cn= pow(10.0,-0.2807 + 0.6669*neff + 0.3214*neff*neff - 0.0793*ncurve);
gamma = 0.8649 + 0.2989*neff + 0.1631*ncurve;
alpha = 1.3884 + 0.3700*neff - 0.1452*neff*neff;
beta = 0.8291 + 0.9854*neff + 0.3401*neff*neff;
mu = pow(10.0,-3.5442 + 0.1908*neff);
nu = pow(10.0, 0.9589 + 1.2857*neff);
/* Testing the parameter dependence.
* TESTING TESTING
*/
/*
alpha *= 0.3;
beta *= 0.3;
*/
/* Omega-dependent functions (FLAT LAMBDA COSMOLOGY)
*/
f1b = pow(OMEGA_M,-0.0307);
f2b = pow(OMEGA_M,-0.0585);
f3b = pow(OMEGA_M,+0.0743);
/* Tabulate the power spectrum
*/
lnk_lo=log(0.001);
lnk_hi=log(1000.0);
dlnk=(lnk_hi-lnk_lo)/(nk-1);
for(i=1;i<=nk;++i)
{
xk=exp(lnk_lo+(i-1)*dlnk);
y=xk*r_nl;
fy=y/4.0 + y*y/8.0;
/* TEST */
/* fy*=1.2; */
DeltaLin=linear_power_spectrum(xk);
DeltaQ = DeltaLin*pow(1+DeltaLin,beta)/(1+alpha*DeltaLin)*exp(-fy);
DeltaH = an*pow(y,3*f1b)/(1+bn*pow(y,f2b)+pow(cn*f3b*y,3-gamma));
DeltaH*= 1.0/(1+mu/y+nu/(y*y));
kk[i]=log(xk);
pknl[i]=log(DeltaQ + DeltaH);
}
}
/* This is a function to find the non-linear scale. Similar to M_star,
* but here we're using a Gaussian smoothing window rather than a top hat.
* (And we're finding the scale at which the variance = 1, not 1.686.)
*/
double func_nl(double r)
{
static int prev_cosmology=0;
double sig;
static double pnorm=-1;
if(pnorm<0 || RESET_COSMOLOGY!=prev_cosmology)
pnorm=SIGMA_8/sigmac(8.0);
prev_cosmology=RESET_COSMOLOGY;
r=exp(r);
sig=pnorm*sigmac(-r);
return sig-1.0;
}

View file

@@ -1,231 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* These routines control the real-space one-halo term.
* For specifics, see:
*
* Berlind, A.\ A., \& Weinberg, D.\ H.\ 2002, \apj, 575, 587
* Zheng, Z. 2003, \apj, 610, 61
* Tinker, Weinberg, Zheng, Zehavi 2005 Apj 631 (App B)
*
*/
/* Local functions.
*/
void calc_real_space_one_halo(double *r, double *xi, int n);
double func1(double m);
double func1cs(double m);
double func1satsat(double m);
double func1_xcorr(double m);
double one_halo_ss(double r);
double one_halo_cs(double r);
double *xi_cs_g2,*xi_ss_g2,*xi_rad_g2;
/* These are the local globals to use during the qromo integration
*/
double r_g2;
/* This function tabulates the one-halo real-space term for spline interpolation.
* If the requested radius is out-of-bounds of the tabulated function, a value of
* zero is returned.
*/
double one_halo_real_space(double r)
{
static int flag=0;
static double *x,*y,*y2;
int i,n=100;
double a;
if(!HOD.pdfs)return(0);
if(!flag || RESET_FLAG_1H)
{
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
RESET_FLAG_1H=0;
calc_real_space_one_halo(x,y,n);
spline(x,y,n,2.0E+30,2.0E+30,y2);
}
if(r>x[n])return(0);
if(r<x[1])return(0);
splint(x,y,y2,n,r,&a);
return(a);
}
/* Here we calculate the one-halo real space term
* logarithmically spaced in r. The minimum value of r = 0.01 Mpc/h. The maximum
* value of r is set to be approximately twice the virial radius of M_max.
*
* Only halos with virial radii greater than 1/2 the separation
* contribute to the 1-halo term.
* Terminate integrations when r>2*R_vir(M_max).
*/
void calc_real_space_one_halo(double *r, double *xi, int n)
{
static int ncnt=0;
double fac,s1,rhi=1,rlo=-2,dr,mlo,x1,x2;
int i,j;
FILE *fp;
char fname[100];
ncnt++;
rlo=log(0.01);
rhi=log(1.9*pow(3*HOD.M_max/(4*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),1.0/3.0));
dr=(rhi-rlo)/(n-1);
if(OUTPUT>1)
printf("calc_one_halo> starting...\n");
if(!XCORR)
GALAXY_DENSITY2 = GALAXY_DENSITY;
for(i=1;i<=n;++i)
{
r_g2=r[i]=exp((i-1)*dr + rlo);
fac=1.0/(2*PI*r_g2*r_g2*GALAXY_DENSITY*GALAXY_DENSITY2);
mlo = 4./3.*PI*RHO_CRIT*DELTA_HALO*OMEGA_M*pow(r[i]*.5,3.0);
if(mlo<HOD.M_low)
mlo = HOD.M_low;
if(XCORR)
s1=fac*qromo(func1_xcorr,log(mlo),log(HOD.M_max),midpnt)*0.5;
else
s1=fac*qromo(func1,log(mlo),log(HOD.M_max),midpnt);
xi[i]=s1;
if(OUTPUT>1)
printf("calc_one_halo> %f %e %e\n",r[i],s1,fac);
continue;
}
}
/* This is the function passed to qromo in the above routine.
* It is the number density of
* galaxy pairs in halos of mass m at separation r_g2.
* See Equation (11) from Berlind & Weinberg.
*/
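/* As a sketch (my reading of the code below): in the integration variable
* ln m, the contribution of halos of mass m is
*
*   dndM_interp(m) * [ 0.5*moment_ss(m)*dFdx_ss(r/rvir,c)
*                      + N_cen(m)*N_sat(m)*dFdx_cs(r/rvir,c) ] * m / rvir,
*
* i.e. satellite-satellite plus central-satellite pairs weighted by the pair
* separation distribution inside the halo; note that rvir as computed here is
* 2*R_vir (the halo diameter).
*/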
double func1(double m)
{
double N,n,fac2,rvir,f_ss,f_cs,cvir,x,rfof,ncen,nsat;
m=exp(m);
cvir=halo_concentration(m)*CVIR_FAC;
n=dndM_interp(m);
nsat=N_sat(m);
ncen=N_cen(m);
rvir=2*pow(3.0*m/(4*DELTA_HALO*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
/* Break up the contribution of pairs into
* central-satellite (cs) and satellite-satellite (ss) pairs.
*/
f_ss=dFdx_ss(r_g2/rvir,cvir)*moment_ss(m)*0.5;
f_cs=dFdx_cs(r_g2/rvir,cvir)*nsat*ncen;
x=n*(f_ss+f_cs)/rvir*m;
return(x);
}
double func1satsat(double m)
{
double N,n,fac2,rvir,f_ss,f_cs,cvir,x,rfof,ncen,nsat;
m=exp(m);
cvir=halo_concentration(m)*CVIR_FAC;
n=dndM_interp(m);
nsat=N_sat(m);
ncen=N_cen(m);
rvir=2*pow(3.0*m/(4*DELTA_HALO*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
/* Break up the contribution of pairs into
* central-satellite (cs) and satellite-satellite (ss) pairs.
*/
f_ss=dFdx_ss(r_g2/rvir,cvir)*moment_ss(m)*0.5;
x=n*(f_ss)/rvir*m;
return(x);
}
double func1cs(double m)
{
double N,n,fac2,rvir,f_ss,f_cs,cvir,x,rfof,ncen,nsat;
m=exp(m);
cvir=halo_concentration(m)*CVIR_FAC;
n=dndM_interp(m);
nsat=N_sat(m);
ncen=N_cen(m);
rvir=2*pow(3.0*m/(4*DELTA_HALO*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
/* Break up the contribution of pairs into
* central-satellite (cs) and satellite-satellite (ss) pairs.
*/
f_cs=dFdx_cs(r_g2/rvir,cvir)*nsat*ncen;
x=n*(f_cs)/rvir*m;
return(x);
}
/* Same as above, only now we're calculating the number of pairs
* in the cross-correlation.
*
* NB! -- We're assuming that the satellite galaxy profiles
* of both HODs are the same.
*/
double func1_xcorr(double m)
{
double N,n,fac2,rvir,f_ss=0,f_cs=0,cvir,x,rfof,ncen1,nsat1,ncen2,nsat2;
m=exp(m);
cvir=halo_concentration(m)*CVIR_FAC;
n=dndM_interp(m);
nsat1=N_sat(m);
ncen1=N_cen(m);
nsat2=N_sat2(m);
ncen2=N_cen2(m);
rvir=2*pow(3.0*m/(4*DELTA_HALO*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
/* Break up the contribution of pairs into
* central-satellite (cs) and satellite-satellite (ss) pairs.
* But for x-corr, we have c1-s2, c2-s1, s1-s2.
*/
f_ss=dFdx_ss(r_g2/rvir,cvir)*nsat1*nsat2;
f_cs=dFdx_cs(r_g2/rvir,cvir)*(nsat1*ncen2 + nsat2*ncen1);
x=n*(f_ss+f_cs)/rvir*m;
return(x);
}
/* The public version doesn't currently support cross-correlations.
*/
double N_sat2(double m)
{
return 0;
}
double N_cen2(double m)
{
return 0;
}

View file

@@ -1,175 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
double m_g3,r_g3,*x_g3,*y_g3,*z_g3;
double n_g3;
int flag_g3=0;
double func_ng(double m);
double func_ng2(double m);
/* The restricted number density of galaxy pairs at separation r
* (used for halo exclusion in the two-halo term).
*/
double restricted_number_density(double r)
{
static int flag=1;
static double *x,*y,*y2;
int i,n=50,j;
double mlimit,dlogm,logm,mmin,sum=0,t0,t1,s1,s2,s3,m,r1,r2,ng2,rlim,rmin,rmax;
if(flag)
{
n_g3 = n;
x_g3=dvector(1,n);
y_g3=dvector(1,n);
z_g3=dvector(1,n);
flag=0;
}
/* Reset the static variables in this function.
*/
func_ng2(-1);
r_g3=r;
ng2=GALAXY_DENSITY*GALAXY_DENSITY;
/* Calculate the maximum allowable halo mass, which has
* rvir = r_g3 - rvir(M_low).
*/
r1=pow(3.*HOD.M_low/(4.*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),1.0/3.0);
rlim = r_g3 - r1;
mlimit=log(4./3.*DELTA_HALO*RHO_CRIT*PI*rlim*rlim*rlim*OMEGA_M);
if(mlimit>log(HOD.M_max))mlimit=log(HOD.M_max);
mmin=log(HOD.M_low);
if(HOD.color==2)
{
dlogm=(mlimit-mmin)/(n-1);
m = mmin;
for(i=1;i<=n;++i)
{
if(N_avg(exp(m))>0)break;
m += dlogm;
}
mmin = m;
r1=pow(3.*exp(mmin)/(4.*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),1.0/3.0);
rlim = r_g3 - r1;
mlimit=log(4./3.*DELTA_HALO*RHO_CRIT*PI*rlim*rlim*rlim*OMEGA_M);
}
if(EXCLUSION==2) {
dlogm=(mlimit-mmin)/(n-1);
x_g3[1] = mmin;
y_g3[1] = qromo(func_galaxy_density,mmin,mmin+dlogm,midpnt);
for(i=2;i<=n;++i)
{
x_g3[i] = i*dlogm+mmin;
y_g3[i] = y_g3[i-1] + qromo(func_galaxy_density,(i-1)*dlogm+mmin,mmin+i*dlogm,midpnt);
}
spline(x_g3,y_g3,n,1.0E+30,1.0E+30,z_g3);
s1 = qromo(func_ng2,mmin,mlimit,midpnt);
return(sqrt(s1));
}
/* Calculate the double integral at specified masses.
*/
dlogm=(mlimit-mmin)/(n-1);
for(i=1;i<=n;++i)
{
logm=(i-0.5)*dlogm+mmin;
m_g3=exp(logm);
r2 = pow(3*m_g3/(4*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),1.0/3.0);
if(EXCLUSION==3) {
if(ellipsoidal_exclusion_probability(r1/r2,r_g3/(r1+r2))==0)break; }
else {
if(r1+r2>r_g3)break; }
s1=qtrap(func_ng,mmin,mlimit,1.0E-4);
sum+=s1*m_g3*dlogm;
if(s1==0)break;
if(sum>=ng2)break;
}
return sqrt(sum);
}
double func_ng2(double m)
{
static double fac2=-1,fac1=-1;
double s1,rv1,n,N,m1,mx;
if(m<0)
{
fac1=fac2=-1;
return(0);
}
m1=exp(m);
if(fac2<0)
fac2=pow(3.0/(4.*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),1.0/3.0);
if(fac1<0)
fac1=4./3.*PI*RHO_CRIT*DELTA_HALO*OMEGA_M;
rv1 = r_g3 - pow(m1,1.0/3.0)*fac2;
rv1 = rv1;
mx = fac1*rv1*rv1*rv1;
n=dndM_interp(m1);
N=N_avg(m1);
splint(x_g3,y_g3,z_g3,n_g3,log(mx),&s1);
return(n*N*s1*m1);
}
double func_ng(double m)
{
static double fac2=-1;
double s1,rv1,rv2,exfac=1,n,N;
m=exp(m);
if(fac2<0)
fac2=pow(3.0/(4*DELTA_HALO*PI*RHO_CRIT*OMEGA_M),1.0/3.0);
rv1=pow(m_g3,1.0/3.0)*fac2;
rv2=pow(m,1.0/3.0)*fac2;
if(EXCLUSION==3)
{
if(0.5*(rv1+rv2)>r_g3)return(0);
if(1.5*(rv1+rv2)>r_g3)exfac=ellipsoidal_exclusion_probability(rv2/rv1,r_g3/(rv2+rv1));
}
else
{
if(rv1+rv2>r_g3)return(0);
}
n=dndM_interp(m)*dndM_interp(m_g3);
N=N_avg(m)*N_avg(m_g3);
return(exfac*n*N*m);
}
/* This is the probability that two halos do not overlap, given their
* radii and separation. Of course, for spherical halos P(x) is a step function
* at x = (r1+r2)/r_sep = 1, but for ellipsoidal halos there is a chance
* that they could be closer. In detail, P(x) changes depending on the mass
* ratio of the halos, but using tabulated values does not appear to make a
* significant difference in the results for xi_2h(r). The function below is
* a fit to Monte Carlo results for halos with a distribution of axis ratios
* which is lognormal in e_b = (1-b/a) and e_c = (1-c/a) with dispersions of 0.2,
* mean <b/a>=0.9 and <c/a>=0.8 (pretty reasonable values).
*/
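/* In practice the fit below is a smooth step in x = r_sep/(R1+R2): P=0 for
* x<0.8, P=1 for x>1.09, and P = 3t^2 - 2t^3 with t=(x-0.8)/0.29 in between.
*/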
double ellipsoidal_exclusion_probability(double rv, double r)
{
static int flag=0,nr=101,nratio=31;
static double **xprob,*rad,*ratio,rhi,rlo,mhi,mlo,dr,dm;
float x1,x2,x3;
int i,j,im,ir;
FILE *fp;
if(rv<1)rv=1.0/rv;
r=(r-0.8)/0.29;
if(r>1)return(1.0);
if(r<0)return(0.0);
return(3*r*r-2*r*r*r);
}

View file

@@ -1,5 +0,0 @@
#include "header.h"
void test(int argc, char **argv)
{
}

View file

@@ -1,140 +0,0 @@
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "header.h"
// Transfer function of Eisenstein & Hu 1998
// (Equation numbers refer to this paper)
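// tf_eisenstein_hu(k) expects k in h/Mpc and returns T(k) by spline
// interpolation of log T vs. log k, tabulated over 1e-4 < k < 1e4 and
// re-tabulated whenever RESET_COSMOLOGY changes; calc_tf_eh(k) evaluates the
// fitting formulae directly.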
double calc_tf_eh(double k);
double tf_eisenstein_hu(double k)
{
static int flag=0,prev_cosmo=0;
static double *x,*y,*y2;
int n=1000,i;
double a,rlo=1.0E-4,rhi=1.0E+4,dlogr,klo;
double xi_linear_int();
if(!flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
dlogr = (log(rhi)-log(rlo))/(n-1);
for(i=1;i<=n;++i)
{
x[i] = exp((i-1)*dlogr)*rlo;
y[i] = log(calc_tf_eh(x[i]));
//printf("TK %e %e %e %e %e\n",x[i],exp(y[i]),exp(y[i]),exp(y[i]),exp(y[i]));
x[i] = log(x[i]);
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
prev_cosmo=RESET_COSMOLOGY;
}
splint(x,y,y2,n,log(k),&a);
return(exp(a));
}
double calc_tf_eh(double k)
{
double rk,e,thet,thetsq,thetpf,b1,b2,zd,ze,rd,re,rke,s,rks,q,y,g;
double ab,a1,a2,ac,bc,f,c1,c2,tc,bb,bn,ss,tb,tk_eh;
double h,hsq,om_mhsq,om_b,om_m;
// set up cosmology
h = HUBBLE;
om_m = OMEGA_M;
om_b = OMEGA_B;
// convert k to Mpc^-1 rather than hMpc^-1
rk=k*h;
hsq=h*h;
om_mhsq=om_m*hsq;
// constants
e=exp(1.);
thet=2.728/2.7;
thetsq=thet*thet;
thetpf=thetsq*thetsq;
// Equation 4 - redshift of drag epoch
b1=0.313*pow(om_mhsq,-0.419)*(1.+0.607*pow(om_mhsq,0.674));
b2=0.238*pow(om_mhsq,0.223);
zd=1291.*(1.+b1*pow(om_b*hsq,b2))*pow(om_mhsq,0.251)
/(1.+0.659*pow(om_mhsq,0.828));
// Equation 2 - redshift of matter-radiation equality
ze=2.50e4*om_mhsq/thetpf;
// value of R=(ratio of baryon-photon momentum density) at drag epoch
rd=31500.*om_b*hsq/(thetpf*zd);
// value of R=(ratio of baryon-photon momentum density) at epoch of matter-radiation equality
re=31500.*om_b*hsq/(thetpf*ze);
// Equation 3 - scale of ptcle horizon at matter-radiation equality
rke=7.46e-2*om_mhsq/(thetsq);
// Equation 6 - sound horizon at drag epoch
s=(2./3./rke)*sqrt(6./re)*log((sqrt(1.+rd)+sqrt(rd+re))/(1.+sqrt(re)));
// Equation 7 - silk damping scale
rks=1.6*pow(om_b*hsq,0.52)*pow(om_mhsq,0.73)*(1.+pow(10.4*om_mhsq,-0.95));
// Equation 10 - define q
q=rk/13.41/rke;
// Equations 11 - CDM transfer function fits
a1=pow(46.9*om_mhsq,0.670)*(1.+pow(32.1*om_mhsq,-0.532));
a2=pow(12.0*om_mhsq,0.424)*(1.+pow(45.0*om_mhsq,-0.582));
ac=pow(a1,(-om_b/om_m))*pow(a2,pow(-(om_b/om_m),3.));
// Equations 12 - CDM transfer function fits
b1=0.944/(1.+pow(458.*om_mhsq,-0.708));
b2=pow(0.395*om_mhsq,-0.0266);
bc=1./(1.+b1*(pow(1.-om_b/om_m,b2)-1.));
// Equation 18
f=1./(1.+pow(rk*s/5.4,4.));
// Equation 20
c1=14.2 + 386./(1.+69.9*pow(q,1.08));
c2=14.2/ac + 386./(1.+69.9*pow(q,1.08));
// Equation 17 - CDM transfer function
tc=f*log(e+1.8*bc*q)/(log(e+1.8*bc*q)+c1*q*q) +
(1.-f)*log(e+1.8*bc*q)/(log(e+1.8*bc*q)+c2*q*q);
// Equation 15
y=(1.+ze)/(1.+zd);
g=y*(-6.*sqrt(1.+y)+(2.+3.*y)*log((sqrt(1.+y)+1.)/(sqrt(1.+y)-1.)));
// Equation 14
ab=g*2.07*rke*s/pow(1.+rd,0.75);
// Equation 23
bn=8.41*pow(om_mhsq,0.435);
// Equation 22
ss=s/pow(1.+pow(bn/rk/s,3.),1./3.);
// Equation 24
bb=0.5+(om_b/om_m) + (3.-2.*om_b/om_m)*sqrt(pow(17.2*om_mhsq,2.)+1.);
// Equations 19 & 21
tb=log(e+1.8*q)/(log(e+1.8*q)+c1*q*q)/(1+pow(rk*s/5.2,2.));
tb=(tb+ab*exp(-pow(rk/rks,1.4))/(1.+pow(bb/rk/s,3.)))*sin(rk*ss)/rk/ss;
// Equation 8
tk_eh=(om_b/om_m)*tb+(1.-om_b/om_m)*tc;
return tk_eh;
}

View file

@@ -1,70 +0,0 @@
/* PROGRAM TRANSFERFUNCTION
--- transfnc(xk)
--- compute the transfer function T(k) of the power spectrum
--- T(k) defined as P(k) = k^{xindx}*T^{2}(k)
* itrans=type of transfer function
0 -> no change (returns 1)
4 -> Efstathiou, Bond & White transfer function with Gamma as
specified (eqn. 7)
5 -> Eisenstein & Hu
11 -> read in TF from file (usually CMBFAST)
NOTE: xk is in h/Mpc and is defined as k=2pi/lambda (not k=1/lambda)
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "header.h"
/* EBW CDM parameters */
#define aa 6.4
#define bb 3.0
#define cc 1.7
#define xnu 1.13
#define twopi 6.283185
#define xnuinv -0.884956
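/* With these constants, the ITRANS=4 branch below evaluates the
* Efstathiou, Bond & White fit
*
*   T(k) = [ 1 + ( aa*q + (bb*q)^1.5 + (cc*q)^2 )^xnu ]^(-1/xnu),  q = k/GAMMA,
*
* where xnuinv = -1/xnu (= -1/1.13).
*/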
double transfnc(double xk)
{
double transf;
double q,t1,t2;
if(xk==0.)
{
transf=1.;
return (double)transf;
}
switch(ITRANS)
{
case 0:
transf=1.;
break;
case 4:
q = xk/GAMMA;
t1=aa*q+pow((bb*q),1.5)+(cc*q)*(cc*q);
t2=pow(t1,xnu);
transf=pow((1.+t2),xnuinv);
break;
case 5:
transf = tf_eisenstein_hu(xk);
break;
case 11:
transf = transfunc_file(xk);
break;
default:
fprintf(stderr,"transfnc> Unrecognized transfer function %d \n",ITRANS);
exit(-1);
break;
}
return (double)transf;
}

View file

@@ -1,59 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* This routine reads in a transfer function from a file.
* - col 1 = k [h/Mpc]
* - col 2 = T(k)
* other columns not used.
*
* The TF is considered to be un-normalized, and all values are normalized
* by the entry in the first line.
*
* The TF is stored in arrays and spline interpolation is used at all k values.
* The interpolation is in log(k)/log(TF) to preserve the power-law dependence
* of T(k) on k outside the k-range of the file.
*/
double transfunc_file(double xk)
{
static double *x,*y,*y2;
static int flag=1,n;
int i;
double t,x0;
FILE *fp;
char a[1000];
float x1,x2;
if(flag)
{
flag=0;
fp=openfile(Files.TF_file);
n=filesize(fp);
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
for(i=1;i<=n;++i)
{
fscanf(fp,"%f %f",&x1,&x2);
x[i]=x1;
y[i]=x2;
if(i==1)x0=y[i];
fgets(a,1000,fp);
y[i]/=x0;
x[i]=log(x[i]);
y[i]=log(y[i]);
}
fclose(fp);
spline(x,y,n,1.0E+30,1.0E+30,y2);
}
xk=log(xk);
splint(x,y,y2,n,xk,&t);
return(exp(t));
}

View file

@@ -1,511 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "header.h"
/* This is the two-halo term of the correlation function in real space.
* This is solved in Fourier space and requires the
* non-linear power spectrum P_nl(k), which is from the model of Smith et al.
* (see Smith et al, astro-ph/0207664).
*
* For specifics see Berlind & Weinberg (2002), Zheng (2003), Tinker et al (2005)
*
*/
/* Internal functions.
*/
double func2(double m);
double func2_cen(double m);
double func2_sat(double m);
double func5(double xk);
double func_mlimit(double m);
void calc_real_space_two_halo(double *r, double *xi, int *n);
double HOD2_two_halo_real_space(double r);
/* the restricted number density.
*/
double NG_MINUS2;
/* Globals needed for the qromo functions.
*/
double r_g1,
k1;
/* Global checkflag to stop doing the restricted
* number density.
*/
int checkflag;
/* This tabulates the two-halo real-space term for future
* spline interpolation.
*/
double two_halo_real_space(double r)
{
static int flag=0,n=45;
static double *x,*y,*y2;
int i;
double max=16,min=9,a,rvir_min;
float x1,x2;
FILE *fp;
// if(r<25)
// return(nbody_two_halo(r));
if(!flag || RESET_FLAG_2H)
{
if(!LINEAR_PSP)
nonlinear_sigmac(8.0);
n=30;
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
RESET_FLAG_2H=0;
flag=1;
if(OUTPUT>1)
fprintf(stdout,"Calculating real-space two-halo term...\n");
calc_real_space_two_halo(x,y,&n);
if(HOD.color!=2)
check_for_smoothness(x,y,n,1.5);
XI_MAX_RADIUS=x[n];
if(XCORR)
for(i=1;i<=n;++i)
{
a = HOD2_two_halo_real_space(x[i]);
y[i] = a*y[i];
if(y[i]<0)y[i]=-1;
else y[i] = sqrt(y[i]);
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
}
if(r<R_MIN_2HALO)return(-1);
if(r>XI_MAX_RADIUS)return(-1);
splint(x,y,y2,n,r,&a);
/* This check is against the spline interpolation, which
* sometimes has (smoothness) problems with the sharp truncation of xi_2h
* at small separations.
*/
if(a<-1)return(-1);
return(a);
}
void calc_real_space_two_halo(double *r, double *xi, int *nn)
{
double xtemp[200],rtemp[200];
double mlimit,s2,rlo,rhi=90,dr,klo,tolerance=1.0e-7,s1;
int i,j,imin=0,n;
double t0,t1,t2,t1s=0,t2s=0;
n=*nn;
/* Set the minimum separation of two-halo pairs.
*/
if(HOD.color)
{
while(N_avg(HOD.M_low)<0.001)
HOD.M_low*=1.01;
}
rlo = 2.2*pow(3*HOD.M_low/(4*DELTA_HALO*PI*OMEGA_M*RHO_CRIT),1.0/3.0);
if(EXCLUSION==3)
rlo = rlo/1.3;
if(EXCLUSION==4)
rlo = rlo/2.1;
R_MIN_2HALO = rlo;
rlo = log(rlo);
dr=(log(rhi)-rlo)/(n-1);
checkflag=0;
for(i=1;i<=n;++i)
{
r_g1=r[i]=exp(rlo+dr*(i-1));
if(r_g1>30)
{
GALAXY_BIAS = qromo(func_galaxy_bias,log(HOD.M_low),log(HOD.M_max),midpnt)/GALAXY_DENSITY;
xi[i]=xi_interp(r_g1)*GALAXY_BIAS*GALAXY_BIAS;
continue;
}
/* Fourier transform. If BOX_SIZE specified, then truncate
* transform at that k. If no box, take integral to k=0.
*/
klo = 0;
if(BOX_SIZE)klo=1/BOX_SIZE;
j=16;
s1 = qromo(func5,klo,j*TWOPI/r_g1,midpnt);
s2 = s1;
klo = j*TWOPI/r_g1;
while(fabs(s1)>tolerance*s2) {
j+=16;
s1 = qromo(func5,klo,j*TWOPI/r_g1,midpnt);
s2 += s1;
klo = j*TWOPI/r_g1;
}
/* Divide the integral by the square of the restricted number density.
* (This is calculated in the psp_gg_2h function.)
*/
s2=s2/NG_MINUS2;
/* Correction factor b/c we haven't been
* including all halos.
*/
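/* In equation form, this is the n_g-matched correction
*
*   xi_2h(r) = (n_g'/n_g)^2 * [1 + xi'_2h(r)] - 1,
*
* where n_g'^2 = NG_MINUS2 is the restricted pair density and xi'_2h = s2 is
* the correlation of the included halo pairs.
*/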
xi[i]=(NG_MINUS2/GALAXY_DENSITY/GALAXY_DENSITY)*(1+s2)-1;
if(isnan(xi[i]))xi[i]=-1;
if(xi[i]==-1)imin=i;
if(OUTPUT>1)
printf("calc_2halo> %f %f %d\n",r[i],xi[i],checkflag);
//printf("calc_2halo> %f %f %d\n",r[i],xi[i],checkflag);
}
/* Eliminate all entries which have xi=-1
* (to avoid a "wavy" spline fit at small r).
*/
for(j=0,i=imin+1;i<=n;++i)
{
j++;
xtemp[j]=xi[i];
rtemp[j]=r[i];
}
n=j;
for(i=1;i<=n;++i)
{
r[i]=rtemp[i];
xi[i]=xtemp[i];
}
*nn=n;
R_MIN_2HALO=r[1];
/*printf("calc_2halo> %d %d %f\n",imin,n,r[1]);*/
}
/* This calculates and tabulates the galaxy power spectrum. This is done
* by taking the galaxy-number-weighted halo bias (with scale-dependent
* halo bias and extended structure of halos taken into account) and multiplying
* it by the non-linear matter power spectrum.
*
* The galaxy-number-weighted average is an integral over the halo mass function
* (see Equations B10 & B12 in Appendix B of Tinker et al.) over all allowed halo
* pairs.
*
* The allowed number of halo pairs is controlled by the type of halo exclusion used:
* EXCLUSION = 1 --> only halos with Rvir < r/2
* EXCLUSION = 2 --> only halo PAIRS with R1+R2 < r ("spherical exclusion")
* EXCLUSION = 3 --> same as 2, but with ellipsoidal exclusion
* EXCLUSION = 4 --> only halos with Rvir < r
*
* 1 and 4 are fast b/c the integral is separable, calculated once and squared. Can drastically
* underestimate the number of small-sep pairs.
*
* For 2/3, I've incorporated the approximation in Tinker et al. 2005 called the n_g-matched
* method, where instead of actually doing the double integral over halo_mass_1 and halo_mass_2,
* I calculate the number density of that type of exclusion, and calculate the effective
* radius for the 1/4-type exclusion -- i.e., halos with Rvir < [x]*r where 0.5<[x]<1.0.
*/
double psp_gg_2h(double k, double r)
{
static double rp=-1,*x,*y,*y2;
static int flag=0;
int n=60,i;
double a,dk,klo=-3,khi=3,xk,s1,s2,mhi,mlo,mhi1,mlo1;
double t1,t0,ttot1,ttot2;
/* The fourier transform is done at each r, so tabulate the power spectrum
* at all k for a given r. If r has changed, re-tabulate.
*/
if(rp!=r)
{
mlo=HOD.M_low;
if(!flag)
{
flag=1;
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
dk=(khi-klo)/n;
switch(EXCLUSION) {
case 1:
mhi=4./3.*DELTA_HALO*RHO_CRIT*PI*r*r*r*OMEGA_M*0.125;
if(mhi>HOD.M_max)mhi=HOD.M_max;
NG_MINUS2 = qromo(func_galaxy_density,log(mlo),log(mhi),midpnt);
NG_MINUS2*=NG_MINUS2;
for(i=n;i>=1;--i)
{
xk=pow(10.0,klo+dk*i);
k1=xk;
if(nfw_transform(xk,mhi)<0.999)
a=qromo(func2,log(mlo),log(mhi),midpnt);
x[i]=log(xk);
if(LINEAR_PSP)
y[i]=log(linear_power_spectrum(xk)*a*a);
else
y[i]=log(nonlinear_power_spectrum(xk)*a*a);
}
break;
case 2:
case 3:
if(!checkflag)
s2=restricted_number_density(r);
else
s2=GALAXY_DENSITY;
mhi=4./3.*DELTA_HALO*RHO_CRIT*PI*r*r*r*OMEGA_M*0.125;
if(s2>=GALAXY_DENSITY || mhi>HOD.M_max)
{
s2=GALAXY_DENSITY;
mhi=log(HOD.M_max);
checkflag=1;
}
else
{
mhi=0;
NG_MINUS2=s2;
if(N_avg(HOD.M_low*1.001)>0)
{
if(qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_low*1.0001),midpnt)
>NG_MINUS2)mhi=log(HOD.M_low*1.0001);
}
if(qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max*4.0),midpnt)
<NG_MINUS2)mhi=log(HOD.M_max);
if(!mhi)
mhi=zbrent(func_mlimit,log(HOD.M_low*1.0001),log(HOD.M_max*4.0),1.0E-4);
if(mhi>log(HOD.M_max))mhi=log(HOD.M_max);
}
NG_MINUS2=s2*s2;
for(i=n;i>=1;--i)
{
xk=pow(10.0,klo+dk*i);
k1=xk;
if(nfw_transform(xk,exp(mhi))<0.999) {
if(HOD.pdfc==7)/* || HOD.pdfc==6 || HOD.pdfc==9 || HOD.pdfc==8)*/
{
a = qromo(func2_cen,log(HOD.M_low),log(HOD.M_cen_max),midpnt);
a += qromo(func2_sat,log(HOD.M_low),log(HOD.M_max),midpnt);
}
else {
MAG_21_CHECK:
a=qromo(func2,log(mlo),mhi,midpnt);
}
}
x[i]=log(xk);
if(LINEAR_PSP)
y[i]=log(linear_power_spectrum(xk)*a*a);
else
y[i]=log(nonlinear_power_spectrum(xk)*a*a);
}
break;
case 4: // where max halo mass is M(R=r) rather than M(R=r/2)
mhi=4./3.*DELTA_HALO*RHO_CRIT*PI*r*r*r*OMEGA_M;
if(mhi>HOD.M_max)mhi=HOD.M_max;
NG_MINUS2 = qromo(func_galaxy_density,log(mlo),log(mhi),midpnt);
NG_MINUS2*=NG_MINUS2;
for(i=n;i>=1;--i)
{
xk=pow(10.0,klo+dk*i);
k1=xk;
if(nfw_transform(xk,mhi)<0.999)
a=qromo(func2,log(mlo),log(mhi),midpnt);
x[i]=log(xk);
if(LINEAR_PSP)
y[i]=log(linear_power_spectrum(xk)*a*a);
else
y[i]=log(nonlinear_power_spectrum(xk)*a*a);
}
break;
default:
endrun("Error: invalid choice of EXCLUSION");
}
spline(x,y,n,2.0E+30,2.0E+30,y2);
rp=r;
}
k=log(k);
splint(x,y,y2,n,k,&a);
return(exp(a));
}
/* The routine called by qromo for the integral in Zheng's Eq [7]
*/
double func2(double m)
{
double n,N,b,yg,x,Ncen,Nsat;
m=exp(m);
n=dndM_interp(m);
b=bias_interp(m,r_g1);
Ncen=N_cen(m);
Nsat=N_sat(m);
yg=nfw_transform(k1,m);
x = n*(Ncen + Nsat*yg)*b*m;
return(x);
}
/* The routine called by qromo for the integral in Zheng's Eq [7]
* FOR CENTRAL GALAXIES ONLY: This means no Fourier transform of NFW profile.
*/
double func2_cen(double m)
{
double n,N,b,yg,x;
m=exp(m);
n=dndM_interp(m);
b=bias_interp(m,r_g1);
N=N_cen(m);
x=n*N*b*m;
return(x);
}
/* The routine called by qromo for the integral in Zheng's Eq [7]
* FOR SATELLITE GALAXIES ONLY.
*/
double func2_sat(double m)
{
double n,N,b,yg,x;
m=exp(m);
n=dndM_interp(m);
b=bias_interp(m,r_g1);
N=N_sat(m);
yg=nfw_transform(k1,m);
x=n*N*b*yg*m;
return(x);
}
/* This is the integrand of the Fourier transform that converts
* the two-halo power spectrum into the correlation function.
*/
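/* Up to the later division by the restricted number density, the transform
* evaluated by qromo in calc_real_space_two_halo is (with Delta^2_gg the
* dimensionless power returned by psp_gg_2h)
*
*   xi_2h(r) = Integral_0^inf Delta^2_gg(k) * [sin(kr)/(kr)] * dk/k .
*/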
double func5(double xk)
{
double psp1;
double xk1,xk2,x3;
if(xk==0)return(0);
psp1=psp_gg_2h(xk,r_g1);
xk1=r_g1*xk;
psp1*=sin(xk1)/xk1/xk;
return(psp1);
}
/* This is the function sent to zbrent to calculate the halo mass
* which gives the matched restricted number density.
*/
double func_mlimit(double m)
{
double s1;
if(N_avg(exp(m))>0)
s1=qromo(func_galaxy_density,log(HOD.M_low),m,midpnt);
else
s1=0;
return(s1-NG_MINUS2);
}
/* This tabulates the two-halo real-space term
* for the second HOD function for future
* spline interpolation.
*/
double HOD2_two_halo_real_space(double r)
{
static int flag=0,n=45;
static double *x,*y,*y2;
int i;
double max=16,min=9,a,rvir_min,galtemp;
float x1,x2;
FILE *fp;
if(!flag || RESET_FLAG_2H)
{
/* Switch HODs temporarily
*/
HODt.M_min = HOD.M_min;
HODt.M_low = HOD.M_low;
HODt.M1 = HOD.M1;
HODt.alpha = HOD.alpha;
HODt.M_cen_max = HOD.M_cen_max;
HODt.sigma_logM = HOD.sigma_logM;
HODt.M_max = HOD.M_max;
HODt.pdfc = HOD.pdfc;
HODt.pdfs = HOD.pdfs;
galtemp = GALAXY_DENSITY;
HOD.M_min = HOD2.M_min;
HOD.M_low = HOD2.M_low;
HOD.M1 = HOD2.M1;
HOD.alpha = HOD2.alpha;
HOD.M_cen_max = HOD2.M_cen_max;
HOD.sigma_logM = HOD2.sigma_logM;
HOD.M_max = HOD2.M_max;
HOD.pdfc = HOD2.pdfc;
HOD.pdfs = HOD2.pdfs;
GALAXY_DENSITY = GALAXY_DENSITY2;
set_HOD_params();
if(!LINEAR_PSP)
nonlinear_sigmac(8.0);
n=30;
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
RESET_FLAG_2H=0;
flag=1;
if(OUTPUT)
fprintf(stdout,"Calculating HOD2 real-space two-halo term...\n");
calc_real_space_two_halo(x,y,&n);
check_for_smoothness(x,y,n,1.5);
XI_MAX_RADIUS=x[n];
spline(x,y,n,2.0E+30,2.0E+30,y2);
/* Switch HODs back
*/
HOD.M_min = HODt.M_min;
HOD.M_low = HODt.M_low;
HOD.M1 = HODt.M1;
HOD.alpha = HODt.alpha;
HOD.M_cen_max = HODt.M_cen_max;
HOD.sigma_logM = HODt.sigma_logM;
HOD.M_max = HODt.M_max;
HOD.pdfc = HODt.pdfc;
HOD.pdfs = HODt.pdfs;
GALAXY_DENSITY = galtemp;
set_HOD_params();
}
if(r>XI_MAX_RADIUS)return(-1);
if(r<R_MIN_2HALO)return(-1);
splint(x,y,y2,n,r,&a);
/* This check is against the spline interpolation, which
* sometimes has (smoothness) problems with the sharp truncation of xi_2h
* at small separations.
*/
if(a<-1)return(-1);
return(a);
}

View file

@@ -1,745 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#ifdef PARALLEL
#include <mpi.h>
#endif
#include "header.h"
/* This is a series of routines to calculate the projected correlation
* function from the real-space one (HOD calculation) and then chi2
* minimize the parameters based on the SDSS data.
*/
int FIT_WITH_COSMO = 0,
FIRST_CALL = 0;
double rp_g1;
double func_wp(double z);
double func_wp_rspace(double z);
double func_wp_matter(double z);
double chi2_wp(double *a);
void wp_input(void);
void initial_wp_values(double *a, double **pp, double *yy);
/* This integrates the calculated real-space correlation function
* along the line-of-sight to get the projected correlation function wp(rp).
* The value passed is the projected separation.
*
* In most SDSS work, the maximum line-of-sight separation considered when
* calculating wp is pi=40 Mpc/h, which is the default, but can be set
* in the parameter file if desired.
*
* NB! - note that this routine uses a correction for redshift space distortions.
*/
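/* In equation form, the quantity computed below is
*
*   wp(rp) = 2 * Integral_0^{pi_max} xi( sqrt(rp^2 + z^2) ) dz,
*
* evaluated in ln z from z=0.001 (hence the extra factor of z in func_wp).
*/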
double projected_xi(double r)
{
double x,zmax;
rp_g1=r*r;
two_halo_real_space(1.0);
zmax=sqrt(XI_MAX_RADIUS*XI_MAX_RADIUS-rp_g1);
if(zmax>wp.pi_max)zmax=wp.pi_max;
zmax = wp.pi_max;
x=2*qromo(func_wp,log(0.001),log(zmax),midpnt);
return(x);
}
/* Same as above, but it projects the real-space correlation function
* directly, with no correction for redshift-space effects.
*/
double projected_xi_rspace(double r)
{
double x,zmax;
rp_g1=r*r;
two_halo_real_space(1.0);
zmax=sqrt(XI_MAX_RADIUS*XI_MAX_RADIUS-rp_g1);
if(zmax>wp.pi_max)zmax=wp.pi_max;
zmax = wp.pi_max;
x=2*qromo(func_wp_rspace,log(0.001),log(zmax),midpnt);
return(x);
}
/* Same as above but for matter.
* I've set the maximum line-of-sight separation to be 50 Mpc/h,
* which should be good for visual comparison purposes.
*/
double projected_xi_matter(double r)
{
double x,zmax;
rp_g1=r*r;
zmax=50.0;
x=2*qtrap(func_wp_matter,log(0.001),log(zmax),1.0E-3);
return(x);
}
/* Function called from qromo/qtrap to get xi->wp
* Note that the two-halo term has the linear Kaiser distortion correction.
*/
double func_wp(double z)
{
double r;
z=exp(z);
r=sqrt(rp_g1 + z*z);
return(z*(one_halo_real_space(r)+linear_kaiser_distortion(r,z)));
}
/* Function called from qromo/qtrap to get xi->wp but without
* correction for redshift-space distortions in the two-halo term.
*/
double func_wp_rspace(double z)
{
double r;
z=exp(z);
r=sqrt(rp_g1 + z*z);
return(z*(one_halo_real_space(r)+two_halo_real_space(r)));
}
/* Function called from qromo/qtrap to get xi->wp (dark matter)
*/
double func_wp_matter(double z)
{
double r;
z=exp(z);
r=sqrt(rp_g1 + z*z);
return(z*(xi_interp(r)));
}
/********************************************************************
* Below is the actual minimization of the wp data.
* Relevant variables: [default]
*
* COVAR -> [1]=use covariance matrix; 0=diagonal error bars
* DEPROJECTED -> [1]=we're fitting a real-space xi(r) [0]= fitting wp(rp)
*
* HOD.free[] is a vector which holds 1/0 as to whether or not a parameter is going
* to be held constant during the chi^2 minimization. 1==vary 0==constant. The default
* for any parameter is [0].
*
* i variable
* --- --------
* [1] -> M_min
* [2] -> M1
* [3] -> alpha
* [4] -> M_cut
* [5] -> sigmaM
* [6] -> CVIR_FAC
* [7] -> MaxCen (M_cen_max)
*
* Currently I have no checks on whether the values of this vector line
* up correctly with the specific HOD pdfs, so double-check hod.bat files.
*
* Once the code is finished, it will output the values of the HOD parameters
* to a file called [filename].fit (+ the bias, satellite fraction, and chi^2).
* Then it outputs the mean <N>_M to a file called [filename].HOD.
* Then it will go through all the TASKS asked for in the hod.bat file.
*/
void wp_minimization(char *fname)
{
int n,niter,i,j;
double *a,**pp,*yy,FTOL=1.0E-3,chi2min,s1,dlogm,m;
FILE *fp;
char aa[1000];
fprintf(stderr,"\n\nCHI2 MINIMIZATION OF W_P(R_P) DATA..........\n");
fprintf(stderr, "--------------------------------------------\n\n");
OUTPUT = 0;
FIRST_CALL = 1;
if(POWELL)
FTOL=1.0E-3;
else
FTOL=1.0E-4;
for(n=0,i=1;i<=N_HOD_PARAMS;++i)
{
n+=HOD.free[i];
if(!OUTPUT)continue;
printf("wp_min> free[%i] = %d\n",i,HOD.free[i]);
}
if(XCORR)n*=2;
if(OUTPUT)printf("wp_min> Number of free parameters: %d\n",n);
printf("FNAME %s\n",Task.root_filename);
wp_input();
wp.ncf=n;
a=dvector(1,n);
if(POWELL)
pp=dmatrix(1,n,1,n);
else
pp=dmatrix(1,n+1,1,n);
yy=dvector(1,n+1);
initial_wp_values(a,pp,yy);
printf("IVALS %e %e %e %e\n",a[1],a[2],a[3],a[4]);
if(POWELL)
{
if(OUTPUT)printf("wp_min> starting powell.\n");
powell(a,pp,n,FTOL,&niter,&chi2min,chi2_wp);
chi2min = chi2_wp(a);
}
else
{
if(OUTPUT)printf("wp_min> starting amoeba.\n");
amoeba(pp,yy,n,FTOL,chi2_wp,&niter);
for(i=1;i<=n;++i)a[i]=pp[1][i];
chi2min = chi2_wp(a);
}
s1=qromo(func_galaxy_bias,log(HOD.M_low),log(HOD.M_max),midpnt);
GALAXY_BIAS=s1/GALAXY_DENSITY;
printf("POWELL %e %e ",chi2min,HOD.M_min);
for(i=1;i<=n;++i)printf("%e ",a[i]);
printf(" %f\n",GALAXY_BIAS);
/* These outputs are for easy cut & paste into
* another batch file.
*/
//output_parameter_file(fname);
/* Output the fit and the HOD curve.
*/
printf("FNAME2 %s\n",Task.root_filename);
sprintf(aa,"%s.fit",Task.root_filename);
fp=fopen(aa,"w");
fprintf(fp,"%e %e ",chi2min,HOD.M_min);
for(i=1;i<=n;++i)fprintf(fp,"%e ",a[i]);
fprintf(fp," %f\n",GALAXY_BIAS);
fclose(fp);
sprintf(aa,"%s.HOD",Task.root_filename);
fp=fopen(aa,"w");
dlogm=(log(HOD.M_max)-log(HOD.M_low))/99;
for(i=1;i<=100;++i)
{
m=exp((i-1)*dlogm)*HOD.M_low;
fprintf(fp,"%e %e %e %e\n",m,N_cen(m),N_sat(m),N_avg(m));
}
fclose(fp);
fprintf(stderr,"here\n");
free_dvector(a,1,n);
if(POWELL)
free_dmatrix(pp,1,n,1,n);
else
free_dmatrix(pp,1,n+1,1,n);
free_dvector(yy,1,n+1);
fprintf(stderr,"here\n");
free_dvector(wp.r,1,wp.np);
free_dvector(wp.x,1,wp.np);
free_dvector(wp.e,1,wp.np);
if(COVAR)
free_dmatrix(wp.covar,1,wp.np,1,wp.np);
fprintf(stderr,"done in wp_min\n");
}
double integrated_wp_bin(double r)
{
return(projected_xi(r)*r);
}
double integrated_xi_bin(double r)
{
//return((1+one_halo_real_space(r))*r*r);
return((one_halo_real_space(r)+two_halo_real_space(r))*r*r);
}
double chi2_wp(double *a)
{
static int flag=1,niter=0,ichi=-1;
static double *x,mmin_prev=0,t0=-1,t1,sig_prev=0,chi2_prev,chi2_array[10];
double **tmp,**tmp2,chi2,x1,ta1,ta2,dt1h,dt2h,par_chi,chi2ngal;
int i,j,k,ncf_hod;
double rlo,rhi,rmin,rmax,dlogr,integrated_wp_bin();
if(FIRST_CALL)
{
flag = 1;
FIRST_CALL = 0;
}
t0 = clock();
if(HOD.free[1])FIX_PARAM = 0;
wp.iter=niter;
for(j=0,i=1;i<=N_HOD_PARAMS;++i)
if(HOD.free[i])
if(a[++j]<=0) { printf("%d %e\n",j,a[j]); return(1.0E7); }
ncf_hod = j;
RESET_FLAG_1H=1;
RESET_FLAG_2H=1;
RESET_KAISER++;
i=0;j=0;
if(HOD.free[++i])HOD.M_min=a[++j];
if(HOD.free[++i])HOD.M1=a[++j];
if(HOD.free[++i])HOD.alpha=a[++j];
if(HOD.free[++i])HOD.M_cut=a[++j];
if(HOD.free[++i])HOD.sigma_logM=a[++j];
if(HOD.pdfc!=9) {
if(HOD.free[++i])CVIR_FAC=a[++j];
if(HOD.pdfc>=7) {
if(HOD.free[++i])HOD.M_cen_max=a[++j]; }
else {
if(HOD.free[++i])HOD.MaxCen=a[++j]; }
}
if(HOD.free[++i])HOD.M_sat_break=a[++j];
if(HOD.free[++i])HOD.alpha1=a[++j];
if(XCORR) {
i=0;
if(HOD.free[++i])HOD2.M_min=a[++j];
if(HOD.free[++i])HOD2.M1=a[++j];
if(HOD.free[++i])HOD2.alpha=a[++j];
if(HOD.free[++i])HOD2.M_cut=a[++j];
if(HOD.free[++i])HOD2.sigma_logM=a[++j];
if(HOD2.pdfc!=9) {
if(HOD.free[++i])CVIR_FAC=a[++j];
if(HOD2.pdfc>=7) {
if(HOD2.free[++i])HOD2.M_cen_max=a[++j]; }
else {
if(HOD2.free[++i])HOD2.MaxCen=a[++j]; }
}
}
if(!ThisTask) {
printf("START %d ",niter);
for(i=1;i<=ncf_hod;++i)printf("%e ",a[i]);
printf("\n");
}
if(HOD.pdfs==2 && HOD.M_cut<1.0e7)return(1.0e7);
if(HOD.pdfs==2 && HOD.M_cut>1.0e15)return(1.0e7);
/* if(HOD.M_min>HOD.M_max)return(1.0e7); */
/* I've noticed some problems when sigma_logM gets to be
* unreasonably high or low, so I've put some limits on the
* values it can take when doing mag-bin fitting.
*/
if(HOD.pdfc==6) {
if(HOD.sigma_logM<0.07)return(1.0e7);
if(HOD.sigma_logM>1.2)return(1.0e7);
}
if(HOD.pdfc==2 || HOD.pdfc==9) {
if(HOD.sigma_logM>1.8)return(1.0e7);
if(HOD.sigma_logM<0.05)return(1.0e7);
}
if(HOD.M1>1.0e17)return(1.0e7);
if(FIX_PARAM==2)
{
HOD.M1=HOD.M_low;
x1=qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt);
if(x1<GALAXY_DENSITY)return(1.0e7);
HOD.M1=pow(10.0,14.8);
x1=qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt);
if(x1>GALAXY_DENSITY)return(1.0e7);
HOD.M1=0;
}
/* Check to make sure these are reasonable parameters
* (Assuming that M_min is NOT a FREE parameter but is
* calculated from the GALAXY_DENSITY.)
*/
if(FIX_PARAM==1 && !HOD.color)
{
HOD.M_min=pow(10.0,8.0);
HOD.M_low=set_low_mass();
if(HOD.M_low<1.0e8)HOD.M_low=1.0e8;
x1=qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt);
// fprintf(stderr,"PC1 %e %e\n",x1,GALAXY_DENSITY);
if(x1<GALAXY_DENSITY) {
fprintf(stdout,"PCHECK %e %e %e\n",x1,GALAXY_DENSITY,HOD.M_low);
return(1.0e7); }
HOD.M_min=pow(10.0,14.8);
if(HOD.pdfc==7 || HOD.pdfc==8)
HOD.M_min=HOD.M_cen_max*0.99;
HOD.M_low=set_low_mass();
x1=qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt);
/* fprintf(stderr,"PC2 %e %e %e %e\n",HOD.M_min,HOD.M_low,x1,GALAXY_DENSITY); */
if(x1>GALAXY_DENSITY) {
fprintf(stdout,"PCHECK %e %e\n",x1,GALAXY_DENSITY);
return(1.0e7); }
HOD.M_min=0;
}
if(ERROR_FLAG)
{
ERROR_FLAG=0;
return(1e7);
}
if(HOD.free[0] || HOD.free[1])
GALAXY_DENSITY=0;
if(!HOD.color)
set_HOD_params();
/*
if(XCORR)
set_HOD2_params();
*/
if(HOD.free[0])
{
chi2ngal = (GALAXY_DENSITY-wp.ngal)*(GALAXY_DENSITY-wp.ngal)/wp.ngal_err/wp.ngal_err;
if(chi2ngal>1.0E3)return(chi2ngal);
}
if(ERROR_FLAG)
{
ERROR_FLAG=0;
return(1e7);
}
/* if(HOD.pdfs==3 && HOD.M_cut<HOD.M_low)return(1.1e7); */
if(HOD.M_min>HOD.M1)return(1.0e7);
/* if(HOD.pdfs==3)if(HOD.M_cut<HOD.M_min*0.9)return(1.2e7); */
mmin_prev=HOD.M_min;
sig_prev=HOD.sigma_logM;
if(HOD.color>=niter)
flag = 1;
if(flag && COVAR)
{
// printf("INVERTING COVARIANCE MATRIX\n");
flag=0;
tmp=dmatrix(1,wp.np,1,1);
tmp2=dmatrix(1,wp.np,1,wp.np);
for(i=1;i<=wp.np;++i)
for(j=1;j<=wp.np;++j)
tmp2[i][j]=wp.covar[i][j];
gaussj(tmp2,wp.np,tmp,1);
for(i=1;i<=wp.np;++i)
for(j=1;j<=wp.np;++j)
wp.covar[i][j]=tmp2[i][j];
free_dmatrix(tmp,1,wp.np,1,1);
free_dmatrix(tmp2,1,wp.np,1,wp.np);
x=dvector(1,wp.np);
}
if(!COVAR)
x=dvector(1,wp.np);
rmax = wp.r[wp.np];
rmin = wp.r[1];
dlogr = (log(rmax)-log(rmin))/(wp.np-1);
BETA = pow(OMEGA_M,0.6)/qromo(func_galaxy_bias,log(HOD.M_low),log(HOD.M_max),midpnt)*
GALAXY_DENSITY;
if(OUTPUT)
printf("BETA = %f\n",BETA);
rlo = exp(log(rmin) - 0.5*dlogr);
for(i=1;i<=wp.np;++i)
{
rhi = exp(dlogr)*rlo;
if(DEPROJECTED)
x[i]=one_halo_real_space(wp.r[i])+two_halo_real_space(wp.r[i]);
else
{
x[i]=projected_xi(wp.r[i]);
if(wp.format==3)
x[i]/=wp.r[i];
}
if(OUTPUT && !ThisTask)
printf("WP%d %f %e %e %e %e\n",niter+1,wp.r[i],wp.x[i],x[i],rlo,rhi);
rlo=rhi;
}
if(ERROR_FLAG)
{
ERROR_FLAG=0;
return(1e7);
}
chi2=0;
if(COVAR)
{
for(i=1;i<=wp.np;++i)
for(j=1;j<=wp.np;++j)
{
x1=(x[i]-wp.x[i])*(x[j]-wp.x[j])*wp.covar[i][j];
chi2+=x1;
}
}
else
{
if(!PCA)
{
for(i=1;i<=wp.np;++i)
{
x1=(x[i]-wp.x[i])*(x[i]-wp.x[i])/
(wp.e[i]*wp.e[i] + wp.esys*wp.esys*x[i]*x[i]);
chi2+=x1;
}
}
else
{
chi2=0;
for(i=1;i<=wp.npca;++i)
{
par_chi=0;
for(j=1;j<=wp.np;++j)
par_chi+=wp.covar[j][i]*(x[j]-wp.x[j])/wp.e[j];
chi2+=par_chi*par_chi/wp.eigen[i];
}
}
}
/*
* From Peder Norberg's instructions for use of PCA:
do i=1,npca
par_chi=0.d0
do j=1,npoints
par_chi=par_chi+pca(j,i)*(xi_teo(j)-xi_data(j))/err_data(j)
enddo
chi2=chi2+(par_chi**2)/ev(i) ! (****)
enddo
*/
/* Add in the error on the galaxy density
*/
if(HOD.free[0])
chi2+=chi2ngal;
t1 = clock();
t0 = difftime(t1,t0)/CLOCKS_PER_SEC;
niter++;
if(!ThisTask){
printf("ITER %7d %e ",niter,chi2);
for(i=1;i<=ncf_hod;++i)printf("%e ",a[i]);
printf(" %.2f\n",t0);
fflush(stdout);
if(HOD.free[0])
printf("NGAL %e %e %e\n",chi2ngal,
GALAXY_DENSITY,wp.ngal);
}
chi2_prev=chi2;
ichi++;
if(ichi==10)ichi=0;
chi2_array[ichi]=chi2;
fflush(stdout);
return(chi2);
}
void initial_wp_values(double *a, double **pp, double *yy)
{
static int flag=1;
int i,j;
double d[100];
if(flag) {
i=0;j=0;
if(HOD.free[++i])a[++j]=HOD.M_min;
if(HOD.free[++i])a[++j]=HOD.M1;
if(HOD.free[++i])a[++j]=HOD.alpha;
if(HOD.free[++i])a[++j]=HOD.M_cut;
if(HOD.free[++i])a[++j]=HOD.sigma_logM;
if(HOD.free[++i])a[++j]=CVIR_FAC;
if(HOD.pdfc>=7){
if(HOD.free[++i])a[++j]=HOD.M_cen_max; }
else {
if(HOD.free[++i])a[++j]=HOD.MaxCen; }
if(HOD.free[++i])a[++j]=HOD.M_sat_break;
if(HOD.free[++i])a[++j]=HOD.alpha1;
if(XCORR){
i=0;
if(HOD.free[++i])a[++j]=HOD2.M_min;
if(HOD.free[++i])a[++j]=HOD2.M1;
if(HOD.free[++i])a[++j]=HOD2.alpha;
if(HOD.free[++i])a[++j]=HOD2.M_cut;
if(HOD.free[++i])a[++j]=HOD2.sigma_logM;
if(HOD.free[++i])a[++j]=CVIR_FAC;
if(HOD2.pdfc>=7){
if(HOD.free[++i])a[++j]=HOD2.M_cen_max; }
else {
if(HOD.free[++i])a[++j]=HOD2.MaxCen; }
}
printf("INITIAL VALUES: ");
for(i=1;i<=wp.ncf;++i)printf("%e ",a[i]);
printf("\n");
}
//flag++;
/* Make the starting stepsize 25% of the initial values.
*/
for(i=1;i<=wp.ncf;++i)
d[i]=a[i]*0.25/flag;
if(POWELL)
{
for(i=1;i<=wp.ncf;++i)
{
for(j=1;j<=wp.ncf;++j)
{
pp[i][j]=0;
if(i==j)pp[i][j]+=d[j];
}
}
}
else
{
for(j=1;j<=wp.ncf;++j)
pp[1][j]=a[j];
yy[1]=chi2_wp(a);
for(i=1;i<=wp.ncf;++i)
{
a[i]+=d[i];
if(i>1)a[i-1]-=d[i-1];
yy[i+1]=chi2_wp(a);
for(j=1;j<=wp.ncf;++j)
pp[i+1][j]=a[j];
}
a[wp.ncf]-=d[wp.ncf];
}
}
/* This routine reads in the wp data and covariance matrix from
* the filenames specified.
*
* FILE FORMATS:
*
* - fname_wp -> r xi e_xi
* - fname_covar -> (i=1,np)(j=1,np) read(covar[i][j])
*
*/
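/* For example, a fname_wp file in the standard [wp.format==1] layout would
* look like this (three bins, values purely illustrative):
*
*   0.17  610.0  55.0
*   0.27  412.0  38.0
*   0.42  290.0  25.0
*
* i.e. linear r [Mpc/h], linear wp(r), linear error, one bin per line.
*/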
void wp_input()
{
float x1,x2,x3;
FILE *fp;
int i,j,n;
char a[1000];
if(!(fp=fopen(wp.fname_wp,"r")))
{
fprintf(stdout,"ERROR opening [%s]\n",wp.fname_wp);
endrun("error in wp_input");
}
wp.np=filesize(fp);
/* [wp.format==2] means that there are two header lines at
* the top of the file.
*/
if(wp.format==2)
{
wp.np-=2;
fgets(a,1000,fp);
fgets(a,1000,fp);
}
/* [wp.format==3] means that there is one header line at
* the top of the file.
*/
if(wp.format==3)
{
wp.np-=1;
fgets(a,1000,fp);
}
wp.r=dvector(1,wp.np);
wp.x=dvector(1,wp.np);
wp.e=dvector(1,wp.np);
if(PCA)
{
wp.eigen=dvector(1,wp.np);
wp.covar=dmatrix(1,wp.np,1,wp.np);
}
/* Read in the projected correlation function data.
* Standard format [wp.format==1] is linear r, linear wp, linear err.
* [wp.format==2] is log10 r, log10 wp, linear err.
* NB! Peder's format lists the inner edge of each bin, so we add 0.1 dex to each log10(r).
*/
for(i=1;i<=wp.np;++i)
{
fscanf(fp,"%f %f %f",&x1,&x2,&x3);
wp.r[i]=x1;
wp.x[i]=x2;
wp.e[i]=x3;
if(wp.format==2){
wp.r[i] = pow(10.0,wp.r[i]+0.1);
wp.x[i] = pow(10.0,wp.x[i])*wp.r[i];
wp.e[i] = wp.e[i]*wp.r[i];
}
if(wp.format==3){
fscanf(fp,"%f",&x3);
wp.e[i]=x3;
wp.r[i] = pow(10.0,wp.r[i]+0.1);
// wp.x[i] = wp.x[i]*wp.r[i];
// wp.e[i] = wp.e[i]*wp.r[i];
}
if(wp.format==3 && PCA) {
fscanf(fp,"%lf",&wp.eigen[i]);
for(j=1;j<=wp.np;++j)
fscanf(fp,"%lf",&wp.covar[j][i]);
if(wp.npca==0)
wp.npca = wp.np;
}
// if(wp.format==3 && !PCA)
fgets(a,1000,fp);
}
fclose(fp);
fprintf(stderr,"Done reading %d lines from [%s]\n",wp.np,wp.fname_wp);
if(!COVAR || PCA)
return;
/*
if(wp.format==1)
{
Work.SDSS_bins=1;
for(i=1;i<=40;++i)
Work.rad[i] = i-0.5;
}
*/
if(!(fp=fopen(wp.fname_covar,"r")))
{
fprintf(stdout,"ERROR opening [%s]\n",wp.fname_covar);
endrun("error in wp_input");
}
wp.covar=dmatrix(1,wp.np,1,wp.np);
for(i=1;i<=wp.np;++i)
for(j=1;j<=wp.np;++j)
{
fscanf(fp,"%lf",&(wp.covar[i][j]));
/* printf("COVAR %d %d %e\n",i,j,wp.covar[i][j]); */
}
fclose(fp);
if(!ThisTask)
fprintf(stdout,"Done reading %d lines from [%s]\n",wp.np,wp.fname_covar);
}

View file

@ -1,180 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "header.h"
/* This calculates and tabulates both the linear and non-linear
* matter correlation function.
*
* If the parameter BOX_SIZE is set, then the lower limit of the
* Fourier transform is 1/BOX_SIZE; otherwise it starts at k=0.
*
* The integral (Smith et al. 2003, MNRAS, 341, 1311, Eq. [4]) to transform is:
*
* Int_0^\infty Delta(k) sin(rk)/rk/k dk
*
* This integral sometimes gives qromo problems when calculating xi(r) at large r.
* Therefore, we cut off the integral at k = 10^3, which has a negligible effect on the
* correlation function at the relevant scales.
*/
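/* Here Delta(k) stands for the dimensionless power spectrum, Delta^2(k) =
* k^3 P(k)/(2 pi^2) in the usual notation, assuming that
* nonlinear_power_spectrum() and linear_power_spectrum() return this
* dimensionless quantity; the transform above then reads
*
*   xi(r) = Int_0^\infty Delta^2(k) sin(kr)/(kr) dk/k,
*
* which is what xi_int() and xi_linear_int() below integrate.
*/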
double r_g4;
double xi_int(double xk);
/* Calculates and tabulates the non-linear matter correlation function.
* Since this is only really needed for the scale-dependence of the bias,
* which is basically 1 at scales r>~8 Mpc/h, I won't calculate this much
* past that value.
*/
double xi_interp(double r)
{
static int flag=0,prev_cosmo=0;
static double *x,*y,*y2;
int n=30,i,j;
double a,xi_int(),rhi=95,rlo=0.1,dlogr,klo,s1,s2,tolerance=1.0e-6;
if(!flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
dlogr = (log(rhi)-log(rlo))/(n-1);
for(i=1;i<=n;++i)
{
klo = 0;
if(BOX_SIZE>0)klo = 1/BOX_SIZE;
r_g4 = x[i] = exp((i-1)*dlogr)*rlo;
j=1;
s1 = qromo(xi_int,klo,j/r_g4,midpnt);
s2 = s1;
klo = j/r_g4;
while(mabs(s1)>tolerance*mabs(s2)) {
j+=16;
s1 = qromo(xi_int,klo,j/r_g4,midpnt);
s2 += s1;
klo = j/r_g4;
}
y[i]=s2;
}
check_for_smoothness(x,y,n,0);
spline(x,y,n,2.0E+30,2.0E+30,y2);
prev_cosmo=RESET_COSMOLOGY;
}
splint(x,y,y2,n,r,&a);
return(a);
}
/* This is the integrand of Smith et al Eq. [4]
*/
double xi_int(double xk)
{
double xk1,xk2,psp;
if(xk==0)return(0);
/* power spectrum at xk
*/
psp=nonlinear_power_spectrum(xk);
/* Integrand of Fourier transform
*/
xk1=r_g4*xk;
psp*=sin(xk1)/xk1/xk;
return psp;
}
double xi_linear_interp(double r)
{
static int flag=0,prev_cosmo=0;
static double *x,*y,*y2;
int n=100,i;
double a,rlo=0.1,rhi=150,dlogr,klo;
double xi_linear_int();
if(!flag || RESET_COSMOLOGY!=prev_cosmo)
{
if(!flag)
{
x=dvector(1,n);
y=dvector(1,n);
y2=dvector(1,n);
}
flag=1;
dlogr = (log(rhi)-log(rlo))/(n-1);
klo = 0;
if(BOX_SIZE>0)klo = 1/BOX_SIZE;
for(i=1;i<=n;++i)
{
r_g4 = x[i] = exp((i-1)*dlogr)*rlo;
y[i] = qromo(xi_linear_int,klo,1.0/r_g4,midpnt)+
qromo(xi_linear_int,1.0/r_g4,1.0E+3,midpnt);
}
check_for_smoothness(x,y,n,0);
spline(x,y,n,2.0E+30,2.0E+30,y2);
prev_cosmo=RESET_COSMOLOGY;
}
splint(x,y,y2,n,r,&a);
return(a);
}
double xi_linear_int(double xk)
{
double psp;
double xk1,xk2;
/* power spectrum at xk
*/
psp=linear_power_spectrum(xk);
/* Now take Fourier transform
*/
xk1=r_g4*xk;
psp*=sin(xk1)/xk1/xk;
return psp;
}
/* Sometimes one or two of the correlation function values will
* go haywire (a side-effect of qromo), so here we check for that, and if
* we find such a point we interpolate its value in log-space.
*/
void check_for_smoothness(double *x, double *y, int n, double r)
{
int i,flag,flag2=0;
double m,b,new;
for(i=2;i<n;++i)
{
flag=0;
if(y[i]>0)flag2=1;
if(y[i]<0 && !flag2)continue;
if(x[i]<r)continue;
if(fabs(y[i]/y[i-1])>3.0 && fabs(y[i]/y[i+1])>3.0)flag=1;
if(fabs(y[i]/y[i-1])<0.2 && fabs(y[i]/y[i+1])<0.2)flag=1;
if(y[i]<0 && (y[i-1]>=0 && y[i+1]>=0))flag=1;
if(y[i+1]<0)flag=0;
if(!flag)continue;
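/* replace the suspect point with a power-law interpolation between its
* neighbors: log(y) = m*log(x) + b */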
m=(log(y[i+1])-log(y[i-1]))/(log(x[i+1])-log(x[i-1]));
b=log(y[i+1])-m*log(x[i+1]);
new=m*log(x[i])+b;
fprintf(stderr,"SMOOTHING: %e %e %e r= %f new=%e\n",y[i-1],y[i],y[i+1],x[i],exp(new));
y[i]=exp(new);
}
}

View file

@ -0,0 +1,417 @@
import numpy as np
import readsnap
import readsubf
import sys
import time
import random
###############################################################################
#this function returns an array containing the positions of the galaxies (kpc/h)
#in the catalogue according to the fiducial density, M1 and alpha
#CDM halos with masses within [min_mass,max_mass] are populated
#with galaxies. The IDs and positions of the CDM particles belonging to the
#different groups are read from the snapshots
#If one needs to create many catalogues, this function is not appropriate,
#since it wastes a lot of time reading the snapshots and sorting the IDs
#min_mass and max_mass are in units of Msun/h, not 1e10 Msun/h
#mass_criteria: definition of the halo virial radius -- 't200' 'm200' 'c200'
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
def hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,
fiducial_density,M1,alpha,mass_criteria,verbose=False):
thres=1e-3 #controls the max relative error to accept a galaxy density
#read the header and obtain the boxsize
head=readsnap.snapshot_header(snapshot_fname)
BoxSize=head.boxsize #BoxSize in kpc/h
#read positions and IDs of DM particles: sort the IDs array
DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1) #kpc/h
DM_ids=readsnap.read_block(snapshot_fname,"ID ",parttype=-1)-1
sorted_ids=DM_ids.argsort(axis=0)
#the particle whose ID is N is located in the position sorted_ids[N]
#i.e. DM_ids[sorted_ids[N]]=N
#the position of the particle whose ID is N would be:
#DM_pos[sorted_ids[N]]
#read the IDs of the particles belonging to the CDM halos
halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
long_ids=True,read_all=True)
IDs=halos_ID.SubIDs-1
del halos_ID
#read CDM halos information
halos=readsubf.subfind_catalog(groups_fname,groups_number,
group_veldisp=True,masstab=True,
long_ids=True,swap=False)
if mass_criteria=='t200':
halos_mass=halos.group_m_tophat200*1e10 #masses in Msun/h
halos_radius=halos.group_r_tophat200 #radius in kpc/h
elif mass_criteria=='m200':
halos_mass=halos.group_m_mean200*1e10 #masses in Msun/h
halos_radius=halos.group_r_mean200 #radius in kpc/h
elif mass_criteria=='c200':
halos_mass=halos.group_m_crit200*1e10 #masses in Msun/h
halos_radius=halos.group_r_crit200 #radius in kpc/h
else:
print 'bad mass_criteria'
sys.exit()
halos_pos=halos.group_pos #positions in kpc/h
halos_len=halos.group_len
halos_offset=halos.group_offset
halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
del halos
if verbose:
print ' '
print 'total halos found=',halos_pos.shape[0]
print 'halos number density=',len(halos_pos)/(BoxSize*1e-3)**3
#keep only the halos in the given mass range
halo_mass=halos_mass[halos_indexes]
halo_pos=halos_pos[halos_indexes]
halo_radius=halos_radius[halos_indexes]
halo_len=halos_len[halos_indexes]
halo_offset=halos_offset[halos_indexes]
del halos_indexes
##### COMPUTE Mmin GIVEN M1 & alpha #####
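#bisection on Mmin: the mean galaxy number density
#n(Mmin) = (1/V) * sum_{M>Mmin} [1 + (M/M1)^alpha]
#decreases monotonically with Mmin, so it is bracketed by [min_mass,max_mass]
#and bisected until the relative difference from fiducial_density is below thres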
i=0; max_iterations=20 #maximum number of iterations
Mmin1=min_mass; Mmin2=max_mass
while (i<max_iterations):
Mmin=0.5*(Mmin1+Mmin2) #estimation of the HOD parameter Mmin
total_galaxies=0
inside=np.where(halo_mass>Mmin)[0] #take all galaxies with M>Mmin
mass=halo_mass[inside] #only halos with M>Mmin have central/satellites
total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
mean_density=total_galaxies*1.0/(BoxSize*1e-3)**3 #galaxies/(Mpc/h)^3
if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
i=max_iterations
elif (mean_density>fiducial_density):
Mmin1=Mmin
else:
Mmin2=Mmin
i+=1
if verbose:
print ' '
print 'Mmin=',Mmin
print 'average number of galaxies=',total_galaxies
print 'average galaxy density=',mean_density
#########################################
#just halos with M>Mmin; the rest do not host central/satellite galaxies
inside=np.where(halo_mass>Mmin)[0]
halo_mass=halo_mass[inside]
halo_pos=halo_pos[inside]
halo_radius=halo_radius[inside]
halo_len=halo_len[inside]
halo_offset=halo_offset[inside]
del inside
#compute number of satellites in each halo using the Poisson distribution
N_mean_sat=(halo_mass/M1)**alpha #mean number of satellites
N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
for i in range(len(N_sat)):
N_sat[i]=np.random.poisson(N_mean_sat[i])
N_tot=np.sum(N_sat)+len(halo_mass) #total number of galaxies in the catalogue
if verbose:
print ' '
print np.min(halo_mass),'< M_halo <',np.max(halo_mass)
print 'total number of galaxies=',N_tot
print 'galaxy number density=',N_tot/(BoxSize*1e-3)**3
#put satellites following the distribution of dark matter in groups
if verbose:
print ' '
print 'Creating mock catalogue ...',
pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
#index: variable that goes through the halos (there may be several galaxies in a halo)
#i: variable that goes through all (central/satellite) galaxies
#count: number of galaxies found to lie beyond their host halo virial radius
index=0; count=0; i=0
while (index<halo_mass.shape[0]):
position=halo_pos[index] #position of the DM halo
radius=halo_radius[index] #radius of the DM halo
#save the position of the central galaxy
pos_galaxies[i]=position; i+=1
#if halo contains satellites, save their positions
Nsat=N_sat[index]
if Nsat>0:
offset=halo_offset[index]
length=halo_len[index]
idss=sorted_ids[IDs[offset:offset+length]]
#compute the distances to the halo center keeping those with R<Rvir
pos=DM_pos[idss] #positions of the particles belonging to the halo
posc=pos-position
#this is to populate correctly halos closer to box boundaries
if np.any((position+radius>BoxSize) + (position-radius<0.0)):
inside=np.where(posc[:,0]>BoxSize/2.0)[0]
posc[inside,0]-=BoxSize
inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
posc[inside,0]+=BoxSize
inside=np.where(posc[:,1]>BoxSize/2.0)[0]
posc[inside,1]-=BoxSize
inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
posc[inside,1]+=BoxSize
inside=np.where(posc[:,2]>BoxSize/2.0)[0]
posc[inside,2]-=BoxSize
inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
posc[inside,2]+=BoxSize
radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
inside=np.where(radii<radius)[0]
selected=random.sample(inside,Nsat)
pos=pos[selected]
#additional, non-essential check. Can be commented out
posc=pos-position
if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
inside=np.where(posc[:,0]>BoxSize/2.0)[0]
posc[inside,0]-=BoxSize
inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
posc[inside,0]+=BoxSize
inside=np.where(posc[:,1]>BoxSize/2.0)[0]
posc[inside,1]-=BoxSize
inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
posc[inside,1]+=BoxSize
inside=np.where(posc[:,2]>BoxSize/2.0)[0]
posc[inside,2]-=BoxSize
inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
posc[inside,2]+=BoxSize
r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
if r_max>radius: #check no particles beyond Rv selected
print position
print radius
print pos
count+=1
for j in range(Nsat):
pos_galaxies[i]=pos[j]; i+=1
index+=1
if verbose:
print 'done'
#some final checks
if i!=N_tot:
print 'some galaxies missing:'
print 'registered',i,'galaxies out of',N_tot
if count>0:
print 'error:',count,'particles beyond the virial radius selected'
return pos_galaxies
###############################################################################
#This function is equal to the one above, except that the snapshot reading, halo
#reading and ID sorting are not performed here. It is best suited for cases where many
#galaxy catalogues need to be created: for example, when iterating over M1 and
#alpha trying to find the combination that best reproduces the measured wp(rp)
#VARIABLES:
#DM_pos: array containing the positions of the CDM particles
#sorted_ids: array containing the positions of the IDs in the snapshots.
#sorted_ids[N] gives the position where the particle whose ID is N is located
#IDs:IDs array as read from the subfind ID file
#halo_mass: array containing the masses of the CDM halos in the mass interval
#halo_pos: array containing the positions of the CDM halos in the mass interval
#halo_radius: array containing the radii of the CDM halos in the mass interval
#halo_len: array containing the len of the CDM halos in the mass interval
#halo_offset: array containing the offset of the CDM halos in the mass interval
#BoxSize: Size of the simulation Box. In Mpc/h
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
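#note: unlike hod() above, the random number generators below are seeded from the
#`seed` argument, so repeated calls with the same inputs return the same catalogue
#(used to check convergence of w_p(r_p) in the companion chi^2 script)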
def hod_fast(DM_pos,sorted_ids,IDs,halo_mass,halo_pos,halo_radius,halo_len,
halo_offset,BoxSize,min_mass,max_mass,fiducial_density,
M1,alpha,seed,verbose=False):
problematic_cases=0 #number of problematic cases (e.g. halos with Rvir=0.0)
thres=1e-3 #controls the max relative error to accept a galaxy density
##### COMPUTE Mmin GIVEN M1 & alpha #####
i=0; max_iterations=20 #maximum number of iterations
Mmin1=min_mass; Mmin2=max_mass
while (i<max_iterations):
Mmin=0.5*(Mmin1+Mmin2) #estimation of the HOD parameter Mmin
total_galaxies=0
inside=np.where(halo_mass>Mmin)[0]
mass=halo_mass[inside] #only halos with M>Mmin have central/satellites
total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
mean_density=total_galaxies*1.0/BoxSize**3
if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
i=max_iterations
elif (mean_density>fiducial_density):
Mmin1=Mmin
else:
Mmin2=Mmin
i+=1
if verbose:
print ' '
print 'Mmin=',Mmin
print 'average number of galaxies=',total_galaxies
print 'average galaxy density=',mean_density
#########################################
#just halos with M>Mmin; the rest do not host central/satellite galaxies
inside=np.where(halo_mass>Mmin)[0]
halo_mass=halo_mass[inside]
halo_pos=halo_pos[inside]
halo_radius=halo_radius[inside]
halo_len=halo_len[inside]
halo_offset=halo_offset[inside]
del inside
#compute number of satellites in each halo using the Poisson distribution
np.random.seed(seed) #this is just to check convergence on w_p(r_p)
N_mean_sat=(halo_mass/M1)**alpha #mean number of satellites
N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
for i in range(len(N_sat)):
N_sat[i]=np.random.poisson(N_mean_sat[i])
N_tot=np.sum(N_sat)+len(halo_mass) #total number of galaxies in the catalogue
if verbose:
print ' '
print np.min(halo_mass),'< M_halo <',np.max(halo_mass)
print 'total number of galaxies=',N_tot
print 'galaxy number density=',N_tot/BoxSize**3
#put satellites following the distribution of dark matter in groups
if verbose:
print ' '
print 'Creating mock catalogue ...',
pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
#index: variable that goes through the halos (there may be several galaxies in a halo)
#i: variable that goes through the galaxies
#count: number of galaxies found to lie beyond their host halo virial radius
random.seed(seed) #this is just to check convergence on w_p(r_p)
index=0; count=0; i=0
while (index<halo_mass.size):
position=halo_pos[index] #position of the DM halo
radius=halo_radius[index] #radius of the DM halo
#save the position of the central galaxy
pos_galaxies[i]=position; i+=1
#if halo contains satellites, save their positions
Nsat=N_sat[index]
if Nsat>0:
offset=halo_offset[index]
length=halo_len[index]
idss=sorted_ids[IDs[offset:offset+length]]
#compute the radius of those particles and keep those with R<Rvir
pos=DM_pos[idss]
posc=pos-position
#this is to populate correctly halos closer to box boundaries
if np.any((position+radius>BoxSize) + (position-radius<0.0)):
inside=np.where(posc[:,0]>BoxSize/2.0)[0]
posc[inside,0]-=BoxSize
inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
posc[inside,0]+=BoxSize
inside=np.where(posc[:,1]>BoxSize/2.0)[0]
posc[inside,1]-=BoxSize
inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
posc[inside,1]+=BoxSize
inside=np.where(posc[:,2]>BoxSize/2.0)[0]
posc[inside,2]-=BoxSize
inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
posc[inside,2]+=BoxSize
radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
inside=np.where(radii<radius)[0]
if len(inside)<Nsat:
problematic_cases+=1
print 'problematic case',len(inside),Nsat
else:
selected=random.sample(inside,Nsat)
pos=pos[selected]
#additional, non-essential check. Can be commented out
#posc=pos-position
#if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
# inside=np.where(posc[:,0]>BoxSize/2.0)[0]
# posc[inside,0]-=BoxSize
# inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
# posc[inside,0]+=BoxSize
# inside=np.where(posc[:,1]>BoxSize/2.0)[0]
# posc[inside,1]-=BoxSize
# inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
# posc[inside,1]+=BoxSize
# inside=np.where(posc[:,2]>BoxSize/2.0)[0]
# posc[inside,2]-=BoxSize
# inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
# posc[inside,2]+=BoxSize
#r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
#if r_max>radius: #check no particles beyond Rv selected
# print position
# print radius
# print pos
# count+=1
for j in range(Nsat):
pos_galaxies[i]=pos[j]; i+=1
index+=1
if verbose:
print 'done'
#some final checks
if i!=N_tot:
print 'some galaxies missing:'
print 'registered',i,'galaxies out of',N_tot
if count>0:
print 'error:',count,'particles beyond the virial radius selected'
return pos_galaxies
###############################################################################
##### example of use #####
"""
snapshot_fname='/data1/villa/b500p512nu0.6z99np1024tree/snapdir_017/snap_017'
groups_fname='/home/villa/data1/b500p512nu0.6z99np1024tree'
groups_number=17
### HALO CATALOGUE PARAMETERS ###
mass_criteria='t200'
min_mass=2e12 #Msun/h
max_mass=2e15 #Msun/h
### HOD PARAMETERS ###
fiducial_density=0.00111 #mean number density for galaxies with Mr<-21
M1=8e13
alpha=1.4
pos=hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,fiducial_density,M1,alpha,mass_criteria,verbose=True)
print pos
"""

View file

@ -0,0 +1,276 @@
#LATEST MODIFICATION: 10/11/2013
#This code computes the X^2 for a set of different HOD parameters
#to always generate the same results for a particular value of M1 & alpha,
#edit the HOD_library.py code and comment out the lines with the seeds
#the range over which M1 and alpha are to be varied has to be specified
#below, not in the INPUT
#Be careful with the IDs. In Gadget the IDs start from 1 whereas when we sort
#them the first one will be 0, for instance:
#import numpy as np
#a=np.array([1,2,8,5,4,9,6,3,7])
#b=a.argsort(axis=0)
#b
#array([0, 1, 7, 4, 3, 6, 8, 2, 5])
#i.e. b[1] will return 1, whereas it should be 0
from mpi4py import MPI
import numpy as np
import scipy.integrate as si
import snap_chooser as SC
import readsnap
import readsubf
import HOD_library as HOD
import correlation_function_library as CF
import sys
import os
import random
#function used to compute wp(rp): d(wp) / dr = 2r*xi(r) / sqrt(r^2-rp^2)
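#i.e. wp(rp) = 2 * Int_{rp}^{infty} r*xi(r)/sqrt(r^2-rp^2) dr; below it is integrated
#with scipy's odeint from r=rp+h (to avoid the integrable singularity at r=rp) up to r_max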
def deriv(y,x,r,xi,rp):
value=2.0*x*np.interp(x,r,xi)/np.sqrt(x**2-rp**2)
return np.array([value])
###### MPI DEFINITIONS ######
comm=MPI.COMM_WORLD
nprocs=comm.Get_size()
myrank=comm.Get_rank()
########################### INPUT ###############################
if len(sys.argv)>1:
sa=sys.argv
snapshot_fname=sa[1]; groups_fname=sa[2]; groups_number=sa[3]
mass_criteria=sa[4]; min_mass=float(sa[5]); max_mass=float(sa[6])
fiducial_density=float(sa[7])
M1_min=float(sa[8]); M1_max=float(sa[9]); M1_bins=int(sa[10]);
alpha_min=float(sa[11]); alpha_max=float(sa[12]); alpha_bins=int(sa[13])
random_file=sa[14]
BoxSize=float(sa[15])
Rmin=float(sa[16]); Rmax=float(sa[17]); bins=int(sa[18])
DD_name=sa[19]; RR_name=sa[20]; DR_name=sa[21]
DD_action=sa[22]; RR_action=sa[23]; DR_action=sa[24]
wp_file=sa[25]; results_file=sa[26]
else:
#### SNAPSHOTS TO SELECT GALAXIES WITHIN CDM HALOS ####
snapshot_fname='../../snapdir_003/snap_003'
groups_fname='../../'
groups_number=3
#### HALO CATALOGUE PARAMETERS ####
mass_criteria='m200' #'t200' 'm200' or 'c200'
min_mass=3e10 #Msun/h
max_mass=2e15 #Msun/h
### HOD PARAMETERS ###
fiducial_density=0.00111 #mean number density for galaxies with Mr<-21
#M1_min=6.0e13; M1_max=1.0e14; M1_bins=20
#alpha_min=1.05; alpha_max=1.60; alpha_bins=20
M1_min=6.9e+13; M1_max= 6.9e+13; M1_bins=100
alpha_min=1.20; alpha_max=1.20; alpha_bins=100
#### RANDOM CATALOG ####
random_file='/home/villa/disksom2/Correlation_function/Random_catalogue/random_catalogue_4e5.dat'
#### PARAMETERS ####
BoxSize=500.0 #Mpc/h
Rmin=0.1 #Mpc/h
Rmax=75.0 #Mpc/h
bins=60
#### PARTIAL RESULTS NAMES ####
DD_name='DD.dat' #name for the file containing DD results
RR_name='../RR_0.1_75_60_4e5.dat' #name for the file containing RR results
DR_name='DR.dat' #name for the file containing DR results
#### ACTIONS ####
DD_action='compute' #'compute' or 'read' (from DD_name file)
RR_action='read' #'compute' or 'read' (from RR_name file)
DR_action='compute' #'compute' or 'read' (from DR_name file)
#### wp FILE ####
wp_file='../w_p_21.dat'
wp_covariance_file='../wp_covar_21.0.dat'
#### OUTPUT ####
results_file='borrar.dat'
######################################################
if myrank==0:
#read positions and IDs of DM particles: sort the IDs array
DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)
#IDs should go from 0 to N-1, instead from 1 to N
DM_ids=readsnap.read_block(snapshot_fname,"ID ",parttype=-1)-1
if np.min(DM_ids)!=0 or np.max(DM_ids)!=(len(DM_pos)-1):
print 'Error!!!!'
print 'IDs should go from 0 to N-1'
print len(DM_ids),np.min(DM_ids),np.max(DM_ids)
sorted_ids=DM_ids.argsort(axis=0)
del DM_ids
#the particle whose ID is N is located in the position sorted_ids[N]
#i.e. DM_ids[sorted_ids[N]]=N
#the position of the particle whose ID is N would be:
#DM_pos[sorted_ids[N]]
#read the IDs of the particles belonging to the CDM halos
#again the IDs should go from 0 to N-1
halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
long_ids=True,read_all=True)
IDs=halos_ID.SubIDs-1
del halos_ID
print 'subhalos IDs=',np.min(IDs),np.max(IDs)
#read CDM halos information
halos=readsubf.subfind_catalog(groups_fname,groups_number,
group_veldisp=True,masstab=True,
long_ids=True,swap=False)
if mass_criteria=='t200':
halos_mass=halos.group_m_tophat200*1e10 #masses in Msun/h
halos_radius=halos.group_r_tophat200 #radius in kpc/h
elif mass_criteria=='m200':
halos_mass=halos.group_m_mean200*1e10 #masses in Msun/h
halos_radius=halos.group_r_mean200 #radius in kpc/h
elif mass_criteria=='c200':
halos_mass=halos.group_m_crit200*1e10 #masses in Msun/h
halos_radius=halos.group_r_crit200 #radius in kpc/h
else:
print 'bad mass_criteria'
sys.exit()
halos_pos=halos.group_pos
halos_len=halos.group_len
halos_offset=halos.group_offset
halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
del halos
print ' '
print 'total halos found=',len(halos_pos)
print 'halos number density=',len(halos_pos)/BoxSize**3
#keep only the halos in the given mass range
halo_mass=halos_mass[halos_indexes]
halo_pos=halos_pos[halos_indexes]
halo_radius=halos_radius[halos_indexes]
halo_len=halos_len[halos_indexes]
halo_offset=halos_offset[halos_indexes]
del halos_indexes
if np.any(halo_len==[]):
print 'something bad'
#read the random catalogue (new version)
dt=np.dtype((np.float32,3))
pos_r=np.fromfile(random_file,dtype=dt)*BoxSize #Mpc/h
#read the wp file
f=open(wp_file,'r'); wp=[]
for line in f.readlines():
a=line.split()
wp.append([float(a[0]),float(a[1]),float(a[2])])
f.close(); wp=np.array(wp)
#read covariance matrix file
f=open(wp_covariance_file,'r')
Cov=[]
for line in f.readlines():
a=line.split()
for value in a:
Cov.append(float(value))
f.close(); Cov=np.array(Cov)
if len(Cov)!=len(wp)**2:
print 'problem with point numbers in the covariance file'
sys.exit()
Cov=np.reshape(Cov,(len(wp),len(wp)))
Cov=np.matrix(Cov)
for g in range(100):
##### MASTER #####
if myrank==0:
#set here the range of M1, alpha to vary
#print 'M1='; M1=float(raw_input())
#print 'alpha='; alpha=float(raw_input())
#M1=1.0e14+0.4e14*np.random.random()
#alpha=1.10+0.3*np.random.random()
#seed=np.random.randint(0,3000,1)[0]
M1=1.15e14
alpha=1.27
seed=955
#create the galaxy catalogue through the HOD parameters
pos_g=HOD.hod_fast(DM_pos,sorted_ids,IDs,halo_mass,halo_pos,
halo_radius,halo_len,halo_offset,BoxSize,
min_mass,max_mass,fiducial_density,M1,
alpha,seed,verbose=True)/1e3
#compute the 2pt correlation function
r,xi_r,error_xi=CF.TPCF(pos_g,pos_r,BoxSize,DD_action,
RR_action,DR_action,DD_name,RR_name,
DR_name,bins,Rmin,Rmax)
f=open('correlation_function.dat','w')
for i in range(len(r)):
f.write(str(r[i])+' '+str(xi_r[i])+' '+str(error_xi[i])+'\n')
f.close()
r_max=np.max(r)
h=1e-13 #discontinuity at r=rp. We integrate from r=rp+h to r_max
yinit=np.array([0.0])
f=open('projected_correlation_function.dat','w')
wp_HOD=[]
for rp in wp[:,0]:
x=np.array([rp+h,r_max])
y=si.odeint(deriv,yinit,x,args=(r,xi_r,rp),mxstep=100000)
wp_HOD.append(y[1][0])
f.write(str(rp)+' '+str(y[1][0])+'\n')
wp_HOD=np.array(wp_HOD)
f.close()
print 'M1=',M1
print 'alpha=',alpha
chi2_bins=(wp_HOD-wp[:,1])**2/wp[:,2]**2
for min_bin in [2]:
for max_bin in [12]:
elements=np.arange(min_bin,max_bin)
#X^2 without covariance matrix
chi2_nocov=np.sum(chi2_bins[elements])
#X^2 with covariance matrix
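#X^2 = (wp_model - wp_data)^T C^-1 (wp_model - wp_data), using only the selected bins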
wp_aux=wp[elements,1]; wp_HOD_aux=wp_HOD[elements]
Cov_aux=Cov[elements,:][:,elements]
diff=np.matrix(wp_HOD_aux-wp_aux)
chi2=diff*Cov_aux.I*diff.T
print 'X2('+str(min_bin)+'-'+str(max_bin)+')=',chi2_nocov,chi2
g=open(results_file,'a')
g.write(str(M1)+ ' '+str(alpha)+' '+str(seed)+' '+str(chi2)+'\n')
g.close()
##### SLAVES #####
else:
pos_g=None; pos_r=None
CF.TPCF(pos_g,pos_r,BoxSize,DD_action,RR_action,DR_action,
DD_name,RR_name,DR_name,bins,Rmin,Rmax)

View file

@ -0,0 +1,773 @@
#Version 1.1
#LATEST MODIFICATION: 15/05/2013
#This file contains the functions needed to compute:
#1)-the 2pt correlation function
#2)-the 2pt cross-correlation function
from mpi4py import MPI
import numpy as np
import scipy.weave as wv
import sys,os
import time
###### MPI DEFINITIONS ######
comm=MPI.COMM_WORLD
nprocs=comm.Get_size()
myrank=comm.Get_rank()
################################################################################
#This function computes the TPCF (2pt correlation function)
#from an N-body simulation. It takes into account boundary conditions
#VARIABLES:
#pos_g: array containing the positions of the galaxies
#pos_r: array containing the positions of the random particles catalogue
#BoxSize: Size of the Box. Units must be equal to those of pos_r/pos_g
#DD_action: compute number of galaxy pairs from data or read them---compute/read
#RR_action: compute number of random pairs from data or read them---compute/read
#DR_action: compute number of galaxy-random pairs or read them---compute/read
#DD_name: file name to write/read galaxy-galaxy pairs results
#RR_name: file name to write/read random-random pairs results
#DR_name: file name to write/read galaxy-random pairs results
#bins: number of bins to compute the 2pt correlation function
#Rmin: minimum radius to compute the 2pt correlation function
#Rmax: maximum radius to compute the 2pt correlation function
#USAGE: at the end of the file there is an example of how to use this function
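#NOTE: only the master process (myrank==0) builds the grid indexes and returns
#(r, xi(r), error); the slave processes just take part in the pair counting
#inside DDR_pairs and return None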
def TPCF(pos_g,pos_r,BoxSize,DD_action,RR_action,DR_action,
DD_name,RR_name,DR_name,bins,Rmin,Rmax,verbose=False):
#dims is chosen so that the subbox side BoxSize/dims is at least Rmax, so only adjacent subboxes need to be searched for pairs
dims=int(BoxSize/Rmax)
dims2=dims**2; dims3=dims**3
##### MASTER #####
if myrank==0:
#compute the indexes of the halo/subhalo/galaxy catalogue
Ng=len(pos_g)*1.0; indexes_g=[]
coord=np.floor(dims*pos_g/BoxSize).astype(np.int32)
index=dims2*coord[:,0]+dims*coord[:,1]+coord[:,2]
for i in range(dims3):
ids=np.where(index==i)[0]
indexes_g.append(ids)
indexes_g=np.array(indexes_g)
#compute the indexes of the random catalogue
Nr=len(pos_r)*1.0; indexes_r=[]
coord=np.floor(dims*pos_r/BoxSize).astype(np.int32)
index=dims2*coord[:,0]+dims*coord[:,1]+coord[:,2]
for i in range(dims3):
ids=np.where(index==i)[0]
indexes_r.append(ids)
indexes_r=np.array(indexes_r)
#compute galaxy-galaxy pairs: DD
if DD_action=='compute':
DD=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_g,
indexes2=None,pos1=pos_g,pos2=None)
if verbose:
print DD
print np.sum(DD)
#write results to a file
write_results(DD_name,DD,bins,'radial')
else:
#read results from a file
DD,bins_aux=read_results(DD_name,'radial')
if bins_aux!=bins:
print 'Sizes are different!'
sys.exit()
#compute random-random pairs: RR
if RR_action=='compute':
RR=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_r,
indexes2=None,pos1=pos_r,pos2=None)
if verbose:
print RR
print np.sum(RR)
#write results to a file
write_results(RR_name,RR,bins,'radial')
else:
#read results from a file
RR,bins_aux=read_results(RR_name,'radial')
if bins_aux!=bins:
print 'Sizes are different!'
sys.exit()
#compute galaxy-random pairs: DR
if DR_action=='compute':
DR=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_g,
indexes2=indexes_r,pos1=pos_g,pos2=pos_r)
if verbose:
print DR
print np.sum(DR)
#write results to a file
write_results(DR_name,DR,bins,'radial')
else:
#read results from a file
DR,bins_aux=read_results(DR_name,'radial')
if bins_aux!=bins:
print 'Sizes are different!'
sys.exit()
#final processing
bins_histo=np.logspace(np.log10(Rmin),np.log10(Rmax),bins+1)
middle=0.5*(bins_histo[:-1]+bins_histo[1:])
DD*=1.0; RR*=1.0; DR*=1.0
r,xi_r,error_xi_r=[],[],[]
for i in range(bins):
if (RR[i]>0.0): #avoid divisions by 0
xi_aux,error_xi_aux=xi(DD[i],RR[i],DR[i],Ng,Nr)
r.append(middle[i])
xi_r.append(xi_aux)
error_xi_r.append(error_xi_aux)
r=np.array(r)
xi_r=np.array(xi_r)
error_xi_r=np.array(error_xi_r)
return r,xi_r,error_xi_r
##### SLAVES #####
else:
if DD_action=='compute':
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
if RR_action=='compute':
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
if DR_action=='compute':
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
################################################################################
################################################################################
#This function computes the TPCCF (2pt cross-correlation function)
#from an N-body simulation. It takes into account boundary conditions
#VARIABLES:
#pos_g1: array containing the positions of the galaxies1
#pos_g2: array containing the positions of the galaxies2
#pos_r: array containing the positions of the random particles catalogue
#BoxSize: Size of the Box. Units must be equal to those of pos_r/pos_g1/pos_g2
#DD_action: compute number of galaxy pairs from data or read them---compute/read
#RR_action: compute number of random pairs from data or read them---compute/read
#DR_action: compute number of galaxy-random pairs or read them---compute/read
#DD_name: file name to write/read galaxy-galaxy pairs results
#RR_name: file name to write/read random-random pairs results
#DR_name: file name to write/read galaxy-random pairs results
#bins: number of bins to compute the 2pt correlation function
#Rmin: minimum radius to compute the 2pt correlation function
#Rmax: maximum radius to compute the 2pt correlation function
#USAGE: at the end of the file there is an example of how to use this function
def TPCCF(pos_g1,pos_g2,pos_r,BoxSize,
D1D2_action,D1R_action,D2R_action,RR_action,
D1D2_name,D1R_name,D2R_name,RR_name,
bins,Rmin,Rmax,verbose=False):
#dims is chosen so that the subbox side BoxSize/dims is at least Rmax, so only adjacent subboxes need to be searched for pairs
dims=int(BoxSize/Rmax)
dims2=dims**2; dims3=dims**3
##### MASTER #####
if myrank==0:
#compute the indexes of the halo1/subhalo1/galaxy1 catalogue
Ng1=len(pos_g1)*1.0; indexes_g1=[]
coord=np.floor(dims*pos_g1/BoxSize).astype(np.int32)
index=dims2*coord[:,0]+dims*coord[:,1]+coord[:,2]
for i in range(dims3):
ids=np.where(index==i)[0]
indexes_g1.append(ids)
indexes_g1=np.array(indexes_g1)
#compute the indexes of the halo2/subhalo2/galaxy2 catalogue
Ng2=len(pos_g2)*1.0; indexes_g2=[]
coord=np.floor(dims*pos_g2/BoxSize).astype(np.int32)
index=dims2*coord[:,0]+dims*coord[:,1]+coord[:,2]
for i in range(dims3):
ids=np.where(index==i)[0]
indexes_g2.append(ids)
indexes_g2=np.array(indexes_g2)
#compute the indexes of the random catalogue
Nr=len(pos_r)*1.0; indexes_r=[]
coord=np.floor(dims*pos_r/BoxSize).astype(np.int32)
index=dims2*coord[:,0]+dims*coord[:,1]+coord[:,2]
for i in range(dims3):
ids=np.where(index==i)[0]
indexes_r.append(ids)
indexes_r=np.array(indexes_r)
#compute galaxy1-galaxy2 pairs: D1D2
if D1D2_action=='compute':
D1D2=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_g1,
indexes2=indexes_g2,pos1=pos_g1,pos2=pos_g2)
if verbose:
print D1D2
print np.sum(D1D2)
#write results to a file
write_results(D1D2_name,D1D2,bins,'radial')
else:
#read results from a file
D1D2,bins_aux=read_results(D1D2_name,'radial')
if bins_aux!=bins:
print 'Sizes are different!'
sys.exit()
#compute galaxy1-random pairs: D1R
if D1R_action=='compute':
D1R=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_g1,
indexes2=indexes_r,pos1=pos_g1,pos2=pos_r)
if verbose:
print D1R
print np.sum(D1R)
#write results to a file
write_results(D1R_name,D1R,bins,'radial')
else:
#read results from a file
D1R,bins_aux=read_results(D1R_name,'radial')
if bins_aux!=bins:
print 'Sizes are different!'
sys.exit()
#compute galaxy2-random pairs: D2R
if D2R_action=='compute':
D2R=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_g2,
indexes2=indexes_r,pos1=pos_g2,pos2=pos_r)
if verbose:
print D2R
print np.sum(D2R)
#write results to a file
write_results(D2R_name,D2R,bins,'radial')
else:
#read results from a file
D2R,bins_aux=read_results(D2R_name,'radial')
if bins_aux!=bins:
print 'Sizes are different!'
sys.exit()
#compute random-random pairs: RR
if RR_action=='compute':
RR=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_r,
indexes2=None,pos1=pos_r,pos2=None)
if verbose:
print RR
print np.sum(RR)
#write results to a file
write_results(RR_name,RR,bins,'radial')
else:
#read results from a file
RR,bins_aux=read_results(RR_name,'radial')
if bins_aux!=bins:
print 'Sizes are different!'
sys.exit()
#final processing
bins_histo=np.logspace(np.log10(Rmin),np.log10(Rmax),bins+1)
middle=0.5*(bins_histo[:-1]+bins_histo[1:])
inside=np.where(RR>0)[0]
D1D2=D1D2[inside]; D1R=D1R[inside]; D2R=D2R[inside]; RR=RR[inside]
middle=middle[inside]
D1D2n=D1D2*1.0/(Ng1*Ng2)
D1Rn=D1R*1.0/(Ng1*Nr)
D2Rn=D2R*1.0/(Ng2*Nr)
RRn=RR*2.0/(Nr*(Nr-1.0))
xi_r=D1D2n/RRn-D1Rn/RRn-D2Rn/RRn+1.0
return middle,xi_r
##### SLAVES #####
else:
if D1D2_action=='compute':
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
if D1R_action=='compute':
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
if D2R_action=='compute':
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
if RR_action=='compute':
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
################################################################################
################################################################################
#This function is used to compute the DD file (the number of random pairs in a
#random catalogue) that is needed for massive computation of the 2pt
#correlation function
#from an N-body simulation. It takes into account boundary conditions
#VARIABLES:
#pos_r: array containing the positions of the random particles catalogue
#BoxSize: Size of the Box. Units must be equal to those of pos_r/pos_g
#RR_name: file name to write/read random-random pairs results
#bins: number of bins to compute the 2pt correlation function
#Rmin: minimum radius to compute the 2pt correlation function
#Rmax: maximum radius to compute the 2pt correlation function
#USAGE: at the end of the file there is an example of how to use this function
def DD_file(pos_r,BoxSize,RR_name,bins,Rmin,Rmax):
#dims is chosen so that the subbox side BoxSize/dims is at least Rmax, so only adjacent subboxes need to be searched for pairs
dims=int(BoxSize/Rmax)
dims2=dims**2; dims3=dims**3
##### MASTER #####
if myrank==0:
#compute the indexes of the random catalogue
Nr=len(pos_r)*1.0; indexes_r=[]
coord=np.floor(dims*pos_r/BoxSize).astype(np.int32)
index=dims2*coord[:,0]+dims*coord[:,1]+coord[:,2]
for i in range(dims3):
ids=np.where(index==i)[0]
indexes_r.append(ids)
indexes_r=np.array(indexes_r)
#compute random-random pairs: RR
RR=DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1=indexes_r,
indexes2=None,pos1=pos_r,pos2=None)
print RR
print np.sum(RR)
#write results to a file
write_results(RR_name,RR,bins,'radial')
##### SLAVES #####
else:
DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,
indexes1=None,indexes2=None,pos1=None,pos2=None)
################################################################################
################################################################################
####### COMPUTE THE NUMBER OF PAIRS IN A CATALOG ####### (x,y,z) very fast
################################################################################
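#master/slave protocol used below: the master first sends the positions and grid
#indexes to every slave (tags 6-9), then hands out one subbox at a time on request
#(tags 1-3); once all subboxes are done it collects the partial pair histograms
#from the slaves (tag 10) and sums them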
def DDR_pairs(bins,Rmin,Rmax,BoxSize,dims,indexes1,indexes2,pos1,pos2):
dims2=dims**2; dims3=dims**3
#we put bins+1. The last bin is only for pairs separated by r=Rmax
pairs=np.zeros(bins+1,dtype=np.int64)
##### MASTER #####
if myrank==0:
#Master sends the indexes and particle positions to the slaves
for i in range(1,nprocs):
comm.send(pos1,dest=i,tag=6)
comm.send(pos2,dest=i,tag=7)
comm.send(indexes1,dest=i,tag=8)
comm.send(indexes2,dest=i,tag=9)
#Masters distributes the calculation among slaves
for subbox in range(dims3):
b=comm.recv(source=MPI.ANY_SOURCE,tag=1)
comm.send(False,dest=b,tag=2)
comm.send(subbox,dest=b,tag=3)
#Master gathers partial results from slaves and returns the final result
for j in range(1,nprocs):
b=comm.recv(source=MPI.ANY_SOURCE,tag=1)
comm.send(True,dest=b,tag=2)
pairs_aux=comm.recv(source=b,tag=10)
pairs+=pairs_aux
#the last element is just for situations in which r=Rmax
pairs[bins-1]+=pairs[bins]
return pairs[:-1]
##### SLAVES #####
else:
#position of the center of each subbox
sub_c=np.empty(3,dtype=np.float32)
#slaves receive the positions and indexes
pos1=comm.recv(source=0,tag=6)
pos2=comm.recv(source=0,tag=7)
indexes1=comm.recv(source=0,tag=8)
indexes2=comm.recv(source=0,tag=9)
comm.send(myrank,dest=0,tag=1)
final=comm.recv(source=0,tag=2)
while not(final):
subbox=comm.recv(source=0,tag=3)
core_ids=indexes1[subbox] #ids of the particles in the subbox
pos0=pos1[core_ids]
sub_c[0]=(subbox/dims2+0.5)*BoxSize/dims
sub_c[1]=((subbox%dims2)/dims+0.5)*BoxSize/dims
sub_c[2]=((subbox%dims2)%dims+0.5)*BoxSize/dims
#galaxy-galaxy or random-random case
if pos2 is None:
#first: distances between particles in the same subbox
distances_core(pos0,BoxSize,bins,Rmin,Rmax,pairs)
#second: distances between particles in the subbox and particles around
ids=indexes_subbox_neigh(sub_c,Rmax,dims,BoxSize,indexes1,subbox)
if len(ids)>0:
posN=pos1[ids]
DR_distances(pos0,posN,BoxSize,bins,Rmin,Rmax,pairs)
#galaxy-random case
else:
ids=indexes_subbox(sub_c,Rmax,dims,BoxSize,indexes2)
posN=pos2[ids]
DR_distances(pos0,posN,BoxSize,bins,Rmin,Rmax,pairs)
comm.send(myrank,dest=0,tag=1)
final=comm.recv(source=0,tag=2)
print 'cpu ',myrank,' finished: transferring data to master'
comm.send(pairs,dest=0,tag=10)
################################################################################
################################################################################
#this function computes the distances between all particle pairs and
#returns the number of pairs found in each distance bin
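#the distances use the minimum-image convention for the periodic box, and the
#bins are logarithmic: bin = int(log10(r/Rmin)/delta) with delta = log10(Rmax/Rmin)/bins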
def distances_core(pos,BoxSize,bins,Rmin,Rmax,pairs):
l=pos.shape[0]
support = """
#include <iostream>
using namespace std;
"""
code = """
float middle=BoxSize/2.0;
float dx,dy,dz,r;
float x1,y1,z1,x2,y2,z2;
float delta=log10(Rmax/Rmin)/bins;
int bin,i,j;
for (i=0;i<l;i++){
x1=pos(i,0);
y1=pos(i,1);
z1=pos(i,2);
for (j=i+1;j<l;j++){
x2=pos(j,0);
y2=pos(j,1);
z2=pos(j,2);
dx=(fabs(x1-x2)<middle) ? x1-x2 : BoxSize-fabs(x1-x2);
dy=(fabs(y1-y2)<middle) ? y1-y2 : BoxSize-fabs(y1-y2);
dz=(fabs(z1-z2)<middle) ? z1-z2 : BoxSize-fabs(z1-z2);
r=sqrt(dx*dx+dy*dy+dz*dz);
if (r>=Rmin && r<=Rmax){
bin=(int)(log10(r/Rmin)/delta);
pairs(bin)+=1;
}
}
}
"""
wv.inline(code,['pos','l','BoxSize','Rmin','Rmax','bins','pairs'],
type_converters = wv.converters.blitz,
support_code = support,libraries = ['m'])
return pairs
################################################################################
#pos1---an array of positions
#pos2---an array of positions
#the function returns the number of pairs in distance bins between pos1 and pos2
def DR_distances(p1,p2,BoxSize,bins,Rmin,Rmax,pairs):
l1=p1.shape[0]
l2=p2.shape[0]
support = """
#include <iostream>
using namespace std;
"""
code = """
float middle=BoxSize/2.0;
float dx,dy,dz,r;
float x1,y1,z1,x2,y2,z2;
float delta=log10(Rmax/Rmin)/bins;
int bin,i,j;
for (i=0;i<l1;i++){
x1=p1(i,0);
y1=p1(i,1);
z1=p1(i,2);
for (j=0;j<l2;j++){
x2=p2(j,0);
y2=p2(j,1);
z2=p2(j,2);
dx=(fabs(x1-x2)<middle) ? x1-x2 : BoxSize-fabs(x1-x2);
dy=(fabs(y1-y2)<middle) ? y1-y2 : BoxSize-fabs(y1-y2);
dz=(fabs(z1-z2)<middle) ? z1-z2 : BoxSize-fabs(z1-z2);
r=sqrt(dx*dx+dy*dy+dz*dz);
if (r>=Rmin && r<=Rmax){
bin=(int)(log10(r/Rmin)/delta);
pairs(bin)+=1;
}
}
}
"""
wv.inline(code,['p1','p2','l1','l2','BoxSize','Rmin','Rmax','bins','pairs'],
type_converters = wv.converters.blitz,
support_code = support)
return pairs
################################################################################
################################################################################
#this routine computes the IDs of all the particles within the neighboring cells
#that can lie within the radius Rmax
def indexes_subbox(pos,Rmax,dims,BoxSize,indexes):
#we add dims to avoid negative numbers. For example
#if something lies between -1 and 5, the array to be
#constructed should have indexes -1 0 1 2 3 4 5.
#To achieve this in a clever way we add dims
i_min=int(np.floor((pos[0]-Rmax)*dims/BoxSize))+dims
i_max=int(np.floor((pos[0]+Rmax)*dims/BoxSize))+dims
j_min=int(np.floor((pos[1]-Rmax)*dims/BoxSize))+dims
j_max=int(np.floor((pos[1]+Rmax)*dims/BoxSize))+dims
k_min=int(np.floor((pos[2]-Rmax)*dims/BoxSize))+dims
k_max=int(np.floor((pos[2]+Rmax)*dims/BoxSize))+dims
i_array=np.arange(i_min,i_max+1)%dims
j_array=np.arange(j_min,j_max+1)%dims
k_array=np.arange(k_min,k_max+1)%dims
PAR_indexes=np.array([])
for i in i_array:
for j in j_array:
for k in k_array:
num=dims**2*i+dims*j+k
ids=indexes[num]
PAR_indexes=np.concatenate((PAR_indexes,ids)).astype(np.int32)
return PAR_indexes
################################################################################
#this routine returns the ids of the particles in the neighboring cells
#that haven't already been selected
def indexes_subbox_neigh(pos,Rmax,dims,BoxSize,indexes,subbox):
#we add dims to avoid negative numbers. For example
#if something lies between -1 and 5, the array to be
#constructed should have indexes -1 0 1 2 3 4 5.
#To achieve this in a clever way we add dims
i_min=int(np.floor((pos[0]-Rmax)*dims/BoxSize))+dims
i_max=int(np.floor((pos[0]+Rmax)*dims/BoxSize))+dims
j_min=int(np.floor((pos[1]-Rmax)*dims/BoxSize))+dims
j_max=int(np.floor((pos[1]+Rmax)*dims/BoxSize))+dims
k_min=int(np.floor((pos[2]-Rmax)*dims/BoxSize))+dims
k_max=int(np.floor((pos[2]+Rmax)*dims/BoxSize))+dims
i_array=np.arange(i_min,i_max+1)%dims
j_array=np.arange(j_min,j_max+1)%dims
k_array=np.arange(k_min,k_max+1)%dims
ids=np.array([])
for i in i_array:
for j in j_array:
for k in k_array:
num=dims**2*i+dims*j+k
if num>subbox:
ids_subbox=indexes[num]
ids=np.concatenate((ids,ids_subbox)).astype(np.int32)
return ids
################################################################################
################################################################################
#This function computes the correlation function and its error once the number
#of galaxy-galaxy, random-random & galaxy-random pairs are given together
#with the total number of galaxies and random points
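#the estimator is Landy-Szalay, xi = (DD - 2*DR + RR)/RR, written below in terms of
#the normalized pair counts GGn, GRn and RRn; the error returned is the code's
#approximate analytic estimate, scaled by sqrt(3)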
def xi(GG,RR,GR,Ng,Nr):
normGG=2.0/(Ng*(Ng-1.0))
normRR=2.0/(Nr*(Nr-1.0))
normGR=1.0/(Ng*Nr)
GGn=GG*normGG
RRn=RR*normRR
GRn=GR*normGR
xi=GGn/RRn-2.0*GRn/RRn+1.0
fact=normRR/normGG*RR*(1.0+xi)+4.0/Ng*(normRR*RR/normGG*(1.0+xi))**2
err=normGG/(normRR*RR)*np.sqrt(fact)
err=err*np.sqrt(3.0)
return xi,err
################################################################################
################################################################################
#This function writes partial results to a file
def write_results(fname,histogram,bins,case):
f=open(fname,'w')
if case=='par-perp':
for i in range(len(histogram)):
coord_perp=i/bins
coord_par=i%bins
f.write(str(coord_par)+' '+str(coord_perp)+' '+str(histogram[i])+'\n')
elif case=='radial':
for i in range(len(histogram)):
f.write(str(i)+' '+str(histogram[i])+'\n')
else:
print 'Error in the description of case:'
print 'Choose between: par-perp or radial'
f.close()
################################################################################
#This function reads partial results from a file
def read_results(fname,case):
histogram=[]
if case=='par-perp':
f=open(fname,'r')
for line in f.readlines():
a=line.split()
histogram.append(int(a[2]))
f.close()
histogram=np.array(histogram)
size=histogram.shape[0] #infer the matrix size from the number of values read
bins=np.around(np.sqrt(size)).astype(np.int64)
if bins*bins!=size:
print 'Error finding the size of the matrix'
sys.exit()
return histogram,bins
elif case=='radial':
f=open(fname,'r')
for line in f.readlines():
a=line.split()
histogram.append(int(a[1]))
f.close()
histogram=np.array(histogram)
return histogram,histogram.shape[0]
else:
print 'Error in the description of case:'
print 'Choose between: par-perp or radial'
################################################################################
############ EXAMPLE OF USAGE: TPCF ############
"""
points_g=150000
points_r=200000
BoxSize=500.0 #Mpc/h
Rmin=1.0 #Mpc/h
Rmax=50.0 #Mpc/h
bins=30
DD_action='compute'
RR_action='compute'
DR_action='compute'
DD_name='DD.dat'
RR_name='RR.dat'
DR_name='DR.dat'
if myrank==0:
pos_g=np.random.random((points_g,3))*BoxSize
pos_r=np.random.random((points_r,3))*BoxSize
start=time.clock()
r,xi_r,error_xi=TPCF(pos_g,pos_r,BoxSize,DD_action,RR_action,DR_action,
DD_name,RR_name,DR_name,bins,Rmin,Rmax,verbose=True)
print r
print xi_r
print error_xi
end=time.clock()
print 'time:',end-start
else:
pos_g=None; pos_r=None
TPCF(pos_g,pos_r,BoxSize,DD_action,RR_action,DR_action,
DD_name,RR_name,DR_name,bins,Rmin,Rmax,verbose=True)
"""
############ EXAMPLE OF USAGE: TPCCF ############
"""
points_g1=150000
points_g2=150000
points_r=200000
BoxSize=500.0 #Mpc/h
Rmin=1.0 #Mpc/h
Rmax=50.0 #Mpc/h
bins=30
D1D2_action='compute'; D1D2_name='D1D2.dat'
D1R_action='compute'; D1R_name='D1R.dat'
D2R_action='compute'; D2R_name='D2R.dat'
RR_action='compute'; RR_name='RR.dat'
if myrank==0:
pos_g1=np.random.random((points_g1,3))*BoxSize
pos_g2=np.random.random((points_g2,3))*BoxSize
pos_r=np.random.random((points_r,3))*BoxSize
r,xi_r=TPCCF(pos_g1,pos_g2,pos_r,BoxSize,
D1D2_action,D1R_action,D2R_action,RR_action,
D1D2_name,D1R_name,D2R_name,RR_name,
bins,Rmin,Rmax,verbose=True)
print r
print xi_r
else:
pos_g1=None; pos_g2=None; pos_r=None
TPCCF(pos_g1,pos_g2,pos_r,BoxSize,D1D2_action,D1R_action,D2R_action,
RR_action,D1D2_name,D1R_name,D2R_name,RR_name,bins,Rmin,Rmax,
verbose=True)
"""
############ EXAMPLE OF USAGE: DD_file ############
"""
points_r=200000
BoxSize=500.0 #Mpc/h
Rmin=1.0 #Mpc/h
Rmax=50.0 #Mpc/h
bins=30
RR_name='RR.dat'
if myrank==0:
pos_r=np.random.random((points_r,3))*BoxSize
DD_file(pos_r,BoxSize,RR_name,bins,Rmin,Rmax)
else:
pos_r=None
DD_file(pos_r,BoxSize,RR_name,bins,Rmin,Rmax)
"""

View file

@ -0,0 +1,431 @@
# routines for reading headers and data blocks from Gadget snapshot files
# usage e.g.:
#
# import readsnap as rs
# header = rs.snapshot_header("snap_063.0") # reads snapshot header
# print header.massarr
# mass = rs.read_block("snap_063","MASS",parttype=5) # reads mass for particles of type 5, using block names should work for both format 1 and 2 snapshots
# print "mass for", mass.size, "particles read"
# print mass[0:10]
#
# before using read_block, make sure that the description (and order if using format 1 snapshot files) of the data blocks
# is correct for your configuration of Gadget
#
# for multiple-file snapshots give e.g. the filename "snap_063" rather than "snap_063.0" to read_block
# for snapshot_header the file number should be included, e.g. "snap_063.0", as the headers of the files differ
#
# the returned data block is ordered by particle species even when read from a multiple file snapshot
import numpy as np
import os
import sys
import math
# ----- class for snapshot header -----
class snapshot_header:
def __init__(self, filename):
if os.path.exists(filename):
curfilename = filename
elif os.path.exists(filename+".0"):
curfilename = filename+".0"
else:
print "file not found:", filename
sys.exit()
self.filename = filename
f = open(curfilename,'rb')
blocksize = np.fromfile(f,dtype=np.int32,count=1)
if blocksize[0] == 8:
swap = 0
format = 2
elif blocksize[0] == 256:
swap = 0
format = 1
else:
blocksize.byteswap(True)
if blocksize[0] == 8:
swap = 1
format = 2
elif blocksize[0] == 256:
swap = 1
format = 1
else:
print "incorrect file format encountered when reading header of", filename
sys.exit()
self.format = format
self.swap = swap
if format==2:
f.seek(16, os.SEEK_CUR)
self.npart = np.fromfile(f,dtype=np.int32,count=6)
self.massarr = np.fromfile(f,dtype=np.float64,count=6)
self.time = (np.fromfile(f,dtype=np.float64,count=1))[0]
self.redshift = (np.fromfile(f,dtype=np.float64,count=1))[0]
self.sfr = (np.fromfile(f,dtype=np.int32,count=1))[0]
self.feedback = (np.fromfile(f,dtype=np.int32,count=1))[0]
self.nall = np.fromfile(f,dtype=np.int32,count=6)
self.cooling = (np.fromfile(f,dtype=np.int32,count=1))[0]
self.filenum = (np.fromfile(f,dtype=np.int32,count=1))[0]
self.boxsize = (np.fromfile(f,dtype=np.float64,count=1))[0]
self.omega_m = (np.fromfile(f,dtype=np.float64,count=1))[0]
self.omega_l = (np.fromfile(f,dtype=np.float64,count=1))[0]
self.hubble = (np.fromfile(f,dtype=np.float64,count=1))[0]
if swap:
self.npart.byteswap(True)
self.massarr.byteswap(True)
self.time = self.time.byteswap()
self.redshift = self.redshift.byteswap()
self.sfr = self.sfr.byteswap()
self.feedback = self.feedback.byteswap()
self.nall.byteswap(True)
self.cooling = self.cooling.byteswap()
self.filenum = self.filenum.byteswap()
self.boxsize = self.boxsize.byteswap()
self.omega_m = self.omega_m.byteswap()
self.omega_l = self.omega_l.byteswap()
self.hubble = self.hubble.byteswap()
f.close()
# ----- find offset and size of data block -----
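# scans the file block by block: blocks are matched by their 4-character tag for format 2 snapshots, or by
# their ordinal position (block_num) for format 1 snapshots; returns (offset, size) of the matching payload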
def find_block(filename, format, swap, block, block_num, only_list_blocks=False):
if (not os.path.exists(filename)):
print "file not found:", filename
sys.exit()
f = open(filename,'rb')
f.seek(0, os.SEEK_END)
filesize = f.tell()
f.seek(0, os.SEEK_SET)
found = False
curblock_num = 1
while ((not found) and (f.tell()<filesize)):
if format==2:
f.seek(4, os.SEEK_CUR)
curblock = f.read(4)
if (block == curblock):
found = True
f.seek(8, os.SEEK_CUR)
else:
if curblock_num==block_num:
found = True
curblocksize = (np.fromfile(f,dtype=np.uint32,count=1))[0]
if swap:
curblocksize = curblocksize.byteswap()
# - print some debug info about found data blocks -
#if format==2:
# print curblock, curblock_num, curblocksize
#else:
# print curblock_num, curblocksize
if only_list_blocks:
if format==2:
print curblock_num,curblock,f.tell(),curblocksize
else:
print curblock_num,f.tell(),curblocksize
found = False
if found:
blocksize = curblocksize
offset = f.tell()
else:
f.seek(curblocksize, os.SEEK_CUR)
blocksize_check = (np.fromfile(f,dtype=np.uint32,count=1))[0]
if swap: blocksize_check = blocksize_check.byteswap()
if (curblocksize != blocksize_check):
        print "Error: leading and trailing block sizes do not match in", filename
sys.exit()
curblock_num += 1
f.close()
if ((not found) and (not only_list_blocks)):
print "Error: block not found"
sys.exit()
if (not only_list_blocks):
return offset,blocksize
# ----- read data block -----
#for snapshots with very very large number of particles set nall manually
#for instance nall=np.array([0,2048**3,0,0,0,0])
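#
# hedged example (hypothetical snapshot name): for a 2048^3-particle run the 32-bit nall field in the
# header overflows, so the true particle numbers are passed explicitly:
# pos = read_block("snap_000","POS ",parttype=1,nall=np.array([0,2048**3,0,0,0,0]))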
def read_block(filename, block, parttype=-1, physical_velocities=True, arepo=0, no_masses=False, verbose=False, nall=[0,0,0,0,0,0]):
if (verbose):
print "reading block", block
blockadd=0
blocksub=0
if arepo==0:
if (verbose):
print "Gadget format"
blockadd=0
if arepo==1:
if (verbose):
print "Arepo format"
blockadd=1
if arepo==2:
if (verbose):
print "Arepo extended format"
blockadd=4
if no_masses==True:
if (verbose):
print "No mass block present"
blocksub=1
if parttype not in [-1,0,1,2,3,4,5]:
print "wrong parttype given"
sys.exit()
if os.path.exists(filename):
curfilename = filename
elif os.path.exists(filename+".0"):
curfilename = filename+".0"
else:
print "file not found:", filename
    print "and:", filename+".0"
sys.exit()
head = snapshot_header(curfilename)
format = head.format
print "FORMAT=", format
swap = head.swap
npart = head.npart
massarr = head.massarr
if np.all(nall==[0,0,0,0,0,0]):
nall = head.nall
filenum = head.filenum
redshift = head.redshift
time = head.time
del head
# - description of data blocks -
# add or change blocks as needed for your Gadget version
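# e.g. a hypothetical gravitational potential block "POT " holding one float per particle of every species
# could be added with an entry like the one below; block_num must match where the block actually sits in
# your snapshot layout:
#   elif block=="POT ":
#     data_for_type[:] = True
#     block_num = 6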
  data_for_type = np.zeros(6,bool) # set to True below for the species whose data is stored in this block; np.zeros(6,bool) starts all six entries as False
dt = np.float32 # data type of the data in the block
if block=="POS ":
data_for_type[:] = True
dt = np.dtype((np.float32,3))
block_num = 2
elif block=="VEL ":
data_for_type[:] = True
dt = np.dtype((np.float32,3))
block_num = 3
elif block=="ID ":
data_for_type[:] = True
dt = np.uint32
block_num = 4
  #only used for format 1, when the file structure is HEAD,POS,VEL,ID,ACCE
elif block=="ACCE": #This is only for the PIETRONI project
data_for_type[:] = True #This is only for the PIETRONI project
dt = np.dtype((np.float32,3)) #This is only for the PIETRONI project
block_num = 5 #This is only for the PIETRONI project
elif block=="MASS":
data_for_type[np.where(massarr==0)] = True
block_num = 5
if parttype>=0 and massarr[parttype]>0:
if (verbose):
print "filling masses according to massarr"
return np.ones(nall[parttype],dtype=dt)*massarr[parttype]
elif block=="U ":
data_for_type[0] = True
block_num = 6-blocksub
elif block=="RHO ":
data_for_type[0] = True
block_num = 7-blocksub
elif block=="VOL ":
data_for_type[0] = True
block_num = 8-blocksub
elif block=="CMCE":
data_for_type[0] = True
dt = np.dtype((np.float32,3))
block_num = 9-blocksub
elif block=="AREA":
data_for_type[0] = True
block_num = 10-blocksub
elif block=="NFAC":
data_for_type[0] = True
    dt = np.dtype(np.int64) # depends on code version: recent versions use int32, older ones MyIDType
block_num = 11-blocksub
elif block=="NE ":
data_for_type[0] = True
block_num = 8+blockadd-blocksub
elif block=="NH ":
data_for_type[0] = True
block_num = 9+blockadd-blocksub
elif block=="HSML":
data_for_type[0] = True
block_num = 10+blockadd-blocksub
elif block=="SFR ":
data_for_type[0] = True
block_num = 11+blockadd-blocksub
elif block=="MHI ": #This is only for the bias_HI project
data_for_type[0] = True #This is only for the bias_HI project
block_num = 12+blockadd-blocksub #This is only for the bias_HI project
elif block=="TEMP": #This is only for the bias_HI project
data_for_type[0] = True #This is only for the bias_HI project
block_num = 13+blockadd-blocksub #This is only for the bias_HI project
elif block=="AGE ":
data_for_type[4] = True
block_num = 12+blockadd-blocksub
elif block=="Z ":
data_for_type[0] = True
data_for_type[4] = True
block_num = 13+blockadd-blocksub
elif block=="BHMA":
data_for_type[5] = True
block_num = 14+blockadd-blocksub
elif block=="BHMD":
data_for_type[5] = True
block_num = 15+blockadd-blocksub
else:
print "Sorry! Block type", block, "not known!"
sys.exit()
# - end of block description -
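  # data_for_type marks the species whose data is present in the block on disk; actual_data_for_type marks
  # the species kept in the returned array (only the requested one when parttype>=0; for a full MASS read
  # it covers every species so that masses given via massarr can be filled in as well)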
actual_data_for_type = np.copy(data_for_type)
if parttype >= 0:
actual_data_for_type[:] = False
actual_data_for_type[parttype] = True
if data_for_type[parttype]==False:
print "Error: no data for specified particle type", parttype, "in the block", block
sys.exit()
elif block=="MASS":
actual_data_for_type[:] = True
allpartnum = np.int64(0)
species_offset = np.zeros(6,np.int64)
for j in range(6):
species_offset[j] = allpartnum
if actual_data_for_type[j]:
allpartnum += nall[j]
for i in range(filenum): # main loop over files
if filenum>1:
curfilename = filename+"."+str(i)
if i>0:
head = snapshot_header(curfilename)
npart = head.npart
del head
curpartnum = np.int32(0)
cur_species_offset = np.zeros(6,np.int64)
for j in range(6):
cur_species_offset[j] = curpartnum
if data_for_type[j]:
curpartnum += npart[j]
if parttype>=0:
actual_curpartnum = npart[parttype]
add_offset = cur_species_offset[parttype]
else:
actual_curpartnum = curpartnum
add_offset = np.int32(0)
offset,blocksize = find_block(curfilename,format,swap,block,block_num)
if i==0: # fix data type for ID if long IDs are used
if block=="ID ":
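        # dt was initialised to uint32; if the block is twice the size expected for 32-bit IDs,
        # the snapshot was written with 64-bit IDs, so switch to uint64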
if blocksize == np.dtype(dt).itemsize*curpartnum * 2:
dt = np.uint64
if np.dtype(dt).itemsize*curpartnum != blocksize:
print "something wrong with blocksize! expected =",np.dtype(dt).itemsize*curpartnum,"actual =",blocksize
sys.exit()
f = open(curfilename,'rb')
f.seek(offset + add_offset*np.dtype(dt).itemsize, os.SEEK_CUR)
curdat = np.fromfile(f,dtype=dt,count=actual_curpartnum) # read data
f.close()
if swap:
curdat.byteswap(True)
if i==0:
data = np.empty(allpartnum,dt)
for j in range(6):
if actual_data_for_type[j]:
if block=="MASS" and massarr[j]>0: # add mass block for particles for which the mass is specified in the snapshot header
data[species_offset[j]:species_offset[j]+npart[j]] = massarr[j]
else:
if parttype>=0:
data[species_offset[j]:species_offset[j]+npart[j]] = curdat
else:
data[species_offset[j]:species_offset[j]+npart[j]] = curdat[cur_species_offset[j]:cur_species_offset[j]+npart[j]]
species_offset[j] += npart[j]
del curdat
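  # Gadget snapshots store v_pec/sqrt(a), so multiplying by sqrt(a) (= sqrt(time) for cosmological runs)
  # converts to peculiar velocities in the code's velocity units (km/s by default)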
if physical_velocities and block=="VEL " and redshift!=0:
data *= math.sqrt(time)
return data
# ----- list all data blocks in a format 2 snapshot file -----
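# usage sketch (hypothetical filename):
#
# import readsnap as rs
# rs.list_format2_blocks("snap_063.0")  # prints block number, tag, offset and size for every block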
def list_format2_blocks(filename):
if os.path.exists(filename):
curfilename = filename
elif os.path.exists(filename+".0"):
curfilename = filename+".0"
else:
print "file not found:", filename
sys.exit()
head = snapshot_header(curfilename)
format = head.format
swap = head.swap
del head
print 'GADGET FORMAT ',format
if (format != 2):
print "# OFFSET SIZE"
else:
print "# BLOCK OFFSET SIZE"
print "-------------------------"
find_block(curfilename, format, swap, "XXXX", 0, only_list_blocks=True)
print "-------------------------"
def read_gadget_header(filename):
if os.path.exists(filename):
curfilename = filename
elif os.path.exists(filename+".0"):
curfilename = filename+".0"
else:
print "file not found:", filename
sys.exit()
head=snapshot_header(curfilename)
print 'npar=',head.npart
print 'nall=',head.nall
print 'a=',head.time
print 'z=',head.redshift
print 'masses=',head.massarr*1e10,'Msun/h'
print 'boxsize=',head.boxsize,'kpc/h'
print 'filenum=',head.filenum
print 'cooling=',head.cooling
print 'Omega_m,Omega_l=',head.omega_m,head.omega_l
print 'h=',head.hubble,'\n'
rhocrit=2.77536627e11 #h**2 M_sun/Mpc**3
rhocrit=rhocrit/1e9 #h**2M_sun/kpc**3
Omega_DM=head.nall[1]*head.massarr[1]*1e10/(head.boxsize**3*rhocrit)
print 'DM mass=',head.massarr[1]*1e10,'Omega_DM=',Omega_DM
if head.nall[2]>0 and head.massarr[2]>0:
Omega_NU=head.nall[2]*head.massarr[2]*1e10/(head.boxsize**3*rhocrit)
print 'NU mass=',head.massarr[2]*1e10,'Omega_NU=',Omega_NU
print 'Sum of neutrino masses=',Omega_NU*head.hubble**2*94.1745,'eV'
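# usage sketch (hypothetical filename):
#
# import readsnap as rs
# rs.read_gadget_header("snap_063.0")  # prints npart, nall, a, z, masses, box size, Omega_m, Omega_l, h and Omega_DM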

View file

@@ -0,0 +1,290 @@
# code for reading Subfind's subhalo_tab files
# usage e.g.:
#
# import readsubf
# cat = readsubf.subfind_catalog("./m_10002_h_94_501_z3_csf/",63,masstab=True)
# print cat.nsubs
# print "largest halo x position = ",cat.sub_pos[0][0]
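#
# group and subhalo masses come in Gadget code units (usually 1e10 Msun/h; check your configuration), e.g.:
# print "mass of first group =", cat.group_mass[0]*1e10, "Msun/h"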
import numpy as np
import os
import sys
class subfind_catalog:
def __init__(self, basedir, snapnum, group_veldisp = False, masstab = False, long_ids = False, swap = False):
self.filebase = basedir + "/groups_" + str(snapnum).zfill(3) + "/subhalo_tab_" + str(snapnum).zfill(3) + "."
#print
#print "reading subfind catalog for snapshot",snapnum,"of",basedir
if long_ids: self.id_type = np.uint64
else: self.id_type = np.uint32
self.group_veldisp = group_veldisp
self.masstab = masstab
filenum = 0
doneflag = False
skip_gr = 0
skip_sub = 0
while not doneflag:
curfile = self.filebase + str(filenum)
if (not os.path.exists(curfile)):
print "file not found:", curfile
sys.exit()
f = open(curfile,'rb')
ngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
totngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
nids = np.fromfile(f, dtype=np.uint32, count=1)[0]
totnids = np.fromfile(f, dtype=np.uint64, count=1)[0]
ntask = np.fromfile(f, dtype=np.uint32, count=1)[0]
nsubs = np.fromfile(f, dtype=np.uint32, count=1)[0]
totnsubs = np.fromfile(f, dtype=np.uint32, count=1)[0]
if swap:
ngroups = ngroups.byteswap()
totngroups = totngroups.byteswap()
nids = nids.byteswap()
totnids = totnids.byteswap()
ntask = ntask.byteswap()
nsubs = nsubs.byteswap()
totnsubs = totnsubs.byteswap()
if filenum == 0:
self.ngroups = totngroups
self.nids = totnids
self.nfiles = ntask
self.nsubs = totnsubs
self.group_len = np.empty(totngroups, dtype=np.uint32)
self.group_offset = np.empty(totngroups, dtype=np.uint32)
self.group_mass = np.empty(totngroups, dtype=np.float32)
self.group_pos = np.empty(totngroups, dtype=np.dtype((np.float32,3)))
self.group_m_mean200 = np.empty(totngroups, dtype=np.float32)
self.group_r_mean200 = np.empty(totngroups, dtype=np.float32)
self.group_m_crit200 = np.empty(totngroups, dtype=np.float32)
self.group_r_crit200 = np.empty(totngroups, dtype=np.float32)
self.group_m_tophat200 = np.empty(totngroups, dtype=np.float32)
self.group_r_tophat200 = np.empty(totngroups, dtype=np.float32)
if group_veldisp:
self.group_veldisp_mean200 = np.empty(totngroups, dtype=np.float32)
self.group_veldisp_crit200 = np.empty(totngroups, dtype=np.float32)
self.group_veldisp_tophat200 = np.empty(totngroups, dtype=np.float32)
self.group_contamination_count = np.empty(totngroups, dtype=np.uint32)
self.group_contamination_mass = np.empty(totngroups, dtype=np.float32)
self.group_nsubs = np.empty(totngroups, dtype=np.uint32)
self.group_firstsub = np.empty(totngroups, dtype=np.uint32)
self.sub_len = np.empty(totnsubs, dtype=np.uint32)
self.sub_offset = np.empty(totnsubs, dtype=np.uint32)
self.sub_parent = np.empty(totnsubs, dtype=np.uint32)
self.sub_mass = np.empty(totnsubs, dtype=np.float32)
self.sub_pos = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
self.sub_vel = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
self.sub_cm = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
self.sub_spin = np.empty(totnsubs, dtype=np.dtype((np.float32,3)))
self.sub_veldisp = np.empty(totnsubs, dtype=np.float32)
self.sub_vmax = np.empty(totnsubs, dtype=np.float32)
self.sub_vmaxrad = np.empty(totnsubs, dtype=np.float32)
self.sub_halfmassrad = np.empty(totnsubs, dtype=np.float32)
self.sub_id_mostbound = np.empty(totnsubs, dtype=self.id_type)
self.sub_grnr = np.empty(totnsubs, dtype=np.uint32)
if masstab:
self.sub_masstab = np.empty(totnsubs, dtype=np.dtype((np.float32,6)))
if ngroups > 0:
locs = slice(skip_gr, skip_gr + ngroups)
self.group_len[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
self.group_offset[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
self.group_mass[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_pos[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=ngroups)
self.group_m_mean200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_r_mean200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_m_crit200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_r_crit200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_m_tophat200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_r_tophat200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
if group_veldisp:
self.group_veldisp_mean200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_veldisp_crit200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_veldisp_tophat200[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_contamination_count[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
self.group_contamination_mass[locs] = np.fromfile(f, dtype=np.float32, count=ngroups)
self.group_nsubs[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
self.group_firstsub[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)
skip_gr += ngroups
if nsubs > 0:
locs = slice(skip_sub, skip_sub + nsubs)
self.sub_len[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
self.sub_offset[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
self.sub_parent[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
self.sub_mass[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
self.sub_pos[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
self.sub_vel[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
self.sub_cm[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
self.sub_spin[locs] = np.fromfile(f, dtype=np.dtype((np.float32,3)), count=nsubs)
self.sub_veldisp[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
self.sub_vmax[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
self.sub_vmaxrad[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
self.sub_halfmassrad[locs] = np.fromfile(f, dtype=np.float32, count=nsubs)
self.sub_id_mostbound[locs] = np.fromfile(f, dtype=self.id_type, count=nsubs)
self.sub_grnr[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)
if masstab:
self.sub_masstab[locs] = np.fromfile(f, dtype=np.dtype((np.float32,6)), count=nsubs)
skip_sub += nsubs
curpos = f.tell()
f.seek(0,os.SEEK_END)
if curpos != f.tell(): print "Warning: finished reading before EOF for file",filenum
f.close()
#print 'finished with file number',filenum,"of",ntask
filenum += 1
if filenum == self.nfiles: doneflag = True
if swap:
self.group_len.byteswap(True)
self.group_offset.byteswap(True)
self.group_mass.byteswap(True)
self.group_pos.byteswap(True)
self.group_m_mean200.byteswap(True)
self.group_r_mean200.byteswap(True)
self.group_m_crit200.byteswap(True)
self.group_r_crit200.byteswap(True)
self.group_m_tophat200.byteswap(True)
self.group_r_tophat200.byteswap(True)
if group_veldisp:
self.group_veldisp_mean200.byteswap(True)
self.group_veldisp_crit200.byteswap(True)
self.group_veldisp_tophat200.byteswap(True)
self.group_contamination_count.byteswap(True)
self.group_contamination_mass.byteswap(True)
self.group_nsubs.byteswap(True)
self.group_firstsub.byteswap(True)
self.sub_len.byteswap(True)
self.sub_offset.byteswap(True)
self.sub_parent.byteswap(True)
self.sub_mass.byteswap(True)
self.sub_pos.byteswap(True)
self.sub_vel.byteswap(True)
self.sub_cm.byteswap(True)
self.sub_spin.byteswap(True)
self.sub_veldisp.byteswap(True)
self.sub_vmax.byteswap(True)
self.sub_vmaxrad.byteswap(True)
self.sub_halfmassrad.byteswap(True)
self.sub_id_mostbound.byteswap(True)
self.sub_grnr.byteswap(True)
if masstab:
self.sub_masstab.byteswap(True)
#print
#print "number of groups =", self.ngroups
#print "number of subgroups =", self.nsubs
#if self.nsubs > 0:
# print "largest group of length",self.group_len[0],"has",self.group_nsubs[0],"subhalos"
# print
# code for reading Subfind's ID files
# usage e.g.:
#
# import readsubf
# ids = readsubf.subf_ids("./m_10002_h_94_501_z3_csf/", 63, 0, 100)
class subf_ids:
def __init__(self, basedir, snapnum, substart, sublen, swap = False, verbose = False, long_ids = False, read_all = False):
self.filebase = basedir + "/groups_" + str(snapnum).zfill(3) + "/subhalo_ids_" + str(snapnum).zfill(3) + "."
if (verbose):
print
print "reading subhalo IDs for snapshot",snapnum,"of",basedir
if long_ids: self.id_type = np.uint64
else: self.id_type = np.uint32
filenum = 0
doneflag = False
count=substart
found=0
while not doneflag:
curfile = self.filebase + str(filenum)
if (not os.path.exists(curfile)):
print "file not found:", curfile
sys.exit()
f = open(curfile,'rb')
Ngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
TotNgroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
NIds = np.fromfile(f, dtype=np.uint32, count=1)[0]
TotNids = np.fromfile(f, dtype=np.uint64, count=1)[0]
NTask = np.fromfile(f, dtype=np.uint32, count=1)[0]
Offset = np.fromfile(f, dtype=np.uint32, count=1)[0]
if read_all:
substart=0
sublen=TotNids
if swap:
Ngroups = Ngroups.byteswap()
TotNgroups = TotNgroups.byteswap()
NIds = NIds.byteswap()
TotNids = TotNids.byteswap()
NTask = NTask.byteswap()
Offset = Offset.byteswap()
if filenum == 0:
if (verbose):
print "Ngroups = ", Ngroups
          print "TotNgroups = ", TotNgroups
print "NIds = ", NIds
print "TotNids = ", TotNids
print "NTask = ", NTask
print "Offset = ", Offset
self.nfiles = NTask
self.SubLen=sublen
self.SubIDs = np.empty(sublen, dtype=self.id_type)
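      # the requested range [substart, substart+sublen) may span several files: Offset counts the IDs stored
      # in the previous files and count is the absolute index of the next ID still needed, so each file skips
      # count-Offset IDs and reads at most what it holds (nrem) or what is still requested (sublen)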
if count <= Offset+NIds:
nskip = count - Offset
nrem = Offset + NIds - count
if sublen > nrem:
n_to_read = nrem
else:
n_to_read = sublen
if n_to_read > 0:
if (verbose):
print filenum, n_to_read
if nskip > 0:
dummy=np.fromfile(f, dtype=self.id_type, count=nskip)
if (verbose):
print dummy
locs = slice(found, found + n_to_read)
dummy2 = np.fromfile(f, dtype=self.id_type, count=n_to_read)
if (verbose):
print dummy2
self.SubIDs[locs]=dummy2
found += n_to_read
count += n_to_read
sublen -= n_to_read
f.close()
filenum += 1
if filenum == self.nfiles: doneflag = True
if swap:
self.SubIDs.byteswap(True)