Update cosmotool 2nd part

This commit is contained in:
Guilhem Lavaux 2018-07-19 15:11:23 +03:00
parent 64e05fc180
commit 003bc39d4a
70 changed files with 8708 additions and 0 deletions


@@ -0,0 +1,23 @@
from ._cosmotool import *
from ._project import *
from ._cosmo_power import *
from ._cosmo_cic import *
from ._fast_interp import *
from .grafic import writeGrafic, writeWhitePhase, readGrafic, readWhitePhase
from .borg import read_borg_vol
from .cic import cicParticles

try:
    import pyopencl
    from .cl_cic import cl_CIC_Density
except:
    print("No opencl support")

from .simu import loadRamsesAll, simpleWriteGadget, SimulationBare
from .timing import time_block, timeit, timeit_quiet
from .bispectrum import bispectrum, powerspectrum
from .smooth import smooth_particle_density

try:
    from .fftw import CubeFT
except ImportError:
    print("No FFTW support")


@@ -0,0 +1,127 @@
import numpy as np

try:
    import cffi
    import os

    _ffi = cffi.FFI()
    _ffi.cdef("""
    void CosmoTool_compute_bispectrum(
         double *delta_hat, size_t Nx, size_t Ny, size_t Nz,
         size_t *Ntriangles,
         double* B, double delta_k, size_t Nk ) ;

    void CosmoTool_compute_powerspectrum(
         double *delta_hat, size_t Nx, size_t Ny, size_t Nz,
         size_t *Ncounts,
         double* P, double delta_k, size_t Nk );
    """)
    _pathlib = os.path.dirname(os.path.abspath(__file__))
    _lib = _ffi.dlopen(os.path.join(_pathlib, "_cosmo_bispectrum.so"))
except Exception as e:
    print(repr(e))
    raise RuntimeError("Failed to initialize _cosmo_bispectrum module")

def bispectrum(delta, delta_k, Nk, fourier=True):
    """bispectrum(delta, delta_k, Nk, fourier=True)

    Args:
      * delta: a 3d density field, can be Fourier modes if fourier is set to True
      * delta_k: width of a wavenumber bin
      * Nk: number of wavenumber bins
    Return:
      * a tuple (Ntriangles, B): the number of triangle configurations and the
        binned bispectrum, both 3d arrays of shape (Nk, Nk, Nk)
    """
    if len(delta.shape) != 3:
        raise ValueError("Invalid shape for delta")
    try:
        delta_k = float(delta_k)
        Nk = int(Nk)
    except (TypeError, ValueError):
        raise ValueError("delta_k and Nk must be numbers")

    if not fourier:
        delta = np.fft.rfftn(delta)
    N1,N2,N3 = delta.shape
    rN3 = (N3-1)*2

    delta_hat_buf = np.empty((N1*N2*N3*2),dtype=np.double)
    delta_hat_buf[::2] = delta.real.ravel()
    delta_hat_buf[1::2] = delta.imag.ravel()

    size_size = _ffi.sizeof("size_t")
    if size_size == 4:
        triangle_buf = np.zeros((Nk,Nk,Nk),dtype=np.int32)
    elif size_size == 8:
        triangle_buf = np.zeros((Nk,Nk,Nk),dtype=np.int64)
    else:
        raise RuntimeError("Internal error, do not know how to map size_t")

    B_buf = np.zeros((Nk*Nk*Nk*2), dtype=np.double)

    _lib.CosmoTool_compute_bispectrum(
        _ffi.cast("double *", delta_hat_buf.ctypes.data),
        N1, N2, rN3,
        _ffi.cast("size_t *", triangle_buf.ctypes.data),
        _ffi.cast("double *", B_buf.ctypes.data),
        delta_k,
        Nk)

    B_buf = B_buf.reshape((Nk,Nk,Nk,2))
    return triangle_buf, B_buf[...,0]+1j*B_buf[...,1]

def powerspectrum(delta, delta_k, Nk, fourier=True):
    """powerspectrum(delta, delta_k, Nk, fourier=True)

    Args:
      * delta: a 3d density field, can be Fourier modes if fourier is set to True
      * delta_k: width of a wavenumber bin
      * Nk: number of wavenumber bins
    Return:
      * a tuple (Ncounts, P): the number of modes and the binned power spectrum,
        both 1d arrays of length Nk
    """
    if len(delta.shape) != 3:
        raise ValueError("Invalid shape for delta")
    try:
        delta_k = float(delta_k)
        Nk = int(Nk)
    except (TypeError, ValueError):
        raise ValueError("delta_k and Nk must be numbers")

    if not fourier:
        delta = np.fft.rfftn(delta)
    N1,N2,N3 = delta.shape

    delta_hat_buf = np.empty((N1*N2*N3*2),dtype=np.double)
    delta_hat_buf[::2] = delta.real.ravel()
    delta_hat_buf[1::2] = delta.imag.ravel()

    size_size = _ffi.sizeof("size_t")
    if size_size == 4:
        count_buf = np.zeros((Nk,),dtype=np.int32)
    elif size_size == 8:
        count_buf = np.zeros((Nk,),dtype=np.int64)
    else:
        raise RuntimeError("Internal error, do not know how to map size_t")

    P_buf = np.zeros((Nk,), dtype=np.double)

    _lib.CosmoTool_compute_powerspectrum(
        _ffi.cast("double *", delta_hat_buf.ctypes.data),
        N1, N2, N3,
        _ffi.cast("size_t *", count_buf.ctypes.data),
        _ffi.cast("double *", P_buf.ctypes.data),
        delta_k,
        Nk)

    return count_buf, P_buf[...]

if __name__=="__main__":
    delta=np.zeros((16,16,16))
    delta[0,0,0]=1
    delta[3,2,1]=1
    b = powerspectrum(delta, 1, 16, fourier=False)
    a = bispectrum(delta, 1, 16, fourier=False)
    print(a[0].max())
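
# Usage sketch (illustration only, not part of the committed module above): calling the
# binned estimators through the package-level exports. The _example_ helper and its
# binning choices are ours; how P relates to the per-bin mode counts is determined by
# the compiled _cosmo_bispectrum library and is not asserted here.
def _example_spectra():
    import numpy as np
    from cosmotool import powerspectrum, bispectrum

    field = np.random.standard_normal((64, 64, 64))
    counts, P = powerspectrum(field, 0.1, 32, fourier=False)   # counts: modes per k-bin
    triangles, B = bispectrum(field, 0.1, 16, fourier=False)   # B has shape (16, 16, 16)
    return counts, P, triangles, B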


@@ -0,0 +1,265 @@
###
### BORG code is from J. Jasche
###

import io
import numpy as np
from numpy import *
import os.path
import array
import glob


class BorgVolume(object):

    def __init__(self, density, ranges):
        self.density = density
        self.ranges = ranges


def build_filelist(fdir):
    # builds list of all borg density fields which may be distributed over several directories
    fname_0=glob.glob(fdir[0]+'initial_density_*')
    fname_1=glob.glob(fdir[0]+'final_density_*')
    fdir=fdir[1:]  # eliminate first element
    for fd in fdir:
        fname_0=fname_0+glob.glob(fd+'initial_density_*')
        fname_1=fname_1+glob.glob(fd+'final_density_*')
    return fname_0, fname_1

def read_borg_vol(BORGFILE):
    """ Reading routine for BORG data
    """
    openfile=open(BORGFILE,'rb')

    period=0
    N0=0
    N1=0
    N2=0

    xmin=0
    xmax=0
    ymin=0
    ymax=0
    zmin=0
    zmax=0
    nlines=0

    while True:
        line=openfile.readline()
        s=line.rstrip('\n')
        r=s.rsplit(' ')

        if size(r)==5:
            if r[0]=="define":
                if r[1]=="Lattice":
                    N0=int(r[2])
                    N1=int(r[3])
                    N2=int(r[4])

        if size(r)==11:
            if r[4]=="BoundingBox":
                xmin=float(r[5])
                xmax=float(r[6])
                ymin=float(r[7])
                ymax=float(r[8])
                zmin=float(r[9])
                zmax=float(r[10].rstrip(','))

        if r[0]=='@1':
            break

    ranges=[]
    ranges.append(xmin)
    ranges.append(xmax)
    ranges.append(ymin)
    ranges.append(ymax)
    ranges.append(zmin)
    ranges.append(zmax)

    # now read data
    data=np.fromfile(openfile, '>f4')
    data=data.reshape(N2,N0,N1)
    return BorgVolume(data,ranges)

def read_spec( fname ):
    """ Reading routine for ARES spectrum samples
    """
    x,y=np.loadtxt( fname ,usecols=(0,1),unpack=True)
    return x, y


def read_bias_nmean( fname ):
    """ Reading routine for ARES bias data
    """
    x,b0,b1,nmean=np.loadtxt( fname ,usecols=(0,1,2,3),unpack=True)
    return x, b0, b1, nmean


def read_nmean( fname ):
    """ Reading routine for BORG bias data
    """
    x,nmean=np.loadtxt( fname ,usecols=(0,1),unpack=True)
    return x, nmean

def get_grid_values(xx, data, ranges):
    """ return values at grid positions
    """
    xmin=ranges[0]
    xmax=ranges[1]
    ymin=ranges[2]
    ymax=ranges[3]
    zmin=ranges[4]
    zmax=ranges[5]

    Lx= xmax-xmin
    Ly= ymax-ymin
    Lz= zmax-zmin

    Nx=shape(data)[0]
    Ny=shape(data)[1]
    Nz=shape(data)[2]

    dx=Lx/float(Nx)
    dy=Ly/float(Ny)
    dz=Lz/float(Nz)

    idx=(xx[:,0]-xmin)/dx
    idy=(xx[:,1]-ymin)/dy
    idz=(xx[:,2]-zmin)/dz

    idx=idx.astype(int)
    idy=idy.astype(int)
    idz=idz.astype(int)

    idflag=np.where( (idx>-1)*(idx<Nx)*(idy>-1)*(idy<Ny)*(idz>-1)*(idz<Nz) )

    flag=[False]*len(xx)
    vals=[-999.]*len(xx)

    flag=np.array(flag)
    vals=np.array(vals)

    flag[idflag]=True
    vals[idflag]=data[idx[idflag],idy[idflag],idz[idflag]]

    return vals, flag

def get_mean_density(fdir, smin, step):
    """ estimate ensemble mean
    """
    import progressbar as pb

    print('-'*60)
    print('Get 3D ensemble mean density field')
    print('-'*60)

    fname0 = fdir + 'initial_density_'+str(0)+'.dat'
    fname1 = fdir + 'final_density_'+str(0)+'.dat'

    borg0 = read_borg_vol(fname0)
    ranges = borg0.ranges
    MEAN0 = borg0.density*0.
    VAR0=copy(MEAN0)
    MEAN1=copy(MEAN0)
    VAR1=copy(MEAN0)
    norm=0.

    idat=smin

    fname0 = fdir + 'initial_density_'+str(idat)+'.dat'
    fname1 = fdir + 'final_density_'+str(idat)+'.dat'

    #and (idat<smin+1000)
    while((os.path.exists(fname0))):
        auxborg0=read_borg_vol(fname0)
        auxborg1=read_borg_vol(fname1)

        auxx0=auxborg0.density
        auxx1=auxborg1.density

        MEAN0+=auxx0
        VAR0+=auxx0**2
        MEAN1+=auxx1
        VAR1+=auxx1**2

        norm+=1
        idat+=step

        fname0 = fdir + 'initial_density_'+str(idat)+'.dat'
        fname1 = fdir + 'final_density_'+str(idat)+'.dat'

        del auxborg0
        del auxborg1

    MEAN0/=norm
    VAR0/=norm
    VAR0-=MEAN0**2
    VAR0=sqrt(fabs(VAR0))

    MEAN1/=norm
    VAR1/=norm
    VAR1-=MEAN1**2
    VAR1=sqrt(fabs(VAR1))

    return MEAN0,VAR0,MEAN1,VAR1,ranges

def get_mean_density_fdir(fdir, init, steps):
    """ estimate ensemble mean
    """
    import progressbar as pb

    print('-'*60)
    print('Get 3D ensemble mean density field')
    print('-'*60)

    fname0,fname1=build_filelist(fdir)

    fname0=fname0[init::steps]
    fname1=fname1[init::steps]

    borg=read_borg_vol(fname0[0])
    MEAN0 = borg.density
    RANGES0 = borg.ranges
    MEAN0=MEAN0*0.
    VAR0=copy(MEAN0)
    MEAN1=copy(MEAN0)
    VAR1=copy(MEAN0)
    norm0=0.
    norm1=0.

    for fn in pb.ProgressBar(len(fname0))(fname0):
        auxborg=read_borg_vol(fn)
        auxdata0 = auxborg.density

        MEAN0+=auxdata0
        VAR0+=auxdata0**2.
        norm0+=1.

        del auxdata0
        del auxborg

    for fn in pb.ProgressBar(len(fname1))(fname1):
        auxborg1=read_borg_vol(fn)
        auxdata1 = auxborg1.density

        MEAN1+=auxdata1
        VAR1+=auxdata1**2.
        norm1+=1.

        del auxdata1
        del auxborg1

    MEAN0/=norm0
    VAR0/=norm0
    VAR0-=MEAN0**2
    VAR0=sqrt(fabs(VAR0))

    MEAN1/=norm1
    VAR1/=norm1
    VAR1-=MEAN1**2
    VAR1=sqrt(fabs(VAR1))

    return MEAN0,VAR0,MEAN1,VAR1,RANGES0
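
# Usage sketch (illustration only, not part of the committed module above): load one BORG
# density sample and probe it at two positions. The file name is a placeholder and the
# positions are assumed to lie inside borg.ranges.
def _example_read_borg(path="initial_density_0.dat"):
    import numpy as np
    from cosmotool.borg import read_borg_vol, get_grid_values

    borg = read_borg_vol(path)
    xx = np.array([[10., 10., 10.], [20., 30., 40.]])
    vals, flag = get_grid_values(xx, borg.density, borg.ranges)
    return vals[flag]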


@@ -0,0 +1,42 @@
import numexpr as ne
import numpy as np


def cicParticles(particles, L, N):

    if not isinstance(N, (int, np.integer)):
        raise TypeError("N must be a numeric type")

    def shifted(i, t):
        a = np.empty(i[0].size, dtype=np.int64)
        return ne.evaluate('(i2+t2)%N + N*((i1+t1)%N + N*((i0+t0)%N) )', local_dict={'i2':i[2], 't2':t[2], 'i1':i[1], 't1':t[1], 'i0':i[0], 't0':t[0], 'N':N}, out=a)

    i = []
    r = []

    for d in range(3):
        q = ne.evaluate('(p%L)*N/L', local_dict={'p':particles[d], 'L':L, 'N':N })
        o = np.empty(q.size, dtype=np.int64)
        o[:] = np.floor(q)
        i.append(o)
        r.append(ne.evaluate('q-o'))

    D = {'a':r[0],'b':r[1],'c':r[2]}
    N3 = N*N*N

    def accum(density, ss, op):
        d0 = np.bincount(shifted(i, ss), weights=ne.evaluate(op, local_dict=D), minlength=N3)
        ne.evaluate('d + d0', local_dict={'d':density, 'd0':d0}, out=density)

    # must start from zero: the eight calls below only accumulate contributions
    density = np.zeros(N3, dtype=np.float64)

    accum(density, (1,1,1), 'a * b * c ')
    accum(density, (1,1,0), 'a * b * (1-c)')
    accum(density, (1,0,1), 'a * (1-b) * c ')
    accum(density, (1,0,0), 'a * (1-b) * (1-c)')
    accum(density, (0,1,1), '(1-a) * b * c ')
    accum(density, (0,1,0), '(1-a) * b * (1-c)')
    accum(density, (0,0,1), '(1-a) * (1-b) * c ')
    accum(density, (0,0,0), '(1-a) * (1-b) * (1-c)')

    return density.reshape((N,N,N))
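
# Usage sketch (illustration only, not part of the committed module above): CIC-deposit
# random particles onto a 64^3 grid. `particles` is indexed as particles[d][n] with d the
# coordinate axis, matching the loop over particles[d] in cicParticles; the numbers are
# arbitrary placeholders.
def _example_cic():
    import numpy as np
    from cosmotool import cicParticles

    L, N = 100.0, 64
    pos = np.random.uniform(0, L, size=(3, 10000))
    rho = cicParticles(pos, L, N)   # shape (N, N, N); each particle contributes unit weight
    return rho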


@@ -0,0 +1,239 @@
from .timing import time_block as time_block_orig
import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array
from contextlib import contextmanager

TIMER_ACTIVE=False


@contextmanager
def time_block_dummy(*args):
    yield


if TIMER_ACTIVE:
    time_block=time_block_orig
else:
    time_block=time_block_dummy


CIC_PREKERNEL='''
#define NDIM {ndim}
#define CENTERED {centered}
typedef {cicType} BASIC_TYPE;
'''

CIC_KERNEL='''///CL///
#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable

__kernel void init_pcell(__global int *p_cell, const int value)
{
  int i = get_global_id(0);
  p_cell[i] = value;
}

__kernel void build_indices(__global const BASIC_TYPE *pos,
                            __global int *part_mesh, __global int *part_list, const int N, const BASIC_TYPE delta, const BASIC_TYPE shift_pos)
{
  int i_part = get_global_id(0);
  long shifter = 1;
  long idx = 0;
  int d;

  for (d = 0; d < NDIM; d++) {
    BASIC_TYPE x;

    if (CENTERED)
      x = pos[i_part*NDIM + d] - shift_pos;
    else
      x = pos[i_part*NDIM + d];

    int m = (int)floor(x*delta) %% N;

    idx += shifter * m;
    shifter *= N;
  }

  // Head of the list
  int initial_elt = atom_xchg(&part_mesh[idx], i_part);
  if (initial_elt == -1) {
    return;
  }
  // Point the next pointer of old_end to i_part
  part_list[i_part] = initial_elt;
}

__kernel void reverse_list(__global int *part_mesh, __global int *part_list)
{
  int mid = get_global_id(0);

  int current_part = part_mesh[mid];
  if (current_part >= 0) {
    int next_part = part_list[current_part];

    part_list[current_part] = -1;
    while (next_part != -1) {
      int p = part_list[next_part];
      part_list[next_part] = current_part;
      current_part = next_part;
      next_part = p;
    }
    part_mesh[mid] = current_part;
  }
}

__kernel void dance(__global const BASIC_TYPE *pos,
                    __global BASIC_TYPE *density,
                    __global int *part_mesh, __global int *part_list, const int N, const BASIC_TYPE delta, const BASIC_TYPE shift_pos)
{
  int m[NDIM];
  int shifter = 1;
  int i;
  int first, i_part;
  int idx = 0;

  for (i = 0; i < NDIM; i++) {
    m[i] = get_global_id(i);
    idx += shifter * m[i];
    shifter *= N;
  }

  first = 1;

//BEGIN LOOPER
%(looperFor)s
//END LOOPER

  int idx_dance = 0;
  BASIC_TYPE w = 0;
//LOOPER INDEX
  int r[NDIM] = { %(looperVariables)s };
//END LOOPER

  i_part = part_mesh[idx];
  while (i_part != -1) {
    BASIC_TYPE w0 = 1;

    for (int d = 0; d < NDIM; d++) {
      BASIC_TYPE x;
      BASIC_TYPE q;
      BASIC_TYPE dx;

      if (CENTERED)
        x = pos[i_part*NDIM + d]*delta - shift_pos;
      else
        x = pos[i_part*NDIM + d]*delta;

      q = floor(x);
      dx = x - q;

      w0 *= (r[d] == 1) ? dx : ((BASIC_TYPE)1-dx);
    }

    i_part = part_list[i_part];
    w += w0;
  }

  shifter = 1;
  for (i = 0; i < NDIM; i++) {
    idx_dance += shifter * ((m[i]+r[i])%%N);
    shifter *= N;
  }

  density[idx_dance] += w;

  // One dance done. Wait for everybody for the next iteration
  barrier(CLK_GLOBAL_MEM_FENCE);
%(looperForEnd)s
}
'''


class CIC_CL(object):

    def __init__(self, context, ndim=2, ktype=np.float32, centered=False):
        global CIC_PREKERNEL, CIC_KERNEL

        translator = {}
        if ktype == np.float32:
            translator['cicType'] = 'float'
            pragmas = ''
        elif ktype == np.float64:
            translator['cicType'] = 'double'
            pragmas = '#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n'
        else:
            raise ValueError("Invalid ktype")

        # 2 dimensions
        translator['ndim'] = ndim
        translator['centered'] = '1' if centered else '0'
        looperVariables = ','.join(['id%d' % d for d in range(ndim)])
        looperFor = '\n'.join(['for (int id{dim}=0; id{dim} < 2; id{dim}++) {{'.format(dim=d) for d in range(ndim)])
        looperForEnd = '}' * ndim

        kern = pragmas + CIC_PREKERNEL.format(**translator) + (CIC_KERNEL % {'looperVariables': looperVariables, 'looperFor': looperFor, 'looperForEnd':looperForEnd})
        self.kern_code = kern
        self.ctx = context
        self.queue = cl.CommandQueue(context)  #, properties=cl.OUT_OF_ORDER_EXEC_MODE_ENABLE)
        self.ktype = ktype
        self.ndim = ndim
        self.prog = cl.Program(self.ctx, kern).build()
        self.centered = centered

    def run(self, particles, Ng, L):
        assert particles.strides[1] == self.ktype().itemsize  # This is C-ordering
        assert particles.shape[1] == self.ndim

        print("Start again")

        ndim = self.ndim
        part_pos = cl_array.to_device(self.queue, particles)
        part_mesh = cl_array.empty(self.queue, (Ng,)*ndim, np.int32, order='C')
        density = cl_array.zeros(self.queue, (Ng,)*ndim, self.ktype, order='C')
        part_list = cl_array.empty(self.queue, (particles.shape[0],), np.int32, order='C')
        shift_pos = 0.5*L if self.centered else 0

        if True:
            delta = Ng/L

            with time_block("Init pcell array"):
                e = self.prog.init_pcell(self.queue, (Ng**ndim,), None, part_mesh.data, np.int32(-1))
                e.wait()

            with time_block("Init idx array"):
                e = self.prog.init_pcell(self.queue, (particles.shape[0],), None, part_list.data, np.int32(-1))
                e.wait()

            with time_block("Build indices"):
                self.prog.build_indices(self.queue, (particles.shape[0],), None,
                        part_pos.data, part_mesh.data, part_list.data, np.int32(Ng), self.ktype(delta), self.ktype(shift_pos))

        if True:
            with time_block("Reverse list"):
                lastevt = self.prog.reverse_list(self.queue, (Ng**ndim,), None, part_mesh.data, part_list.data)
            # We require pmax pass, particles are ordered according to part_idx
            with time_block("dance"):
                self.prog.dance(self.queue, (Ng,)*ndim, None, part_pos.data, density.data, part_mesh.data, part_list.data, np.int32(Ng), self.ktype(delta), self.ktype(shift_pos))

        self.queue.finish()
        del part_pos
        del part_mesh
        del part_list
        with time_block("download"):
            return density.get()


def cl_CIC_Density(particles, Ngrid, Lbox, context=None, periodic=True, centered=False):
    """
    cl_CIC_Density(particles (Nx3), Ngrid, Lbox, context=None, periodic=True, centered=False)
    """
    if context is None:
        context = cl.create_some_context()

    ktype = particles.dtype
    if ktype != np.float32 and ktype != np.float64:
        raise ValueError("particles may only be float32 or float64")

    if len(particles.shape) != 2 or particles.shape[1] != 3:
        raise ValueError("particles may only be a Nx3 array")

    cic = CIC_CL(context, ndim=3, centered=centered, ktype=ktype)

    return cic.run(particles, Ngrid, Lbox)
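
# Usage sketch (illustration only, not part of the committed module above): deposit
# float32 particles (shape Nx3, coordinates in [0, Lbox)) with the OpenCL kernel.
# Requires a usable pyopencl platform; cl.create_some_context() may prompt for a device.
def _example_cl_cic():
    import numpy as np
    from cosmotool import cl_CIC_Density

    Lbox = 100.0
    parts = np.random.uniform(0, Lbox, size=(10000, 3)).astype(np.float32)
    return cl_CIC_Density(parts, 64, Lbox)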


@@ -0,0 +1 @@
install_prefix="@CMAKE_INSTALL_PREFIX@"


@@ -0,0 +1,211 @@
import numpy as np
from contextlib import contextmanager


class ProgrammableParticleLoad(object):

    @staticmethod
    def main_script(source, particles, aname="default", aux=None):
        import vtk
        from vtk.util import numpy_support as ns

        out = source.GetOutput()
        vv = vtk.vtkPoints()

        assert len(particles.shape) == 2
        assert particles.shape[1] == 3

        vv.SetData(ns.numpy_to_vtk(np.ascontiguousarray(particles.astype(np.float64)), deep=1))
        vv.SetDataTypeToDouble()

        out.Allocate(1,1)
        out.SetPoints(vv)

        if aux is not None:
            for n,a in aux:
                a_vtk = ns.numpy_to_vtk(np.ascontiguousarray(a.astype(np.float64)), deep=1)
                a_vtk.SetName(n)
                out.GetPointData().AddArray(a_vtk)

        out.InsertNextCell(vtk.VTK_VERTEX, particles.shape[0], range(particles.shape[0]))

    @staticmethod
    def request_information(source):
        pass


class ProgrammableParticleHistoryLoad(object):

    @staticmethod
    def main_script(source, particles, velocities=None, aname="default", addtime=False):
        import vtk
        from vtk.util import numpy_support as ns

        out = source.GetOutput()
        vv = vtk.vtkPoints()

        assert len(particles.shape) == 3
        assert particles.shape[2] == 3

        if velocities is not None:
            for i,j in zip(velocities.shape,particles.shape):
                assert i==j

        Ntime,Npart,_ = particles.shape

        vv.SetData(ns.numpy_to_vtk(np.ascontiguousarray(particles.reshape((Ntime*Npart,3)).astype(np.float64)), deep=1))
        vv.SetDataTypeToDouble()

        out.Allocate(1,1)
        out.SetPoints(vv)

        if velocities is not None:
            print("Adding velocities")
            vel_vtk = ns.numpy_to_vtk(np.ascontiguousarray(velocities.reshape((Ntime*Npart,3)).astype(np.float64)), deep=1)
            vel_vtk.SetName("velocities")
            out.GetPointData().AddArray(vel_vtk)

        if addtime:
            timearray = np.arange(Ntime)[:,None].repeat(Npart, axis=1).reshape(Ntime*Npart)
            timearray = ns.numpy_to_vtk(np.ascontiguousarray(timearray.astype(np.float64)), deep=1)
            timearray.SetName("timearray")
            out.GetPointData().AddArray(timearray)

        out.InsertNextCell(vtk.VTK_VERTEX, particles.shape[0], range(particles.shape[0]))
        for p in range(Npart):
            out.InsertNextCell(vtk.VTK_LINE, Ntime, range(p, p + Npart*Ntime, Npart))

    @staticmethod
    def request_information(source):
        pass


class ProgrammableDensityLoad(object):

    @staticmethod
    def main_script(source, density, extents=None, aname="default"):
        import vtk
        from vtk.util import numpy_support

        if len(density.shape) > 3:
            _, Nx, Ny, Nz = density.shape
        else:
            Nx, Ny, Nz = density.shape

        ido = source.GetOutput()
        ido.SetDimensions(Nx, Ny, Nz)

        if extents is not None:
            origin = extents[:6:2]
            spacing = (extents[1]-extents[0])/Nx, (extents[3]-extents[2])/Ny, (extents[5]-extents[4])/Nz
        else:
            origin = (-1, -1, -1)
            spacing = 2.0 / Nx, 2.0/Ny, 2.0/Nz

        ido.SetOrigin(*origin)
        ido.SetSpacing(*spacing)
        ido.SetExtent([0,Nx-1,0,Ny-1,0,Nz-1])

        if len(density.shape) > 3 and density.shape[0] == 3:
            N = Nx*Ny*Nz
            density = density.transpose().astype(np.float64).reshape((N,3))
            arr = numpy_support.numpy_to_vtk(density, deep=1)
        else:
            arr = numpy_support.numpy_to_vtk(density.transpose().astype(np.float64).ravel(), deep=1)

        arr.SetName(aname)
        ido.GetPointData().AddArray(arr)

    @staticmethod
    def request_information(source, density=None, dims=None):
        import vtk

        Nx = Ny = Nz = None
        if density is not None:
            Nx, Ny, Nz = density.shape
        elif dims is not None:
            Nx, Ny, Nz = dims
        else:
            raise ValueError("Need at least a density or dims")

        source.GetExecutive().GetOutputInformation(0).Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), 0, Nx-1, 0, Ny-1, 0, Nz-1)

    @staticmethod
    def prepare_timesteps_info(algorithm, timesteps):

        def SetOutputTimesteps(algorithm, timesteps):
            executive = algorithm.GetExecutive()
            outInfo = executive.GetOutputInformation(0)

            outInfo.Remove(executive.TIME_STEPS())
            for timestep in timesteps:
                outInfo.Append(executive.TIME_STEPS(), timestep)

            outInfo.Remove(executive.TIME_RANGE())
            outInfo.Append(executive.TIME_RANGE(), timesteps[0])
            outInfo.Append(executive.TIME_RANGE(), timesteps[-1])

        SetOutputTimesteps(algorithm, timesteps)

    @staticmethod
    @contextmanager
    def get_timestep(algorithm):

        def GetUpdateTimestep(algorithm):
            """Returns the requested time value, or None if not present"""
            executive = algorithm.GetExecutive()
            outInfo = executive.GetOutputInformation(0)
            if not outInfo.Has(executive.UPDATE_TIME_STEP()):
                return None
            return outInfo.Get(executive.UPDATE_TIME_STEP())

        # This is the requested time-step. This may not be exactly equal to the
        # timesteps published in RequestInformation(). Your code must handle that
        # correctly.
        req_time = GetUpdateTimestep(algorithm)

        output = algorithm.GetOutput()

        yield req_time

        # Now mark the timestep produced.
        output.GetInformation().Set(output.DATA_TIME_STEP(), req_time)


def load_borg(pdo, restart_name, mcmc_name, info=False, aname="BORG"):
    import h5py as h5

    with h5.File(restart_name) as f:
        N0 = f["/info/scalars/N0"][:]
        N1 = f["/info/scalars/N1"][:]
        N2 = f["/info/scalars/N2"][:]
        L0 = f["/info/scalars/L0"][:]
        L1 = f["/info/scalars/L1"][:]
        L2 = f["/info/scalars/L2"][:]
        c0 = f["/info/scalars/corner0"][:]
        c1 = f["/info/scalars/corner1"][:]
        c2 = f["/info/scalars/corner2"][:]

    if not info:
        with h5.File(mcmc_name) as f:
            d = f["/scalars/BORG_final_density"][:]+1

        ProgrammableDensityLoad.main_script(pdo, d, extents=[c0,c0+L0,c1,c1+L1,c2,c2+L2], aname=aname)
    else:
        ProgrammableDensityLoad.request_information(pdo, dims=[N0,N1,N2])


def load_borg_galaxies(pdo, restart_name, cid=0, info=False, aname="Galaxies"):
    import h5py as h5

    with h5.File(restart_name) as f:
        gals = f['/info/galaxy_catalog_%d/galaxies' % cid]
        ra = gals['phi'][:]
        dec = gals['theta'][:]
        r = gals['r'][:]

    if not info:
        x = r * np.cos(ra)*np.cos(dec)
        y = r * np.sin(ra)*np.cos(dec)
        z = r * np.sin(dec)
        parts = np.array([x,y,z]).transpose()

        ProgrammableParticleLoad.main_script(pdo, parts)

@@ -0,0 +1,45 @@
import pyfftw
import multiprocessing
import numpy as np
import numexpr as ne


class CubeFT(object):

    def __init__(self, L, N, max_cpu=-1, width=32):

        if width==32:
            fourier_type='complex64'
            real_type='float32'
        elif width==64:
            fourier_type='complex128'
            real_type='float64'
        else:
            raise ValueError("Invalid bitwidth (must be 32 or 64)")

        self.N = N
        self.align = pyfftw.simd_alignment
        self.L = float(L)
        self.max_cpu = multiprocessing.cpu_count() if max_cpu < 0 else max_cpu
        self._dhat = pyfftw.n_byte_align_empty((self.N,self.N,self.N//2+1), self.align, dtype=fourier_type)
        self._density = pyfftw.n_byte_align_empty((self.N,self.N,self.N), self.align, dtype=real_type)
        self._irfft = pyfftw.FFTW(self._dhat, self._density, axes=(0,1,2), direction='FFTW_BACKWARD', threads=self.max_cpu)  #, normalize_idft=False)
        self._rfft = pyfftw.FFTW(self._density, self._dhat, axes=(0,1,2), threads=self.max_cpu)  #, normalize_idft=False)

    def rfft(self):
        return ne.evaluate('c*a', out=self._dhat, local_dict={'c':self._rfft(normalise_idft=False),'a':(self.L/self.N)**3}, casting='unsafe')

    def irfft(self):
        return ne.evaluate('c*a', out=self._density, local_dict={'c':self._irfft(normalise_idft=False),'a':(1/self.L)**3}, casting='unsafe')

    def get_dhat(self):
        return self._dhat

    def set_dhat(self, in_dhat):
        self._dhat[:] = in_dhat

    dhat = property(get_dhat, set_dhat, None)

    def get_density(self):
        return self._density

    def set_density(self, d):
        self._density[:] = d

    density = property(get_density, set_density, None)
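
# Usage sketch (illustration only, not part of the committed module above): forward
# transform of a random cube, with the (L/N)**3 normalisation applied by rfft(). Needs
# pyfftw at runtime; the box size is an arbitrary placeholder.
def _example_cubeft():
    import numpy as np
    from cosmotool import CubeFT

    cube = CubeFT(100.0, 64, width=32)
    cube.density = np.random.standard_normal((64, 64, 64)).astype(np.float32)
    dhat = cube.rfft()              # complex64, shape (64, 64, 33)
    return dhat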


@@ -0,0 +1,108 @@
import struct
import numpy as np


def readGrafic(filename):
    r"""This function reads a grafic file.

    Arguments:
        filename (str): the path to the grafic file

    Returns:
        a tuple containing:
          * the array held in the grafic file
          * the size of the box
          * the scale factor
          * the mean matter density :math:`\Omega_\mathrm{m}`
          * the dark energy density :math:`\Omega_\Lambda`
          * the hubble constant, relative to 100 km/s/Mpc
          * xoffset
          * yoffset
          * zoffset
    """
    with open(filename, mode="rb") as f:
        p = struct.unpack("IIIIffffffffI", f.read(4*11 + 2*4))
        checkPoint0, Nx, Ny, Nz, delta, xoff, yoff, zoff, scalefac, omega0, omegalambda0, h, checkPoint1 = p
        if checkPoint0 != checkPoint1 or checkPoint0 != 4*11:
            raise ValueError("Invalid unformatted access")

        a = np.empty((Nx,Ny,Nz), dtype=np.float32)

        BoxSize = delta * Nx * h
        xoff *= h
        yoff *= h
        zoff *= h

        checkPoint = 4*Ny*Nz
        for i in range(Nx):
            checkPoint = struct.unpack("I", f.read(4))[0]
            if checkPoint != 4*Ny*Nz:
                raise ValueError("Invalid unformatted access")

            a[i, :, :] = np.fromfile(f, dtype=np.float32, count=Ny*Nz).reshape((Ny, Nz))

            checkPoint = struct.unpack("I", f.read(4))[0]
            if checkPoint != 4*Ny*Nz:
                raise ValueError("Invalid unformatted access")

    return a, BoxSize, scalefac, omega0, omegalambda0, h, xoff, yoff, zoff


def writeGrafic(filename, field, BoxSize, scalefac, **cosmo):
    with open(filename, mode="wb") as f:
        checkPoint = 4*11

        Nx,Ny,Nz = field.shape
        delta = BoxSize/Nx/cosmo['h']
        bad = 0.0

        f.write(struct.pack("IIIIffffffffI", checkPoint,
                            Nx, Ny, Nz,
                            delta,
                            bad, bad, bad,
                            scalefac,
                            cosmo['omega_M_0'], cosmo['omega_lambda_0'], 100*cosmo['h'], checkPoint))

        checkPoint = 4*Ny*Nz

        field = field.reshape(field.shape, order='F')
        for i in range(Nx):
            f.write(struct.pack("I", checkPoint))
            f.write(field[i].astype(np.float32).tostring())
            f.write(struct.pack("I", checkPoint))


def writeWhitePhase(filename, field):
    with open(filename, mode="wb") as f:
        Nx,Ny,Nz = field.shape

        checkPoint = 4*4
        f.write(struct.pack("IIIIII", checkPoint, Nx, Ny, Nz, 0, checkPoint))

        field = field.reshape(field.shape, order='F')

        checkPoint = struct.pack("I", 4*Ny*Nz)
        for i in range(Nx):
            f.write(checkPoint)
            f.write(field[i].astype(np.float32).tostring())
            f.write(checkPoint)


def readWhitePhase(filename):
    with open(filename, mode="rb") as f:
        _, Nx, Ny, Nz, _, _ = struct.unpack("IIIIII", f.read(4*4+2*4))

        a = np.empty((Nx,Ny,Nz), dtype=np.float32)

        checkPoint_ref = 4*Ny*Nz

        for i in range(Nx):
            if struct.unpack("I", f.read(4))[0] != checkPoint_ref:
                raise ValueError("Invalid unformatted access")

            b = np.fromfile(f, dtype=np.float32, count=Ny*Nz).reshape((Ny, Nz))
            if i==0:
                print(b)
            a[i, :, :] = b

            if struct.unpack("I", f.read(4))[0] != checkPoint_ref:
                raise ValueError("Invalid unformatted access")

    return a
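
# Usage sketch (illustration only, not part of the committed module above): write a
# white-noise cube to a grafic file and read it back. The cosmological parameters are
# placeholders using the keyword names expected by writeGrafic.
def _example_grafic_roundtrip(path="ic_test.dat"):
    import numpy as np
    from cosmotool import writeGrafic, readGrafic

    field = np.random.standard_normal((32, 32, 32)).astype(np.float32)
    writeGrafic(path, field, 100.0, 0.01, omega_M_0=0.30, omega_lambda_0=0.70, h=0.68)
    a, BoxSize, scalefac, omega0, omegalambda0, h, xoff, yoff, zoff = readGrafic(path)
    return a, BoxSize, scalefac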


@@ -0,0 +1,154 @@
import warnings
import numpy as np
from ._cosmotool import *


class SimulationBare(PySimulationBase):

    def __init__(self, *args):
        if len(args) == 0:
            return

        if not isinstance(args[0], PySimulationBase):
            raise TypeError("Simulation object to mirror must be a PySimulationBase")
        s = args[0]

        self.positions = [q.copy() for q in s.getPositions()] if s.getPositions() is not None else None
        self.velocities = [q.copy() for q in s.getVelocities()] if s.getVelocities() is not None else None
        self.identifiers = s.getIdentifiers().copy() if s.getIdentifiers() is not None else None
        self.types = s.getTypes().copy() if s.getTypes() is not None else None
        self.boxsize = s.getBoxsize()
        self.time = s.getTime()
        self.Hubble = s.getHubble()
        self.Omega_M = s.getOmega_M()
        self.Omega_Lambda = s.getOmega_Lambda()
        try:
            self.masses = s.getMasses().copy() if s.getMasses() is not None else None
        except Exception as e:
            warnings.warn("Unexpected exception: " + repr(e))

    def merge(self, other):

        def _safe_merge(a, b):
            if b is not None:
                if a is not None:
                    a = [np.append(q, r) for q,r in zip(a,b)]
                else:
                    a = b
            return a

        def _safe_merge0(a, b):
            if b is not None:
                if a is not None:
                    a = np.append(a, b)
                else:
                    a = b
            return a

        assert self.time == other.getTime()
        assert self.Hubble == other.getHubble()
        assert self.boxsize == other.getBoxsize()
        assert self.Omega_M == other.getOmega_M()
        assert self.Omega_Lambda == other.getOmega_Lambda()

        self.positions = _safe_merge(self.positions, other.getPositions())
        self.velocities = _safe_merge(self.velocities, other.getVelocities())
        self.identifiers = _safe_merge0(self.identifiers, other.getIdentifiers())
        self.types = _safe_merge0(self.types, other.getTypes())
        try:
            self.masses = _safe_merge0(self.masses, other.getMasses())
        except Exception as e:
            warnings.warn("Unexpected exception: " + repr(e))
            self.masses = None

    def getTypes(self):
        return self.types

    def getPositions(self):
        return self.positions

    def getVelocities(self):
        return self.velocities

    def getIdentifiers(self):
        return self.identifiers

    def getMasses(self):
        return self.masses

    def getTime(self):
        return self.time

    def getHubble(self):
        return self.Hubble

    def getBoxsize(self):
        return self.boxsize

    def getOmega_M(self):
        return self.Omega_M

    def getOmega_Lambda(self):
        return self.Omega_Lambda


def simpleWriteGadget(filename, positions, boxsize=1.0, Hubble=100, Omega_M=0.30, time=1, velocities=None, identifiers=None):

    s = SimulationBare()

    s.positions = positions

    if velocities:
        s.velocities = velocities
    else:
        s.velocities = [np.zeros(positions[0].size,dtype=np.float32)]*3

    if identifiers:
        s.identifiers = identifiers
    else:
        s.identifiers = np.arange(positions[0].size, dtype=np.int64)

    s.Hubble = Hubble
    s.time = time
    s.Omega_M = Omega_M
    s.Omega_Lambda = 1-Omega_M
    s.boxsize = boxsize

    writeGadget(filename, s)


def loadRamsesAll(basepath, snapshot_id, **kwargs):
    """This function loads an entire RAMSES snapshot in memory. The keyword arguments are the ones
    accepted by cosmotool.loadRamses

    Args:
        basepath (str): The base path of the snapshot (i.e. the directory holding the output_XXXXX directories)
        snapshot_id (int): The identifier of the snapshot to load.

    Keyword args:
        verboseMulti (bool): If true, print some progress information on loading multiple files

    See Also:
        cosmotool.loadRamses
    """
    cpu_id = 0
    output = None
    verbose = kwargs.get('verboseMulti',False)
    new_kw = dict(kwargs)
    if 'verboseMulti' in new_kw:
        del new_kw['verboseMulti']

    while True:
        base = "%s/output_%05d" % (basepath,snapshot_id)
        if verbose:
            print("Loading sub-snapshot %s (cpu_id=%d)" % (base,cpu_id))
        s = loadRamses(base, snapshot_id, cpu_id, **new_kw)
        if s is None:
            break

        if output is None:
            output = SimulationBare(s)
        else:
            output.merge(s)

        cpu_id += 1

    return output
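
# Usage sketch (illustration only, not part of the committed module above): write a
# trivial Gadget file from bare positions and load a multi-CPU RAMSES snapshot. The
# paths and snapshot id are placeholders; verboseMulti is the keyword documented above.
def _example_simu():
    import numpy as np
    from cosmotool import simpleWriteGadget, loadRamsesAll

    x, y, z = (np.random.uniform(0, 1.0, size=10000).astype(np.float32) for _ in range(3))
    simpleWriteGadget("test_gadget.dat", [x, y, z], boxsize=1.0, Hubble=100, Omega_M=0.30)

    snap = loadRamsesAll("/path/to/simulation", 5, verboseMulti=True)
    return snap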


@@ -0,0 +1,109 @@
from .config import install_prefix
import subprocess
import os
try:
    from tempfile import TemporaryDirectory
except ImportError:
    from backports.tempfile import TemporaryDirectory
import h5py as h5
import numpy as np
import weakref


def smooth_particle_density(
        position,
        velocities=None,
        radius=1e6,
        boxsize=None,
        resolution=128,
        center=None, tmpprefix=None):
    """Use adaptive smoothing to produce density and momentum fields.
    The algorithm is originally described in [1].

    Parameters:
        position : numpy array NxQ
            the particle positions
            if Q==3, only positions. Q==6 means full phase space
        velocities : optional numpy array Nx3
            only optional if the above Q is 6
        radius : float
            maximum radius to which we need to compute fields
        boxsize : float
            size of the box for the generated fields
        resolution : int
            resolution of the output boxes
        center : list of 3 floats
            center of the new box. It depends on the convention
            for particles. If those are between [0, L], then [0,0,0]
            is correct. If those are [-L/2,L/2] then you should set
            [L/2,L/2,L/2].
        tmpprefix : string
            prefix of the temporary directory that will be used.
            It needs to have a lot of space available. By default
            '/tmp/' will typically be used.

    Returns
    -------
    dictionary
        The dict has two entries: 'rho' for the density, and 'p' for the momenta.
        Once the dictionary is garbage collected all temporary files and directories
        will be cleared automatically.

    Raises
    ------
    ValueError
        if arguments are invalid

    .. [1] S. Colombi, M. Chodorowski,
       "Cosmic velocity-gravity in redshift space", MNRAS, 2007, 375, 1
    """
    if len(position.shape) != 2:
        raise ValueError("Invalid position array shape")

    if velocities is None:
        if position.shape[1] != 6:
            raise ValueError("Position must be phase space if no velocities are given")

    if boxsize is None:
        raise ValueError("Need a boxsize")

    cx,cy,cz=center

    tmpdir = TemporaryDirectory(prefix=tmpprefix)
    h5_file = os.path.join(tmpdir.name, 'particles.h5')
    with h5.File(h5_file, mode="w") as f:
        data = f.create_dataset('particles', shape=(position.shape[0],7), dtype=np.float32)
        data[:,:3] = position[:,:3]
        if velocities is not None:
            data[:,3:6] = velocities[:,:3]
        else:
            data[:,3:6] = position[:,3:]
        data[:,6] = 1

    ret = \
        subprocess.run([
            os.path.join(install_prefix,'bin','simple3DFilter'),
            h5_file,
            str(radius),
            str(boxsize),
            str(resolution),
            str(cx), str(cy), str(cz)
        ], cwd=tmpdir.name)

    f0 = h5.File(os.path.join(tmpdir.name,'fields.h5'), mode="r")

    def cleanup_f0():
        f0.close()
        tmpdir.cleanup()

    class Dict(dict):
        pass

    t = Dict(rho=f0['density'], p=[f0['p0'], f0['p1'], f0['p2']])
    t._tmpdir_=tmpdir
    weakref.finalize(t, cleanup_f0)
    return t
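
# Usage sketch (illustration only, not part of the committed module above): smooth a
# phase-space particle set (positions and velocities packed as Nx6) onto a 128^3 grid.
# This shells out to the installed simple3DFilter binary, so it only runs where CosmoTool
# has been installed; all numbers are placeholders.
def _example_smooth():
    import numpy as np
    from cosmotool import smooth_particle_density

    pos_vel = np.random.uniform(0, 100., size=(10000, 6)).astype(np.float32)
    fields = smooth_particle_density(pos_vel, radius=100., boxsize=100.,
                                     resolution=128, center=[50., 50., 50.])
    return fields['rho'][:]         # h5py dataset; [:] pulls the density into memory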


@@ -0,0 +1,53 @@
import time
from contextlib import contextmanager


@contextmanager
def time_block(name):
    """
    This context manager measures the time taken by a step and prints the result
    in seconds to the console.

    Arguments:
        name (str): prefix to print
    """
    ts = time.time()
    yield
    te = time.time()

    print('%s %2.2f sec' % (name, te-ts))


def timeit(method):
    """This decorator adds a timing request for each call to the decorated function.

    Arguments:
        method (function): the method to decorate
    """

    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()

        print('%r (%r, %r) %2.2f sec' % (method.__name__, args, kw, te-ts))
        return result

    return timed


def timeit_quiet(method):
    """This decorator adds a timing request for each call to the decorated function.
    Same as cosmotool.timeit_ but quieter: it does not print the values of the arguments.

    Arguments:
        method (function): the method to decorate
    """

    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()

        print('%r %2.2f sec' % (method.__name__, te-ts))
        return result

    return timed
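
# Usage sketch (illustration only, not part of the committed module above): timing a code
# block and a decorated function with the helpers defined in this module.
def _example_timing():
    import numpy as np
    from cosmotool import time_block, timeit_quiet

    with time_block("matrix product"):
        a = np.random.standard_normal((512, 512))
        a = a @ a

    @timeit_quiet
    def my_fft(x):
        return np.fft.rfftn(x)

    return my_fft(np.random.standard_normal((64, 64, 64)))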