In the first version of my code, I used Python's multiprocessing library, mapping a main function MAIN_LOOP over 16 worker processes like this:
import multiprocessing as mp
import numpy as np

# paramo, zrange, zpm, h, Omega_m, Omega_DE, w0, wa, C_IA, A_IA, n_IA, B_IA,
# the *_tab interpolators, l, P_dd_C* and the pre_CC_path/CC_path variables
# are all defined earlier in the full script.

def MAIN_LOOP(lll, seed=None):
    nz = len(zrange)
    if paramo == 0:
        C_ij_GG, C_ij_LL, C_ij_GL = np.zeros((nz, nz)), np.zeros((nz, nz)), np.zeros((nz, nz))
    C_ij_GG_up, C_ij_LL_up, C_ij_GL_up = np.zeros((nz, nz)), np.zeros((nz, nz)), np.zeros((nz, nz))
    C_ij_GG_dw, C_ij_LL_dw, C_ij_GL_dw = np.zeros((nz, nz)), np.zeros((nz, nz)), np.zeros((nz, nz))
    # Fill the fiducial (paramo == 0 only), "up" and "down" matrices.
    for aa in range(nz):
        for bb in range(nz):
            if paramo == 0:
                C_ij_GG[aa][bb], C_ij_LL[aa][bb], C_ij_GL[aa][bb] = Pobs_C(zpm, zrange[aa], zrange[bb], h[2], Omega_m[2], Omega_DE[2], w0[2], wa[2], C_IA, A_IA[2], n_IA[2], B_IA[2], E_tab, R_tab, DG_tab, DG_tab, WG_tab, W_tab, WIA_tab, l[lll], P_dd_C, R_tab(z_pk))
            C_ij_GG_up[aa][bb], C_ij_LL_up[aa][bb], C_ij_GL_up[aa][bb] = Pobs_C(zpm, zrange[aa], zrange[bb], h[0], Omega_m[0], Omega_DE[0], w0[0], wa[0], C_IA, A_IA[0], n_IA[0], B_IA[0], E_tab_up, R_tab_up, DG_tab, DG_tab_up, WG_tab_up, W_tab_up, WIA_tab_up, l[lll], P_dd_C_up, R_tab_up(z_pk))
            C_ij_GG_dw[aa][bb], C_ij_LL_dw[aa][bb], C_ij_GL_dw[aa][bb] = Pobs_C(zpm, zrange[aa], zrange[bb], h[3], Omega_m[3], Omega_DE[3], w0[3], wa[3], C_IA, A_IA[3], n_IA[3], B_IA[3], E_tab_dw, R_tab_dw, DG_tab, DG_tab_dw, WG_tab_dw, W_tab_dw, WIA_tab_dw, l[lll], P_dd_C_dw, R_tab_dw(z_pk))
    if paramo == 0:
        # Write the fiducial matrices to disk.
        outGG = open(pre_CC_path[0] + CC_path[2] + "/COVAR_fid_" + str(l[lll]), 'w')
        outLL = open(pre_CC_path[1] + CC_path[2] + "/COVAR_fid_" + str(l[lll]), 'w')
        outGL = open(pre_CC_path[2] + CC_path[2] + "/COVAR_fid_" + str(l[lll]), 'w')
        for aa in range(len(C_ij_GG)):
            for bb in range(len(C_ij_GG)):
                outGG.write("%.16e " % C_ij_GG[aa][bb])
                outLL.write("%.16e " % C_ij_LL[aa][bb])
                outGL.write("%.16e " % C_ij_GL[aa][bb])
            outGG.write('\n')
            outLL.write('\n')
            outGL.write('\n')
        outGG.close()
        outLL.close()
        outGL.close()
    # Write the "up" and "down" matrices to disk.
    outGGU = open(pre_CC_path[0] + CC_path[0] + "/COVAR_up_" + str(l[lll]), 'w')
    outGGD = open(pre_CC_path[0] + CC_path[3] + "/COVAR_dw_" + str(l[lll]), 'w')
    outLLU = open(pre_CC_path[1] + CC_path[0] + "/COVAR_up_" + str(l[lll]), 'w')
    outLLD = open(pre_CC_path[1] + CC_path[3] + "/COVAR_dw_" + str(l[lll]), 'w')
    outGLU = open(pre_CC_path[2] + CC_path[0] + "/COVAR_up_" + str(l[lll]), 'w')
    outGLD = open(pre_CC_path[2] + CC_path[3] + "/COVAR_dw_" + str(l[lll]), 'w')
    for aa in range(len(C_ij_GG_up)):
        for bb in range(len(C_ij_GG_up)):
            outGGU.write("%.16e " % C_ij_GG_up[aa][bb])
            outGGD.write("%.16e " % C_ij_GG_dw[aa][bb])
            outLLU.write("%.16e " % C_ij_LL_up[aa][bb])
            outLLD.write("%.16e " % C_ij_LL_dw[aa][bb])
            outGLU.write("%.16e " % C_ij_GL_up[aa][bb])
            outGLD.write("%.16e " % C_ij_GL_dw[aa][bb])
        outGGU.write('\n')
        outGGD.write('\n')
        outLLU.write('\n')
        outLLD.write('\n')
        outGLU.write('\n')
        outGLD.write('\n')
    outGGU.close()
    outGGD.close()
    outLLU.close()
    outLLD.close()
    outGLU.close()
    outGLD.close()

lll = range(len(l))
if __name__ == '__main__':
    pool = mp.Pool(16)
    pool.map(MAIN_LOOP, lll)
The parallelized part is the block at the end, i.e.:

if __name__ == '__main__':
    pool = mp.Pool(16)
    pool.map(MAIN_LOOP, lll)
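To make the pattern explicit, here is a minimal, self-contained sketch of the same pool.map idiom, where work is a hypothetical stand-in for MAIN_LOOP: each of the 16 worker processes receives one index at a time and runs independently of the others.

import multiprocessing as mp

def work(lll):
    # stand-in for MAIN_LOOP: each call receives one index from the iterable
    return lll * lll

if __name__ == '__main__':
    pool = mp.Pool(16)
    print(pool.map(work, range(8)))  # [0, 1, 4, 9, 16, 25, 36, 49]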
Now I am trying another optimization method, this time on the GPU with OpenCL. Instead of the parallelized multiprocessing pool shown above, I wrote:
# NEW VERSION: with OpenCL
if __name__ == '__main__':
    # GPU/OpenCL version
    # Ask PyOpenCL to print the compiler output (must be set before building)
    os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
    # Select a device
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)
    # Kernel
    prg = cl.Program(ctx, """
    typedef int T;

    // extern MAIN_LOOP function, assumed to be provided elsewhere
    void MAIN_LOOP(__global T* in);

    __kernel void gpu_map(__global T* in, const size_t n)
    {
        unsigned gid = get_global_id(0);
        // call MAIN_LOOP with the element at this global id
        MAIN_LOOP(in[gid]);
    }
    """).build()
    # Create the input numpy array
    lll_np = np.array(lll, dtype=np.uint32)
    # Allocate memory on the device and copy the content of the numpy array
    mf = cl.mem_flags
    lll_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=lll_np)
    # Get the kernel function and launch it
    my_knl = prg.gpu_map
    my_knl.set_scalar_arg_dtypes([None, np.int32])
    my_knl(queue, lll_np.shape, None, lll_g, len(lll_np))
Unfortunately, the kernel program can't be built; I get the following errors:
Traceback (most recent call last):
  File "X2_non_flat_GPU_OpenCL_dev.py", line 671, in <module>
    """).build()
  File "/Users/fab/Library/Python/2.7/lib/python/site-packages/pyopencl/__init__.py", line 510, in build
    options_bytes=options_bytes, source=self._source)
  File "/Users/fab/Library/Python/2.7/lib/python/site-packages/pyopencl/__init__.py", line 554, in _build_and_catch_errors
    raise err
pyopencl._cl.RuntimeError: clBuildProgram failed: BUILD_PROGRAM_FAILURE - clBuildProgram failed: BUILD_PROGRAM_FAILURE - clBuildProgram failed: BUILD_PROGRAM_FAILURE
Build on <pyopencl.Device 'AMD Radeon Pro Vega 20 Compute Engine' on 'Apple' at 0x1021d00>:
Error returned by cvms_element_build_from_source
(options: -I /Users/fab/Library/Python/2.7/lib/python/site-packages/pyopencl/cl)
(source saved as /var/folders/y7/5dtgdjld5fxd3c1qm9hknlm40000gn/T/tmpg3pfTx.cl)
How can I solve these errors?
A similar bounty has been started about benchmarking this code: Different ways to optimize with GPU PyOpenCL a python code : extern function inside kernel GPU/PyOpenCL. That question has more details about the part of the code that dominates the runtime; this bounty is more about finding a viable optimization strategy in general.
Even if your host program is written in Python using PyOpenCL, your OpenCL kernels must be written in OpenCL C or OpenCL C++. There is no way around that: the device compiler only understands OpenCL source, so a kernel cannot call back into a Python function such as MAIN_LOOP. Have a look at Numba if you want to write the code that runs on the GPU in Python.
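For illustration, here is a minimal PyOpenCL program whose kernel body is written entirely in OpenCL C and therefore builds; the doubling is just a placeholder for whatever per-element work can be expressed in OpenCL C, and the names (gpu_map, in_g, out_g) are made up for this sketch.

import numpy as np
import pyopencl as cl

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# The whole kernel, including its body, is OpenCL C -- no external Python calls
prg = cl.Program(ctx, """
__kernel void gpu_map(__global const uint *in,
                      __global uint *out)
{
    size_t gid = get_global_id(0);
    out[gid] = 2 * in[gid];   // placeholder for the real per-index work
}
""").build()

lll_np = np.arange(16, dtype=np.uint32)
mf = cl.mem_flags
in_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=lll_np)
out_g = cl.Buffer(ctx, mf.WRITE_ONLY, lll_np.nbytes)

prg.gpu_map(queue, lll_np.shape, None, in_g, out_g)

res = np.empty_like(lll_np)
cl.enqueue_copy(queue, res, out_g)
print(res)

And if you would rather stay in Python on the device, here is a minimal Numba sketch (this assumes a CUDA-capable GPU and the numba package; the kernel body is again a placeholder, not MAIN_LOOP itself):

import numpy as np
from numba import cuda

@cuda.jit
def gpu_map(arr):
    gid = cuda.grid(1)      # absolute index of this GPU thread
    if gid < arr.size:
        arr[gid] *= 2       # placeholder per-element work

data = np.arange(16, dtype=np.int32)
d_data = cuda.to_device(data)
gpu_map[1, 16](d_data)      # launch 1 block of 16 threads
print(d_data.copy_to_host())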