@@ -131,10 +131,9 @@ on LUMI:
 ```
 . /projappl/project_462000752/intel/oneapi/setvars.sh --include-intel-llvm
 
-module load LUMI/22.08
+module load LUMI
 module load partition/G
-module load rocm/5.3.3
-module load cce/16.0.1
+module load rocm/6.0.3
 export MPICH_GPU_SUPPORT_ENABLED=1 # Needed for GPU-aware MPI
 ```
 After this, one can load other modules that might be needed for compiling the codes. With the environment set up, we can compile and run the SYCL codes.
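+A quick way to test the setup is a minimal SYCL program. The sketch below (the file name `devices.cpp` and its contents are illustrative, not part of the course material) lists the visible devices and runs a trivial kernel:
+```
+// devices.cpp: print the devices the SYCL runtime can see, then run a tiny kernel
+#include <sycl/sycl.hpp>
+#include <iostream>
+
+int main() {
+  for (const auto &d : sycl::device::get_devices())
+    std::cout << d.get_info<sycl::info::device::name>() << "\n";
+
+  sycl::queue q;  // default device selection
+  int *x = sycl::malloc_shared<int>(1, q);
+  q.single_task([=] { *x = 42; }).wait();
+  std::cout << "kernel wrote " << *x << "\n";
+  sycl::free(x, q);
+}
+```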
@@ -162,13 +161,12 @@ module load openmpi/4.1.2-cuda # This is needed for using CUDA-aware MPI
 ```
 on LUMI:
 ```
-module load LUMI/22.08
+module load LUMI
 module load partition/G
-module load rocm/5.3.3
-module load cce/16.0.1
+module load rocm/6.0.3
 export MPICH_GPU_SUPPORT_ENABLED=1
-export LD_LIBRARY_PATH=/appl/lumi/SW/LUMI-22.08/G/EB/Boost/1.79.0-cpeCray-22.08/lib:$LD_LIBRARY_PATH
-export LD_PRELOAD=/pfs/lustrep4/appl/lumi/SW/LUMI-22.08/G/EB/rocm/5.3.3/llvm/lib/libomp.so
+# TODO: verify whether these exports are still needed with the updated module stack:
+# export LD_LIBRARY_PATH=/appl/lumi/SW/LUMI-22.08/G/EB/Boost/1.79.0-cpeCray-22.08/lib:$LD_LIBRARY_PATH
+# export LD_PRELOAD=/pfs/lustrep4/appl/lumi/SW/LUMI-22.08/G/EB/rocm/5.3.3/llvm/lib/libomp.so
 ```
 
 ```
@@ -205,10 +203,9 @@ Similarly on LUMI. First we set up the environment and load the modules as indicated
 ```
 . /projappl/project_462000752/intel/oneapi/setvars.sh --include-intel-llvm
 
-module load LUMI/22.08
+module load LUMI
 module load partition/G
-module load rocm/5.3.3
-module load cce/16.0.1
+module load rocm/6.0.3
 export MPICH_GPU_SUPPORT_ENABLED=1
 ```
 Now compile with the Intel compilers:
@@ -218,7 +215,7 @@ icpx -fsycl -fsycl-targets=amdgcn-amd-amdhsa,spir64_x86_64 -Xsycl-target-backend
 ```
 Or with AdaptiveCpp:
 ```
-export LD_PRELOAD=/pfs/lustrep4/appl/lumi/SW/LUMI-22.08/G/EB/rocm/5.3.3/llvm/lib/libomp.so
+# export LD_PRELOAD=/pfs/lustrep4/appl/lumi/SW/LUMI-22.08/G/EB/rocm/5.3.3/llvm/lib/libomp.so
 /projappl/project_462000752/AdaptiveCpp/bin/acpp -O3 `CC --cray-print-opts=cflags` <sycl_mpi_code>.cpp `CC --cray-print-opts=libs`
 ```
 
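+For reference, a minimal sketch of what a `<sycl_mpi_code>.cpp` could look like (illustrative only, not the course exercise): rank 0 fills a device buffer and passes the device pointer straight to MPI, which is what `MPICH_GPU_SUPPORT_ENABLED=1` makes possible.
+```
+#include <sycl/sycl.hpp>
+#include <mpi.h>
+#include <iostream>
+
+int main(int argc, char **argv) {
+  MPI_Init(&argc, &argv);
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  sycl::queue q;
+  int *buf = sycl::malloc_device<int>(1, q);
+
+  if (rank == 0) {
+    q.single_task([=] { *buf = 42; }).wait();
+    // GPU-aware MPI lets us hand the device pointer directly to MPI_Send
+    MPI_Send(buf, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
+  } else if (rank == 1) {
+    MPI_Recv(buf, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    int host = 0;
+    q.memcpy(&host, buf, sizeof(int)).wait();  // copy result back for printing
+    std::cout << "rank 1 received " << host << "\n";
+  }
+
+  sycl::free(buf, q);
+  MPI_Finalize();
+}
+```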
@@ -233,7 +230,7 @@ The `job.sh` file contains all the necessary information (number of nodes, tasks
 
-Use [`SYCL_PI_TRACE`](https://intel.github.io/llvm-docs/EnvironmentVariables.html#sycl-pi-trace-options) to enable runtime tracing (e.g. device discovery):
+Use [`SYCL_UR_TRACE`](https://intel.github.io/llvm-docs/EnvironmentVariables.html#sycl-pi-trace-options) to enable runtime tracing (e.g. device discovery):
 
-export SYCL_PI_TRACE=1
+export SYCL_UR_TRACE=1
 
 
 ### Running on Mahti
@@ -272,7 +269,7 @@ single GPU with a single MPI task and a single thread use:
 #SBATCH --job-name=example
 #SBATCH --account=project_2012125
 #SBATCH --partition=gpusmall
-#SBATCH --reservation=hlgp-gpu-f2024-thu
+# TODO: verify that the reservation below is still valid
+#SBATCH --reservation=hlgp-gpu-f2024-thu
 #SBATCH --nodes=1
 #SBATCH --ntasks=1
 #SBATCH --time=00:05:00
@@ -297,7 +294,7 @@ LUMI is similar to Mahti.
 #SBATCH --job-name=example
 #SBATCH --account=project_462000752
 #SBATCH --partition=standard
 ##SBATCH --reservation=hlgp-cpu-f2024 # The reservation does not work
 #SBATCH --time=00:05:00
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
@@ -314,7 +311,7 @@ srun my_cpu_exe
 #SBATCH --job-name=example
 #SBATCH --account=project_462000752
 #SBATCH --partition=standard-g
-#SBATCH --reservation=hlgp-gpu-f2024
+# TODO: verify that the reservation below is still valid
+#SBATCH --reservation=hlgp-gpu-f2024
 #SBATCH --time=00:05:00
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1