diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/CMakeLists.txt b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/CMakeLists.txt new file mode 100755 index 0000000000..7e9b0782df --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/CMakeLists.txt @@ -0,0 +1,24 @@ +if(UNIX) + if(DEFINED CUDA AND(NOT(CUDA EQUAL 0))) + set(CMAKE_CXX_COMPILER clang++) + else() + # Direct CMake to use icpx rather than the default C++ compiler/linker + set(CMAKE_CXX_COMPILER icpx) + endif() +else() # Windows + # Force CMake to use icx-cl rather than the default C++ compiler/linker + # (needed on Windows only) + include (CMakeForceCompiler) + CMAKE_FORCE_CXX_COMPILER (icx-cl IntelDPCPP) + include (Platform/Windows-Clang) +endif() + +cmake_minimum_required (VERSION 3.4) + +project(VectorAdditionSamples CXX) + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) + +add_subdirectory (src) diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/License.txt b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/License.txt new file mode 100644 index 0000000000..e63c6e13dc --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/License.txt @@ -0,0 +1,7 @@ +Copyright Intel Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions 
of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/README.md b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/README.md new file mode 100644 index 0000000000..a709e3b561 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/README.md @@ -0,0 +1,13 @@ +Vector addition showing different ways to express conditionals. + +On Intel devcloud run: + qsub build-devcloud.sh + +A script is provided for the NERSC perlmutter machine (using NVIDIA GPUs), +you can use: + bash build-perlmutter.sh + +It should be easy to modify for local installations as long as you have a +version of clang++ that is built for SYCL/CUDA. + +Similar methods should work for SYCL/HIP. diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/build-devcloud.sh b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/build-devcloud.sh new file mode 100755 index 0000000000..335ef7b066 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/build-devcloud.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +#PBS -l nodes=1:gpu:ppn=2 +#PBS -d . + +#source /opt/intel/oneapi/setvars.sh +rm -rf build +mkdir build +cd build +cmake ..
+make + +echo "Running on gpu" +ONEAPI_DEVICE_SELECTOR=level_zero:gpu ./vector-addition-examples +echo "Running on cpu" +ONEAPI_DEVICE_SELECTOR=opencl:cpu ./vector-addition-examples + +echo "Expected: Sum: 63661.5; Sum neg: -13185.3; Sum pos: 76847.3; checksum: -0.432617" diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/build-perlmutter.sh b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/build-perlmutter.sh new file mode 100755 index 0000000000..c29de95ea0 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/build-perlmutter.sh @@ -0,0 +1,17 @@ +#!/bin/bash + + +export DPCPP_ROOT=$PSCRATCH/llvm-build/install +export NV_HOME=/opt/nvidia/hpc_sdk/Linux_x86_64/22.7/cuda/11.7 +export PATH=$DPCPP_ROOT/bin:$PATH +export LD_LIBRARY_PATH=$DPCPP_ROOT/lib:$NV_HOME/lib64:$LD_LIBRARY_PATH +rm -rf build +mkdir build +cd build +cmake .. -DCUDA=1 -DNV_HOME=$NV_HOME +make + +## Need to run this under sbatch or salloc with your credentials +#echo "Running on gpu" +#ONEAPI_DEVICE_SELECTOR=cuda:0 ./vector-addition-examples +echo "Expected: Sum: 63661.5; Sum neg: -13185.3; Sum pos: 76847.3; checksum: -0.432617" diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/sample.json b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/sample.json new file mode 100644 index 0000000000..6c38d71b54 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/sample.json @@ -0,0 +1,68 @@ +{ + "guid": "62EF3FF6-B286-4D44-8F85-9329AF3FC403", + "name": "Base: Vector Addition Samples", + "categories": ["Toolkit/Get Started", "Toolkit/oneAPI Direct Programming/DPC++/Dense Linear Algebra", "Toolkit/oneAPI Tools/Advisor"], + "description": "This sample shows different ways to express conditionals", + "toolchain": ["icpx"], + "languages": [{"cpp": {"properties": {"projectOptions": [{"projectType": "makefile"}]}}}], + "targetDevice": ["CPU",
"GPU"], + "os": ["linux"], + "builder": ["make"], + "ciTests": { + "linux": [ + { + "id": "default", + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-examples", + "make clean" + ] + }, + { + "id": "cpu", + "env": [ + "export SYCL_DEVICE_TYPE=CPU" + ], + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-examples", + "make clean" + ] + }, + { + "id": "gpu", + "env": [ + "export SYCL_DEVICE_TYPE=GPU" + ], + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-examples", + "make clean" + ] + }, + { + "id": "HOST", + "env": [ + "export SYCL_DEVICE_TYPE=HOST" + ], + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-examples", + "make clean" + ] + }] + }, + "expertise": "Getting Started" +} diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/src/CMakeLists.txt b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/src/CMakeLists.txt new file mode 100755 index 0000000000..2d83fff498 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/src/CMakeLists.txt @@ -0,0 +1,36 @@ +# This is a Windows-specific flag that enables exception handling in host code +if(WIN32) + set(WIN_FLAG "/EHsc") +endif() +set(SOURCE_FILE vector-addition-examples.cpp) +set(TARGET_NAME vector-addition-examples) + +# +# SECTION 1 +# This section defines rules to create a cpu-gpu make target +# This can safely be removed if your project is only targeting FPGAs +# + +if(DEFINED CUDA AND(NOT(CUDA EQUAL 0))) + string(CONCAT COMPILE_FLAGS + "-fsycl -fsycl-targets=nvptx64-nvidia-cuda" + " -DSYCL2020_DISABLE_DEPRECATION_WARNINGS" + " --cuda-path=${NV_HOME}" + " -Wall" + ) + set(LINK_FLAGS + "-fsycl -fsycl-targets=nvptx64-nvidia-cuda --cuda-path=${NV_HOME}") +else() + set(COMPILE_FLAGS "-fsycl -Wall ${WIN_FLAG}") + set(LINK_FLAGS "-fsycl") +endif() + +add_executable(${TARGET_NAME}
${SOURCE_FILE}) +set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS}") +set_target_properties(${TARGET_NAME} PROPERTIES LINK_FLAGS "${LINK_FLAGS}") +add_custom_target(cpu-gpu DEPENDS ${TARGET_NAME}) + +# +# End of SECTION 1 +# + diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/src/vector-addition-examples.cpp b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/src/vector-addition-examples.cpp new file mode 100644 index 0000000000..ca92a16015 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-examples/src/vector-addition-examples.cpp @@ -0,0 +1,130 @@ +/* Demonstrate conditionals using vector addition */ + +#include + +#include + +using namespace cl; + +/* Base vector add function. */ +void vecAdd(const float* a, const float* b, float* c, size_t id) { + c[id] = a[id] + b[id]; +} + +/* Masked variant where the store is hidden behind a runtime branch. */ +void vecAddMasked(const float* a, const float* b, float* c, size_t id) { + float v = a[id] + b[id]; + if (v < 0.0f) { + c[id] = v; + } +} + +/* Variant where the variable value is predicated on a branch. */ +void vecAddPredicated(const float* a, const float* b, float* c, size_t id) { + float v = a[id] + b[id]; + if (v < 0.0f) { + v = 0.0f; + } + c[id] = v; +} + +class VecAddKernel; +class VecAddKernelMasked; +class VecAddKernelPredicated; + +void zeroBuffer(sycl::buffer b) { + static constexpr auto dwrite = sycl::access::mode::discard_write; + auto h = b.get_access(); + for (auto i = 0u; i < b.get_range()[0]; i++) { + h[i] = 0.f; + } +} + +double sumBuffer(sycl::buffer b) { + static constexpr auto read = sycl::access::mode::read; + auto h = b.get_access(); + auto sum = 0.0f; + for (auto i = 0u; i < b.get_range()[0]; i++) { + sum += h[i]; + } + return sum; +} + +/* This sample shows three different vector addition functions. 
It + * is possible to inspect the IR generated by these samples using LLVM + * tooling to compare the different approaches. + * The general flow is that the output buffer is zeroed, the calculation + * scheduled, then the sum printed for each of the functions. */ +int main(int argc, char* argv[]) { + static constexpr auto read = sycl::access::mode::read; + static constexpr auto write = sycl::access::mode::write; + static constexpr auto dwrite = sycl::access::mode::discard_write; + constexpr const size_t N = 100000; + const double PI = 3.14159; + const double ival = PI / N; + const sycl::range<1> VecSize{N}; + + double sumall, sumneg, sumpos; + + sycl::buffer bufA{VecSize}; + sycl::buffer bufB{VecSize}; + sycl::buffer bufC{VecSize}; + + { + auto h_a = bufA.get_access(); + auto h_b = bufB.get_access(); + for (auto i = 0u; i < N; i++) { + const double val = i * ival - (PI / 2); + h_a[i] = sin(val); + h_b[i] = cos(val); + } + } + + sycl::queue myQueue; + + { + zeroBuffer(bufC); + auto cg = [&](sycl::handler& h) { + auto a = bufA.get_access(h); + auto b = bufB.get_access(h); + auto c = bufC.get_access(h); + + h.parallel_for( + VecSize, [=](sycl::id<1> i) { vecAdd(&a[0], &b[0], &c[0], i[0]); }); + }; + myQueue.submit(cg); + sumall = sumBuffer(bufC); + } + { + zeroBuffer(bufC); + auto cg = [&](sycl::handler& h) { + auto a = bufA.get_access(h); + auto b = bufB.get_access(h); + auto c = bufC.get_access(h); + + h.parallel_for(VecSize, [=](sycl::id<1> i) { + vecAddMasked(&a[0], &b[0], &c[0], i[0]); + }); + }; + myQueue.submit(cg); + sumneg = sumBuffer(bufC); + } + { + zeroBuffer(bufC); + auto cg = [&](sycl::handler& h) { + auto a = bufA.get_access(h); + auto b = bufB.get_access(h); + auto c = bufC.get_access(h); + + h.parallel_for(VecSize, [=](sycl::id<1> i) { + vecAddPredicated(&a[0], &b[0], &c[0], i[0]); + }); + }; + myQueue.submit(cg); + sumpos = sumBuffer(bufC); + } + std::cout << "Sum: " << sumall << "; Sum neg: " << sumneg << "; Sum pos: " << + sumpos << "; checksum: " 
<< sumall - (sumneg + sumpos) << "\n"; + + return 0; +} diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/CMakeLists.txt b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/CMakeLists.txt new file mode 100755 index 0000000000..3c3211dab6 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/CMakeLists.txt @@ -0,0 +1,24 @@ +if(UNIX) + if(DEFINED CUDA AND(NOT(CUDA EQUAL 0))) + set(CMAKE_CXX_COMPILER clang++) + else() + # Direct CMake to use icpx rather than the default C++ compiler/linker + set(CMAKE_CXX_COMPILER icpx) + endif() +else() # Windows + # Force CMake to use icx-cl rather than the default C++ compiler/linker + # (needed on Windows only) + include (CMakeForceCompiler) + CMAKE_FORCE_CXX_COMPILER (icx-cl IntelDPCPP) + include (Platform/Windows-Clang) +endif() + +cmake_minimum_required (VERSION 3.4) + +project(VectorAdditionTiled CXX) + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) + +add_subdirectory (src) diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/License.txt b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/License.txt new file mode 100644 index 0000000000..e63c6e13dc --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/License.txt @@ -0,0 +1,7 @@ +Copyright Intel Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in 
all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/README.md b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/README.md new file mode 100644 index 0000000000..219914690a --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/README.md @@ -0,0 +1,27 @@ +Tiled vector addition demonstrating use of local accessors. +Note that the local accessors use a syntax that is deprecated in SYCL 2020. +Intel dpcpp suppresses warning messages. + +On Intel devcloud run: + qsub build-devcloud.sh + +A script is provided for the NERSC perlmutter machine (using NVIDIA GPUs). +Currently it expects that the compiler has been built with CUDA support +and installed in $PSCRATCH/llvm-build/install (there will be perlmutter +modules with prebuilt compilers eventually). + + bash build-perlmutter.sh + +This will only build the sample. To run it you will need to allocate a GPU +node, e.g. with + + salloc -A -C gpu -q interactive -t 10:00 -n 1 + +Then you can run the binary with: + + ONEAPI_DEVICE_SELECTOR=cuda:0 ./vector-addition-tiled + +It should be easy to modify the script for local installations as long as you have a +version of clang++ that is built for SYCL/CUDA. + +Similar methods should work for SYCL/HIP.
diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/build-devcloud.sh b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/build-devcloud.sh new file mode 100755 index 0000000000..e4a0e88688 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/build-devcloud.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +#PBS -l nodes=1:gpu:ppn=2 +#PBS -d . + +#source /opt/intel/oneapi/setvars.sh +rm -rf build +mkdir build +cd build +cmake .. +make + +echo "Running on gpu" +ONEAPI_DEVICE_SELECTOR=level_zero:gpu ./vector-addition-tiled +echo "Running on cpu" +ONEAPI_DEVICE_SELECTOR=opencl:cpu ./vector-addition-tiled + +echo "expected result: 128000" diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/build-perlmutter.sh b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/build-perlmutter.sh new file mode 100755 index 0000000000..683efe7709 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/build-perlmutter.sh @@ -0,0 +1,18 @@ +#!/bin/bash + + +export DPCPP_ROOT=$PSCRATCH/llvm-build/install +export NV_HOME=/opt/nvidia/hpc_sdk/Linux_x86_64/22.7/cuda/11.7 +export PATH=$DPCPP_ROOT/bin:$PATH +export LD_LIBRARY_PATH=$DPCPP_ROOT/lib:$NV_HOME/lib64:$LD_LIBRARY_PATH +rm -rf build +mkdir build +cd build +cmake ..
-DCUDA=1 -DNV_HOME=$NV_HOME +make + +## Need to run this under sbatch or salloc with your credentials +#echo "Running on gpu" +#ONEAPI_DEVICE_SELECTOR=cuda:0 ./vector-addition-tiled + +#echo "expected result: 128000" diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/sample.json b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/sample.json new file mode 100644 index 0000000000..67fcd017c2 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/sample.json @@ -0,0 +1,68 @@ +{ + "guid": "161F74A8-65E4-4844-B3FD-56E494660E7B", + "name": "Base: Vector Addition Tiled", + "categories": ["Toolkit/Get Started", "Toolkit/oneAPI Direct Programming/DPC++/Dense Linear Algebra", "Toolkit/oneAPI Tools/Advisor"], + "description": "This sample demonstrates the use of local accessors", + "toolchain": ["icpx"], + "languages": [{"cpp": {"properties": {"projectOptions": [{"projectType": "makefile"}]}}}], + "targetDevice": ["CPU", "GPU"], + "os": ["linux"], + "builder": ["make"], + "ciTests": { + "linux": [ + { + "id": "default", + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-tiled", + "make clean" + ] + }, + { + "id": "cpu", + "env": [ + "export SYCL_DEVICE_TYPE=CPU" + ], + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-tiled", + "make clean" + ] + }, + { + "id": "gpu", + "env": [ + "export SYCL_DEVICE_TYPE=GPU" + ], + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-tiled", + "make clean" + ] + }, + { + "id": "HOST", + "env": [ + "export SYCL_DEVICE_TYPE=HOST" + ], + "steps": [ + "mkdir build", + "cd build", + "cmake ..", + "make", + "./vector-addition-tiled", + "make clean" + ] + }] + }, + "expertise": "Getting Started" +} diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/src/CMakeLists.txt b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/src/CMakeLists.txt new
file mode 100755 index 0000000000..ec972fd99d --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/src/CMakeLists.txt @@ -0,0 +1,41 @@ +# This is a Windows-specific flag that enables exception handling in host code +if(WIN32) + set(WIN_FLAG "/EHsc") +endif() +set(SOURCE_FILE vector-addition-tiled.cpp) +set(TARGET_NAME vector-addition-tiled) + +# +# SECTION 1 +# This section defines rules to create a cpu-gpu make target +# This can safely be removed if your project is only targetting FPGAs +# + +if(DEFINED CUDA AND(NOT(CUDA EQUAL 0))) + string(CONCAT COMPILE_FLAGS + "-fsycl -fsycl-targets=nvptx64-nvidia-cuda" + " -DSYCL2020_DISABLE_DEPRECATION_WARNINGS" + " --cuda-path=${NV_HOME}" + " -Wall" + ) + set(LINK_FLAGS + "-fsycl -fsycl-targets=nvptx64-nvidia-cuda --cuda-path=${NV_HOME}") +else() + set(COMPILE_FLAGS "-fsycl -Wall ${WIN_FLAG}") + set(LINK_FLAGS "-fsycl") +endif() + +# To compile in a single command: +# icpx -fsycl .cpp -o .fpga_emu +# CMake executes: +# [compile] icpx -fsycl -o .cpp.o -c .cpp +# [link] icpx -fsycl .cpp.o -o .fpga_emu +add_executable(${TARGET_NAME} ${SOURCE_FILE}) +set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS}") +set_target_properties(${TARGET_NAME} PROPERTIES LINK_FLAGS "${LINK_FLAGS}") +add_custom_target(cpu-gpu DEPENDS ${TARGET_NAME}) + +# +# End of SECTION 1 +# + diff --git a/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/src/vector-addition-tiled.cpp b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/src/vector-addition-tiled.cpp new file mode 100644 index 0000000000..e2bdf8ed76 --- /dev/null +++ b/DirectProgramming/DPC++/DenseLinearAlgebra/vector-addition-tiled/src/vector-addition-tiled.cpp @@ -0,0 +1,134 @@ +/* + * Codeplay's ComputeCpp SDK + * + * vector-addition-tiled.cpp + * + * Description: + * Samples of tiled vector addition kernels + * + **************************************************************************/ + +#include + 
+#include + +using namespace cl; + +/* Loads float data from a and b into tile pointers. In this sample, the + * tile pointers point to local memory. */ +void loadTiles(const float* a, const float* b, float* tile1, float* tile2, + size_t id, size_t tile_i) { + tile1[tile_i] = a[id]; + tile2[tile_i] = b[id]; +} + +/* Sums the values from local memory. */ +void vecAdd(float* tile1, float* tile2, size_t tile_i) { + tile1[tile_i] += tile2[tile_i]; +} + +/* In this sample, loads from local to store back to global memory. */ +void storeTile(float* c, float* tile1, size_t id, size_t tile_i) { + c[id] = tile1[tile_i]; +} + +class TiledVecAdd; +class TiledVecAddDMA; + +/* First computes sum via normal tiled load, then by DMA. */ +int main(int argc, char* argv[]) { + constexpr const size_t N = 128000; // this is the total vector size + constexpr const size_t T = 32; // this is the tile size + static constexpr auto read = sycl::access::mode::read; + static constexpr auto write = sycl::access::mode::write; + static constexpr auto dwrite = sycl::access::mode::discard_write; + const sycl::range<1> VecSize{N}; + const sycl::range<1> TileSize{T}; + + sycl::queue myQueue; + auto context = myQueue.get_context(); + + sycl::buffer bufA{VecSize}; + sycl::buffer bufB{VecSize}; + sycl::buffer bufC{VecSize}; + + { + auto h_a = bufA.get_access(); + auto h_b = bufB.get_access(); + for (auto i = 0u; i < N; i++) { + h_a[i] = sin(i) * sin(i); + h_b[i] = cos(i) * cos(i); + } + } + + { + auto cg = [&](sycl::handler& h) { + auto a = bufA.get_access(h); + auto b = bufB.get_access(h); + auto c = bufC.get_access(h); + sycl::local_accessor tile1(TileSize, h); + sycl::local_accessor tile2(TileSize, h); + + h.parallel_for( + sycl::nd_range<1>(VecSize, TileSize), [=](sycl::nd_item<1> i) { + loadTiles(&a[0], &b[0], &tile1[0], &tile2[0], i.get_global_id(0), + i.get_local_id(0)); + i.barrier(); + vecAdd(&tile1[0], &tile2[0], i.get_local_id(0)); + i.barrier(); + storeTile(&c[0], &tile1[0], 
i.get_global_id(0), i.get_local_id(0)); + }); + }; + myQueue.submit(cg); + } + + { + auto h_c = bufC.get_access(); + float sum = 0.0f; + for (auto i = 0u; i < N; i++) { + sum += h_c[i]; + } + std::cout << "total result: " << sum << std::endl; + } + + { + auto cg = [&](sycl::handler& h) { + auto a = bufA.get_access(h); + auto b = bufB.get_access(h); + auto c = bufC.get_access(h); + sycl::local_accessor tile1(TileSize, h); + sycl::local_accessor tile2(TileSize, h); + + h.parallel_for( + sycl::nd_range<1>(VecSize, TileSize), [=](sycl::nd_item<1> i) { + auto event1 = i.async_work_group_copy( + tile1.get_pointer(), a.get_pointer() + i.get_global_id(0), + TileSize[0]); + auto event2 = i.async_work_group_copy( + tile2.get_pointer(), b.get_pointer() + i.get_global_id(0), + TileSize[0]); + i.wait_for(event1, event2); + i.barrier(); + vecAdd(&tile1[0], &tile2[0], i.get_local_id(0)); + i.barrier(); + auto event3 = + i.async_work_group_copy(c.get_pointer() + i.get_global_id(0), + tile1.get_pointer(), TileSize[0]); + i.wait_for(event3); + i.barrier(); + }); + }; + myQueue.submit(cg); + } + + { + auto h_c = bufC.get_access(); + float sum = 0.0f; + for (auto i = 0u; i < N; i++) { + sum += h_c[i]; + } + std::cout << "total result: " << sum << std::endl; + } + + return 0; +}