Commit 2d48548

added module12 localmem atomics to oneAPI-essentials (#1099)

* added module12 localmem atomics to oneAPI-essentials
* updated oneAPI Essentials readme and makefile with module12
* removed gamma-correction sample.json

1 parent 1e4bbc7 commit 2d48548

30 files changed: +1914 −25 lines
DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/07_DPCPP_Library/gamma-correction/sample.json

-10 lines. This file was deleted.
@@ -0,0 +1,8 @@
Copyright 2020 Intel Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/12_DPCPP_Local_Memory_And_Atomics/LocalMemory_Atomics.ipynb

+853 lines. Large diffs are not rendered by default.
@@ -0,0 +1,27 @@
## Title
SYCL Local Memory and Atomics: This is part 12 of the oneAPI essentials training series.

## Requirements
| Optimized for | Description |
|:--- |:--- |
| OS | Linux* Ubuntu 18.04/20.04; Windows* 10 |
| Hardware | Skylake with GEN9 or newer |
| Software | Intel® oneAPI DPC++ Compiler, Jupyter Notebooks, Intel Devcloud |

## Purpose
This hands-on exercise demonstrates SYCL atomic operations and local memory usage. You will learn how to perform reductions using atomic operations, and how to use local memory to optimize kernel performance.

## License
Code samples are licensed under the MIT license. See [License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt) for details.

Third-party program licenses can be found here: [third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt)

## Install Directions

The Jupyter notebooks are tested on and can be run on the Intel Devcloud. Below are the steps to access these Jupyter notebooks on the Intel Devcloud:
1. Register on [Intel Devcloud](https://intelsoftwaresites.secure.force.com/devcloud/oneapi)
2. Go to the "Terminal" in the Intel Devcloud
3. Type the command below to download the oneAPI-essentials series notebooks into your Devcloud account:
   /data/oneapi_workshop/get_jupyter_notebooks.sh
4. Navigate to the oneAPI_Essentials folder, open Welcome.ipynb, click on the "Local Memory and Atomics" notebook, and follow the instructions
@@ -0,0 +1,53 @@
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>

using namespace sycl;

static constexpr size_t N = 1024; // global size

int main() {
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  auto data = malloc_shared<int>(N, q);
  for (int i = 0; i < N; i++) data[i] = i;
  auto min = malloc_shared<int>(1, q);
  auto max = malloc_shared<int>(1, q);
  min[0] = 0;
  max[0] = 0;

  //# Reduction Kernel using atomics
  q.parallel_for(N, [=](auto i) {
    //# STEP 1: create atomic reference for min and max

    //# YOUR CODE GOES HERE

    //# STEP 2: add atomic operation for min and max computation

    //# YOUR CODE GOES HERE

  }).wait();

  auto mid = 0.0;
  //# STEP 3: Compute mid-range using the min and max

  //# YOUR CODE GOES HERE

  std::cout << "Minimum = " << min[0] << "\n";
  std::cout << "Maximum = " << max[0] << "\n";
  std::cout << "Mid-Range = " << mid << "\n";

  //# free the USM allocations
  free(data, q);
  free(min, q);
  free(max, q);
  return 0;
}
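One way to complete the three STEPs above (a sketch of a possible solution using the SYCL 2020 atomic_ref API already used elsewhere in this module; the training notebook walks through the intended answer):

//# STEP 1 + STEP 2 (inside the kernel): atomic references and min/max updates
auto min_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device,
                             access::address_space::global_space>(min[0]);
auto max_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device,
                             access::address_space::global_space>(max[0]);
min_atomic.fetch_min(data[i]);
max_atomic.fetch_max(data[i]);

//# STEP 3 (on the host, after the kernel completes): mid-range from min and max
mid = (min[0] + max[0]) / 2.0;

Note that because min[0] is initialized to 0 and this data starts at 0, fetch_min leaves it unchanged here; for arbitrary data, min[0] would need to start at the first element or INT_MAX.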
@@ -0,0 +1,26 @@
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>

using namespace sycl;

int main() {
  queue q;

  //# Print the device info
  std::cout << "device name : " << q.get_device().get_info<info::device::name>() << "\n";
  std::cout << "local_mem_size: " << q.get_device().get_info<info::device::local_mem_size>() << "\n";

  auto local_mem_type = q.get_device().get_info<info::device::local_mem_type>();
  if(local_mem_type == info::local_mem_type::local)
    std::cout << "local_mem_type: info::local_mem_type::local" << "\n";
  else if(local_mem_type == info::local_mem_type::global)
    std::cout << "local_mem_type: info::local_mem_type::global" << "\n";
  else if(local_mem_type == info::local_mem_type::none)
    std::cout << "local_mem_type: info::local_mem_type::none" << "\n";

  return 0;
}
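The queries above matter for the matrix-multiply samples that follow, which stage two N x N float tiles in local memory. A small guard one might add (a hypothetical addition, not part of the sample) to confirm the tiles fit:

//# Hypothetical check: two N x N float tiles must fit in local memory
constexpr size_t N = 16;
auto local_mem_size = q.get_device().get_info<info::device::local_mem_size>();
if (2 * N * N * sizeof(float) > local_mem_size)
  std::cout << "Tiles (" << 2*N*N*sizeof(float) << " bytes) exceed local memory\n";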
@@ -0,0 +1,86 @@
//==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================

#include <CL/sycl.hpp>
#include <iomanip>

using namespace sycl;

int main() {

  size_t N = 16;
  std::cout << "MATRIX_SIZE : " << N << "x" << N << std::endl;

  //# Define vectors for matrices
  std::vector<float> matrix_a(N*N);
  std::vector<float> matrix_b(N*N);
  std::vector<float> matrix_c(N*N);
  std::vector<float> matrix_d(N*N);

  //# Initialize matrices with values
  float v1 = 2.f;
  float v2 = 3.f;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++){
      matrix_a[i*N+j] = v1++;
      matrix_b[i*N+j] = v2++;
      matrix_c[i*N+j] = 0.f;
      matrix_d[i*N+j] = 0.f;
    }

  //# Define queue with default device for offloading computation
  queue q;
  std::cout << "Offload Device : " << q.get_device().get_info<info::device::name>() << std::endl;

  //# Create buffers for matrices
  buffer a(matrix_a);
  buffer b(matrix_b);
  buffer c(matrix_c);

  //# Submit command group to execute on device
  q.submit([&](handler &h){
    //# Create accessors to copy buffers to the device
    accessor A(a, h, read_only);
    accessor B(b, h, read_only);
    accessor C(c, h, write_only);

    //# Define size for ND-range and work-group size
    range<2> global_size(N,N);
    range<2> work_group_size(N,N);

    //# Parallel Compute Matrix Multiplication
    h.parallel_for(nd_range<2>{global_size, work_group_size}, [=](nd_item<2> item){
      const int i = item.get_global_id(0);
      const int j = item.get_global_id(1);

      //# matrix multiplication computation, reading A and B directly from global memory
      float temp = 0.f;
      for (int k = 0; k < N; k++) {
        temp += A[i*N+k] * B[k*N+j];
      }
      C[i*N+j] = temp;
    });
  });
  //# host accessor construction waits for the kernel and exposes the results
  host_accessor ha(c, read_only);

  //# Print Output and Verification
  auto FAIL = 0;
  for (int i=0; i<N; i++){
    for (int j=0; j<N; j++){
      for(int k=0; k<N; k++){
        matrix_d[i*N+j] += matrix_a[i*N+k] * matrix_b[k*N+j];
      }
      if(matrix_d[i*N+j] != ha[i*N+j]) FAIL = 1;
      std::cout << std::setw(6) << ha[i*N+j] << " ";
    }
    std::cout << "\n";
  }
  if(FAIL == 1) std::cout << "FAIL\n"; else std::cout << "PASS\n";

  return 0;
}
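A caveat for this version (an added note, not from the sample): the ND-range uses a single N x N work-group, which only launches if N*N work-items are within the device's limit. A hypothetical guard one could place before the submit:

//# Hypothetical guard: the N x N work-group must not exceed the device limit
auto max_wg = q.get_device().get_info<info::device::max_work_group_size>();
if (N * N > max_wg) {
  std::cout << "Work-group of " << N*N << " work-items exceeds device limit of "
            << max_wg << "; reduce N or tile the computation.\n";
  return 1;
}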
@@ -0,0 +1,99 @@
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================

#include <CL/sycl.hpp>
#include <iomanip>

using namespace sycl;

int main() {

  size_t N = 16;
  std::cout << "MATRIX_SIZE : " << N << "x" << N << std::endl;

  //# Define vectors for matrices
  std::vector<float> matrix_a(N*N);
  std::vector<float> matrix_b(N*N);
  std::vector<float> matrix_c(N*N);
  std::vector<float> matrix_d(N*N);

  //# Initialize matrices with values
  float v1 = 2.f;
  float v2 = 3.f;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++){
      matrix_a[i*N+j] = v1++;
      matrix_b[i*N+j] = v2++;
      matrix_c[i*N+j] = 0.f;
      matrix_d[i*N+j] = 0.f;
    }

  //# Define queue with default device for offloading computation
  queue q;
  std::cout << "Offload Device : " << q.get_device().get_info<info::device::name>() << std::endl;

  //# Create buffers for matrices
  buffer a(matrix_a);
  buffer b(matrix_b);
  buffer c(matrix_c);

  //# Submit command group to execute on device
  q.submit([&](handler &h){
    //# Create accessors to copy buffers to the device
    accessor A(a, h, read_only);
    accessor B(b, h, read_only);
    accessor C(c, h, write_only);

    //# Define size for ND-range and work-group size
    range<2> global_size(N,N);
    range<2> work_group_size(N,N);

    //# Create local accessors
    accessor<float, 2, access::mode::read_write, access::target::local> A_local(range<2>(N, N), h);
    accessor<float, 2, access::mode::read_write, access::target::local> B_local(range<2>(N, N), h);

    //# Parallel Compute Matrix Multiplication
    h.parallel_for(nd_range<2>{global_size, work_group_size}, [=](nd_item<2> item){
      const int i = item.get_global_id(0);
      const int j = item.get_global_id(1);
      const int x = item.get_local_id(0);
      const int y = item.get_local_id(1);

      //# copy from global to local memory
      A_local[x][y] = A[i * N + j];
      B_local[x][y] = B[i * N + j];

      //# barrier to synchronize local memory copy across all work items
      group_barrier(item.get_group());

      //# matrix multiplication computation from local memory
      float temp = 0.f;
      for (int k = 0; k < N; k++) {
        temp += A_local[x][k] * B_local[k][y];
      }
      C[i*N+j] = temp;
    });
  });
  //# host accessor construction waits for the kernel and exposes the results
  host_accessor ha(c, read_only);

  //# Print Output and Verification
  auto FAIL = 0;
  for (int i=0; i<N; i++){
    for (int j=0; j<N; j++){
      for(int k=0; k<N; k++){
        matrix_d[i*N+j] += matrix_a[i*N+k] * matrix_b[k*N+j];
      }
      if(matrix_d[i*N+j] != ha[i*N+j]) FAIL = 1;
      std::cout << std::setw(6) << ha[i*N+j] << " ";
    }
    std::cout << "\n";
  }
  if(FAIL == 1) std::cout << "FAIL\n"; else std::cout << "PASS\n";

  return 0;
}
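The kernel above stages the entire matrices into local memory, which only works because N = 16 fits in a single work-group. For larger matrices, the standard technique (a sketch, not part of this sample, under the assumptions that a tile size TS divides N and that A_local/B_local are declared with range<2>(TS, TS)) loops over tiles:

//# Hypothetical tiled kernel for N larger than one work-group
constexpr int TS = 16; // tile size; assumes TS divides N
h.parallel_for(nd_range<2>{range<2>(N, N), range<2>(TS, TS)}, [=](nd_item<2> item){
  const int i = item.get_global_id(0);
  const int j = item.get_global_id(1);
  const int x = item.get_local_id(0);
  const int y = item.get_local_id(1);

  float temp = 0.f;
  for (int t = 0; t < N / TS; t++) {
    //# stage the next TS x TS tiles of A and B into local memory
    A_local[x][y] = A[i * N + (t * TS + y)];
    B_local[x][y] = B[(t * TS + x) * N + j];
    group_barrier(item.get_group());

    //# accumulate the partial dot-product from the staged tiles
    for (int k = 0; k < TS; k++)
      temp += A_local[x][k] * B_local[k][y];

    //# all reads must finish before the tiles are overwritten
    group_barrier(item.get_group());
  }
  C[i*N+j] = temp;
});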
@@ -0,0 +1,38 @@
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>

using namespace sycl;

static constexpr size_t N = 1024; // global size

int main() {
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  std::vector<int> data(N);
  for (int i = 0; i < N; i++) data[i] = i;
  int sum = 0;
  {
    //# create buffers for data and sum
    buffer buf_data(data);
    buffer buf_sum(&sum, range(1));

    //# Reduction Kernel using atomics
    q.submit([&](auto &h) {
      accessor data_acc(buf_data, h, read_only);
      accessor sum_acc(buf_sum, h);

      h.parallel_for(N, [=](auto i) {
        auto sum_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(sum_acc[0]);
        sum_atomic += data_acc[i];
      });
    });
  }
  //# buf_sum goes out of scope above, which waits for the kernel and copies the result back into sum
  std::cout << "Sum = " << sum << "\n";

  return 0;
}
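Note that sum is only guaranteed to hold the result once the buffer goes out of scope at the closing brace. An alternative that reads the result while the buffers are still alive (a hedged variant, not in the sample) is a host accessor:

//# Hypothetical variant: read the result through a host accessor
//# instead of relying on buffer destruction to copy it back
host_accessor result(buf_sum, read_only);
std::cout << "Sum = " << result[0] << "\n";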
@@ -0,0 +1,29 @@
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>

using namespace sycl;

static constexpr size_t N = 1024; // global size

int main() {
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  auto data = malloc_shared<int>(N, q);
  for (int i = 0; i < N; i++) data[i] = i;
  auto sum = malloc_shared<int>(1, q);
  sum[0] = 0;

  //# Reduction Kernel using atomics
  q.parallel_for(N, [=](auto i) {
    auto sum_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(sum[0]);
    sum_atomic += data[i];
  }).wait();

  std::cout << "Sum = " << sum[0] << "\n";

  //# free the USM allocations
  free(data, q);
  free(sum, q);
  return 0;
}
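For comparison, the same sum can be expressed with the SYCL 2020 reduction interface, which lets the runtime pick a strategy that is often faster than one atomic per work-item (a sketch, not part of this sample; older DPC++ releases expose this as sycl::ONEAPI::reduction):

//# Hypothetical alternative: built-in reduction instead of explicit atomics
q.parallel_for(range<1>(N), reduction(sum, plus<int>()),
               [=](id<1> i, auto &acc) {
  acc += data[i];
}).wait();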
