Commit b6f7c09: add all dir
Parent: 297e5a5
29 files changed: +2326 -2 lines

.github/labeler.yml (+2)

@@ -1,3 +1,5 @@
+'task:all':
+  - "tasks/all/**"
 'task:mpi':
   - "tasks/mpi/**"
 'task:omp':
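For context: this is the configuration for the labeler GitHub Action, so pull requests that touch ``tasks/all/**`` should now receive the ``task:all`` label automatically (assuming the repository's existing labeler workflow picks up this file).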

docs/user_guide/submit_work.rst (+3 -2)

@@ -1,20 +1,21 @@
 How to submit your work
 ========================

-- There are ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in the ``tasks`` directory. Move to a folder of your task. Create a directory named ``<last name>_<first letter of name>_<short task name>``.
+- There are ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in the ``tasks`` directory. Move to a folder of your task. Create a directory named ``<last name>_<first letter of name>_<short task name>``.

   Example: ``seq/nesterov_a_vector_sum``. Please name all tasks **with the same** name directory. If the ``seq`` task is named ``seq/nesterov_a_vector_sum``, then the ``omp`` task must be named ``omp/nesterov_a_vector_sum``.

 - Navigate into the newly created folder and begin your work on the task. The folder must contain only 4 directories with files:

+  - ``data`` - Directory with own data files for functional testing of the task.
   - ``func_tests`` - Directory with Google tests for functional testing of the task.
   - ``include`` - Directory for header files with function prototypes.
   - ``perf_tests`` - Directory with Google tests for performance testing. The number of tests must be 2: ``run_task`` and ``run_pipeline``.
   - ``src`` - Directory with source files containing the function implementations.

 - There must be 10 executable files for running:

-  - ``<mpi, omp, seq, stl, tbb>_<func, perf>_tests``. For example, ``omp_perf_tests`` - an executable file for performance tests of OpenMP practice tasks.
+  - ``<all, mpi, omp, seq, stl, tbb>_<func, perf>_tests``. For example, ``omp_perf_tests`` - an executable file for performance tests of OpenMP practice tasks.

 - All prototypes and classes in the ``include`` directory must be namespace-escaped. Name your namespace as follows:
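The namespace rule itself is truncated in this diff. Judging from the test sources added in this commit, the convention is ``<last name>_<first letter of name>_<short task name>_<technology>``, e.g. ``nesterov_a_test_task_mpi``. A minimal header sketch following that pattern for an ``all`` task (the path, class, and member names below are illustrative, not part of this commit):

// tasks/all/<last name>_<first letter>_<task>/include/ops_all.hpp (hypothetical path)
#pragma once

#include <utility>
#include <vector>

namespace nesterov_a_vector_sum_all {  // example namespace only

class VectorSumAll {
 public:
  explicit VectorSumAll(std::vector<int> input) : input_(std::move(input)) {}

  // Would run the hybrid (MPI + threads) implementation of the task.
  int ComputeSum() const;

 private:
  std::vector<int> input_;
};

}  // namespace nesterov_a_vector_sum_all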

tasks/CMakeLists.txt (+29)

@@ -30,6 +30,12 @@ else ()
     message(WARNING "TBB tasks not build!")
 endif ()

+if (USE_MPI AND USE_OMP AND USE_SEQ AND USE_STL AND USE_TBB)
+    list(APPEND LIST_OF_TASKS "all")
+else ()
+    message(WARNING "ALL tasks not build!")
+endif ()
+
 add_compile_definitions(PATH_TO_PPC_PROJECT="${CMAKE_SOURCE_DIR}")

 foreach(TASK_TYPE ${LIST_OF_TASKS})

@@ -101,6 +107,29 @@ foreach(TASK_TYPE ${LIST_OF_TASKS})
       target_link_libraries(${EXEC_FUNC} PUBLIC boost_mpi boost_serialization)
     endif ()
   elseif ("${MODULE_NAME}" STREQUAL "tbb")
+    add_dependencies(${EXEC_FUNC} ppc_onetbb)
+    target_link_directories(${EXEC_FUNC} PUBLIC ${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib)
+    if(NOT MSVC)
+      target_link_libraries(${EXEC_FUNC} PUBLIC tbb)
+    endif()
+  elseif ("${MODULE_NAME}" STREQUAL "all")
+    target_link_libraries(${EXEC_FUNC} PUBLIC Threads::Threads)
+    target_link_libraries(${EXEC_FUNC} PUBLIC ${OpenMP_libomp_LIBRARY})
+    if( MPI_COMPILE_FLAGS )
+      set_target_properties(${EXEC_FUNC} PROPERTIES COMPILE_FLAGS "${MPI_COMPILE_FLAGS}")
+    endif( MPI_COMPILE_FLAGS )
+
+    if( MPI_LINK_FLAGS )
+      set_target_properties(${EXEC_FUNC} PROPERTIES LINK_FLAGS "${MPI_LINK_FLAGS}")
+    endif( MPI_LINK_FLAGS )
+    target_link_libraries(${EXEC_FUNC} PUBLIC ${MPI_LIBRARIES})
+
+    add_dependencies(${EXEC_FUNC} ppc_boost)
+    target_link_directories(${EXEC_FUNC} PUBLIC ${CMAKE_BINARY_DIR}/ppc_boost/install/lib)
+    if (NOT MSVC)
+      target_link_libraries(${EXEC_FUNC} PUBLIC boost_mpi boost_serialization)
+    endif ()
+
     add_dependencies(${EXEC_FUNC} ppc_onetbb)
     target_link_directories(${EXEC_FUNC} PUBLIC ${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib)
     if(NOT MSVC)
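The new ``all`` branch links the test binaries against Threads, OpenMP (``OpenMP_libomp_LIBRARY``), MPI, Boost.MPI/serialization, and oneTBB at once, so a task under ``tasks/all`` can combine several parallel models in one source file. A minimal hybrid MPI + OpenMP sketch of the kind of code such a target has to compile and link (illustrative only, not part of this commit):

#include <omp.h>

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <functional>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);  // wraps MPI_Init / MPI_Finalize
  boost::mpi::communicator world;           // wraps MPI_COMM_WORLD

  // Each MPI rank sums a strided slice of 0..999 with an OpenMP reduction.
  const int start = world.rank();
  const int step = world.size();
  long long local_sum = 0;
#pragma omp parallel for reduction(+ : local_sum)
  for (int i = start; i < 1000; i += step) {
    local_sum += i;
  }

  // Combine the per-rank partial sums with Boost.MPI.
  long long global_sum = 0;
  boost::mpi::all_reduce(world, local_sum, global_sum, std::plus<long long>());

  if (world.rank() == 0) {
    std::cout << "ranks: " << world.size() << ", threads per rank: " << omp_get_max_threads()
              << ", sum(0..999) = " << global_sum << std::endl;
  }
  return 0;
}

Compiling and linking something like this is exactly what the combined set of flags and libraries wired up in the ``all`` branch above is for.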

tasks/all/example_mpi/data/test.txt (+1)

@@ -0,0 +1 @@
+120
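The single value ``120`` is a vector length for file-driven functional testing; the ``Test_Max_2_File`` test below reads the analogous ``mpi/example/data/test.txt`` fixture, so this copy presumably serves the same role for the ``all`` task.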
+292

@@ -0,0 +1,292 @@
#include <gtest/gtest.h>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <fstream>
#include <random>
#include <vector>

#include "mpi/example/include/ops_mpi.hpp"

namespace {
std::vector<int> GetRandomVector(int sz) {
  std::random_device dev;
  std::mt19937 gen(dev());
  std::vector<int> vec(sz);
  for (int i = 0; i < sz; i++) {
    vec[i] = gen() % 100 - 50;
  }
  return vec;
}
}  // namespace

TEST(Parallel_Operations_MPI, Test_Sum) {
  boost::mpi::communicator world;
  std::vector<int> global_vec;
  std::vector<int32_t> global_sum(1, 0);
  // Create TaskData
  auto task_data_par = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    const int count_size_vector = 120;
    global_vec = GetRandomVector(count_size_vector);
    task_data_par->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_par->inputs_count.emplace_back(global_vec.size());
    task_data_par->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_sum.data()));
    task_data_par->outputs_count.emplace_back(global_sum.size());
  }

  nesterov_a_test_task_mpi::TestMPITaskParallel test_mpi_task_parallel(task_data_par, "+");
  ASSERT_EQ(test_mpi_task_parallel.validation(), true);
  test_mpi_task_parallel.pre_processing();
  test_mpi_task_parallel.run();
  test_mpi_task_parallel.post_processing();

  if (world.rank() == 0) {
    // Create data
    std::vector<int32_t> reference_sum(1, 0);

    // Create TaskData
    auto task_data_seq = std::make_shared<ppc::core::TaskData>();
    task_data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_seq->inputs_count.emplace_back(global_vec.size());
    task_data_seq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_sum.data()));
    task_data_seq->outputs_count.emplace_back(reference_sum.size());

    // Create Task
    nesterov_a_test_task_mpi::TestMPITaskSequential test_mpi_task_sequential(task_data_seq, "+");
    ASSERT_EQ(test_mpi_task_sequential.validation(), true);
    test_mpi_task_sequential.pre_processing();
    test_mpi_task_sequential.run();
    test_mpi_task_sequential.post_processing();

    ASSERT_EQ(reference_sum[0], global_sum[0]);
  }
}

TEST(Parallel_Operations_MPI, Test_Diff) {
  boost::mpi::communicator world;
  std::vector<int> global_vec;
  std::vector<int32_t> global_diff(1, 0);
  // Create TaskData
  auto task_data_par = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    const int count_size_vector = 240;
    global_vec = GetRandomVector(count_size_vector);
    task_data_par->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_par->inputs_count.emplace_back(global_vec.size());
    task_data_par->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_diff.data()));
    task_data_par->outputs_count.emplace_back(global_diff.size());
  }

  nesterov_a_test_task_mpi::TestMPITaskParallel test_mpi_task_parallel(task_data_par, "-");
  ASSERT_EQ(test_mpi_task_parallel.validation(), true);
  test_mpi_task_parallel.pre_processing();
  test_mpi_task_parallel.run();
  test_mpi_task_parallel.post_processing();

  if (world.rank() == 0) {
    // Create data
    std::vector<int32_t> reference_diff(1, 0);

    // Create TaskData
    auto task_data_seq = std::make_shared<ppc::core::TaskData>();
    task_data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_seq->inputs_count.emplace_back(global_vec.size());
    task_data_seq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_diff.data()));
    task_data_seq->outputs_count.emplace_back(reference_diff.size());

    // Create Task
    nesterov_a_test_task_mpi::TestMPITaskSequential test_mpi_task_sequential(task_data_seq, "-");
    ASSERT_EQ(test_mpi_task_sequential.validation(), true);
    test_mpi_task_sequential.pre_processing();
    test_mpi_task_sequential.run();
    test_mpi_task_sequential.post_processing();

    ASSERT_EQ(reference_diff[0], global_diff[0]);
  }
}

TEST(Parallel_Operations_MPI, Test_Diff_2) {
  boost::mpi::communicator world;
  std::vector<int> global_vec;
  std::vector<int32_t> global_diff(1, 0);
  // Create TaskData
  auto task_data_par = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    const int count_size_vector = 120;
    global_vec = GetRandomVector(count_size_vector);
    task_data_par->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_par->inputs_count.emplace_back(global_vec.size());
    task_data_par->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_diff.data()));
    task_data_par->outputs_count.emplace_back(global_diff.size());
  }

  nesterov_a_test_task_mpi::TestMPITaskParallel test_mpi_task_parallel(task_data_par, "-");
  ASSERT_EQ(test_mpi_task_parallel.validation(), true);
  test_mpi_task_parallel.pre_processing();
  test_mpi_task_parallel.run();
  test_mpi_task_parallel.post_processing();

  if (world.rank() == 0) {
    // Create data
    std::vector<int32_t> reference_diff(1, 0);

    // Create TaskData
    auto task_data_seq = std::make_shared<ppc::core::TaskData>();
    task_data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_seq->inputs_count.emplace_back(global_vec.size());
    task_data_seq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_diff.data()));
    task_data_seq->outputs_count.emplace_back(reference_diff.size());

    // Create Task
    nesterov_a_test_task_mpi::TestMPITaskSequential test_mpi_task_sequential(task_data_seq, "-");
    ASSERT_EQ(test_mpi_task_sequential.validation(), true);
    test_mpi_task_sequential.pre_processing();
    test_mpi_task_sequential.run();
    test_mpi_task_sequential.post_processing();

    ASSERT_EQ(reference_diff[0], global_diff[0]);
  }
}

TEST(Parallel_Operations_MPI, Test_Max) {
  boost::mpi::communicator world;
  std::vector<int> global_vec;
  std::vector<int32_t> global_max(1, 0);
  // Create TaskData
  auto task_data_par = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    const int count_size_vector = 240;
    global_vec = GetRandomVector(count_size_vector);
    task_data_par->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_par->inputs_count.emplace_back(global_vec.size());
    task_data_par->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
    task_data_par->outputs_count.emplace_back(global_max.size());
  }

  nesterov_a_test_task_mpi::TestMPITaskParallel test_mpi_task_parallel(task_data_par, "max");
  ASSERT_EQ(test_mpi_task_parallel.validation(), true);
  test_mpi_task_parallel.pre_processing();
  test_mpi_task_parallel.run();
  test_mpi_task_parallel.post_processing();

  if (world.rank() == 0) {
    // Create data
    std::vector<int32_t> reference_max(1, 0);

    // Create TaskData
    auto task_data_seq = std::make_shared<ppc::core::TaskData>();
    task_data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_seq->inputs_count.emplace_back(global_vec.size());
    task_data_seq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_max.data()));
    task_data_seq->outputs_count.emplace_back(reference_max.size());

    // Create Task
    nesterov_a_test_task_mpi::TestMPITaskSequential test_mpi_task_sequential(task_data_seq, "max");
    ASSERT_EQ(test_mpi_task_sequential.validation(), true);
    test_mpi_task_sequential.pre_processing();
    test_mpi_task_sequential.run();
    test_mpi_task_sequential.post_processing();

    ASSERT_EQ(reference_max[0], global_max[0]);
  }
}

TEST(Parallel_Operations_MPI, Test_Max_2) {
  boost::mpi::communicator world;
  std::vector<int> global_vec;
  std::vector<int32_t> global_max(1, 0);
  // Create TaskData
  auto task_data_par = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    const int count_size_vector = 120;
    global_vec = GetRandomVector(count_size_vector);
    task_data_par->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_par->inputs_count.emplace_back(global_vec.size());
    task_data_par->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
    task_data_par->outputs_count.emplace_back(global_max.size());
  }

  nesterov_a_test_task_mpi::TestMPITaskParallel test_mpi_task_parallel(task_data_par, "max");
  ASSERT_EQ(test_mpi_task_parallel.validation(), true);
  test_mpi_task_parallel.pre_processing();
  test_mpi_task_parallel.run();
  test_mpi_task_parallel.post_processing();

  if (world.rank() == 0) {
    // Create data
    std::vector<int32_t> reference_max(1, 0);

    // Create TaskData
    auto task_data_seq = std::make_shared<ppc::core::TaskData>();
    task_data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_seq->inputs_count.emplace_back(global_vec.size());
    task_data_seq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_max.data()));
    task_data_seq->outputs_count.emplace_back(reference_max.size());

    // Create Task
    nesterov_a_test_task_mpi::TestMPITaskSequential test_mpi_task_sequential(task_data_seq, "max");
    ASSERT_EQ(test_mpi_task_sequential.validation(), true);
    test_mpi_task_sequential.pre_processing();
    test_mpi_task_sequential.run();
    test_mpi_task_sequential.post_processing();

    ASSERT_EQ(reference_max[0], global_max[0]);
  }
}

TEST(Parallel_Operations_MPI, Test_Max_2_File) {
  boost::mpi::communicator world;
  std::vector<int> global_vec;
  std::vector<int32_t> global_max(1, 0);
  // Create TaskData
  auto task_data_par = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    std::string line;
    std::ifstream test_file(ppc::core::GetAbsolutePath("mpi/example/data/test.txt"));
    if (test_file.is_open()) {
      getline(test_file, line);
    }
    test_file.close();

    const int count_size_vector = std::stoi(line);
    global_vec = GetRandomVector(count_size_vector);
    task_data_par->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_par->inputs_count.emplace_back(global_vec.size());
    task_data_par->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
    task_data_par->outputs_count.emplace_back(global_max.size());
  }

  nesterov_a_test_task_mpi::TestMPITaskParallel test_mpi_task_parallel(task_data_par, "max");
  ASSERT_EQ(test_mpi_task_parallel.validation(), true);
  test_mpi_task_parallel.pre_processing();
  test_mpi_task_parallel.run();
  test_mpi_task_parallel.post_processing();

  if (world.rank() == 0) {
    // Create data
    std::vector<int32_t> reference_max(1, 0);

    // Create TaskData
    auto task_data_seq = std::make_shared<ppc::core::TaskData>();
    task_data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
    task_data_seq->inputs_count.emplace_back(global_vec.size());
    task_data_seq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_max.data()));
    task_data_seq->outputs_count.emplace_back(reference_max.size());

    // Create Task
    nesterov_a_test_task_mpi::TestMPITaskSequential test_mpi_task_sequential(task_data_seq, "max");
    ASSERT_EQ(test_mpi_task_sequential.validation(), true);
    test_mpi_task_sequential.pre_processing();
    test_mpi_task_sequential.run();
    test_mpi_task_sequential.post_processing();

    ASSERT_EQ(reference_max[0], global_max[0]);
  }
}
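As an MPI-based Google Test binary, this suite would normally be launched through an MPI runner rather than directly, for example something like ``mpirun -np 2 ./all_func_tests`` (assuming the ``<module>_<func, perf>_tests`` naming from the guide above and a standard ``mpirun`` launcher). The rank-0-only blocks are what keep the reference checks valid under any process count.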
