Apply clang-tidy on both headers and sources #206


Merged: 6 commits, merged on Jan 13, 2025
1 change: 1 addition & 0 deletions .clang-tidy
@@ -14,6 +14,7 @@ Checks: >
-bugprone-narrowing-conversions,
-clang-analyzer-optin.cplusplus.UninitializedObject,
-misc-const-correctness,
+ -misc-header-include-cycle,
-misc-include-cleaner,
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
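The renames in the file diffs below all follow the identifier-naming scheme that clang-tidy now enforces on headers as well as sources. A minimal sketch of the convention, inferred from the changes in this PR (the exact `readability-identifier-naming` options are not visible in this diff and are an assumption):

```cpp
#include <cstdint>

// Sketch of the naming convention applied by this PR, inferred from its diffs.
struct NamingExample {                        // types stay PascalCase
  constexpr static double kMaxValue = 10.0;   // constants: kPascalCase (was MAX_VALUE)
  enum Kind : uint8_t { kNone, kPipeline };   // enumerators: kPascalCase (was NONE, PIPELINE)
};

class TaskExample {
 public:
  bool RunImpl();                             // methods: PascalCase (was run_impl)

 private:
  int state_;                                 // private data members: snake_case_
};

void Use(int task_data);                      // parameters/locals: snake_case (was taskData_)
```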
24 changes: 12 additions & 12 deletions modules/core/perf/func_tests/perf_tests.cpp
@@ -10,7 +10,7 @@ TEST(perf_tests, check_perf_pipeline) {
std::vector<uint32_t> in(2000, 1);
std::vector<uint32_t> out(1, 0);

- // Create TaskData
+ // Create task_data
auto task_data = std::make_shared<ppc::core::TaskData>();
task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
task_data->inputs_count.emplace_back(in.size());
@@ -33,7 +33,7 @@ TEST(perf_tests, check_perf_pipeline) {

// Get perf statistic
ppc::core::Perf::PrintPerfStatistic(perf_results);
- ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::MAX_TIME);
+ ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::kMaxTime);
EXPECT_EQ(out[0], in.size());
}

@@ -42,7 +42,7 @@ TEST(perf_tests, check_perf_pipeline_float) {
std::vector<float> in(2000, 1);
std::vector<float> out(1, 0);

- // Create TaskData
+ // Create task_data
auto task_data = std::make_shared<ppc::core::TaskData>();
task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
task_data->inputs_count.emplace_back(in.size());
@@ -65,7 +65,7 @@ TEST(perf_tests, check_perf_pipeline_float) {

// Get perf statistic
ppc::core::Perf::PrintPerfStatistic(perf_results);
- ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::MAX_TIME);
+ ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::kMaxTime);
EXPECT_EQ(out[0], in.size());
}

@@ -74,7 +74,7 @@ TEST(perf_tests, check_perf_pipeline_uint8_t_slow_test) {
std::vector<uint8_t> in(128, 1);
std::vector<uint8_t> out(1, 0);

- // Create TaskData
+ // Create task_data
auto task_data = std::make_shared<ppc::core::TaskData>();
task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
task_data->inputs_count.emplace_back(in.size());
@@ -102,7 +102,7 @@ TEST(perf_tests, check_perf_pipeline_uint8_t_slow_test) {

// Get perf statistic
ASSERT_ANY_THROW(ppc::core::Perf::PrintPerfStatistic(perf_results));
- ASSERT_GE(perf_results->time_sec, ppc::core::PerfResults::MAX_TIME);
+ ASSERT_GE(perf_results->time_sec, ppc::core::PerfResults::kMaxTime);
EXPECT_EQ(out[0], in.size());
}

@@ -111,7 +111,7 @@ TEST(perf_tests, check_perf_task) {
std::vector<uint32_t> in(2000, 1);
std::vector<uint32_t> out(1, 0);

- // Create TaskData
+ // Create task_data
auto task_data = std::make_shared<ppc::core::TaskData>();
task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
task_data->inputs_count.emplace_back(in.size());
@@ -133,9 +133,9 @@ TEST(perf_tests, check_perf_task) {
perf_analyzer.TaskRun(perf_attr, perf_results);

// Get perf statistic
- perf_results->type_of_running = ppc::core::PerfResults::NONE;
+ perf_results->type_of_running = ppc::core::PerfResults::kNone;
ppc::core::Perf::PrintPerfStatistic(perf_results);
- ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::MAX_TIME);
+ ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::kMaxTime);
EXPECT_EQ(out[0], in.size());
}

@@ -144,7 +144,7 @@ TEST(perf_tests, check_perf_task_float) {
std::vector<float> in(2000, 1);
std::vector<float> out(1, 0);

- // Create TaskData
+ // Create task_data
auto task_data = std::make_shared<ppc::core::TaskData>();
task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
task_data->inputs_count.emplace_back(in.size());
@@ -166,8 +166,8 @@ TEST(perf_tests, check_perf_task_float) {
perf_analyzer.TaskRun(perf_attr, perf_results);

// Get perf statistic
- perf_results->type_of_running = ppc::core::PerfResults::PIPELINE;
+ perf_results->type_of_running = ppc::core::PerfResults::kPipeline;
ppc::core::Perf::PrintPerfStatistic(perf_results);
- ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::MAX_TIME);
+ ASSERT_LE(perf_results->time_sec, ppc::core::PerfResults::kMaxTime);
EXPECT_EQ(out[0], in.size());
}
22 changes: 11 additions & 11 deletions modules/core/perf/func_tests/test_task.hpp
@@ -16,25 +16,25 @@ namespace ppc::test::perf {
template <class T>
class TestTask : public ppc::core::Task {
public:
- explicit TestTask(ppc::core::TaskDataPtr taskData_) : Task(taskData_) {}
+ explicit TestTask(ppc::core::TaskDataPtr task_data) : Task(task_data) {}

- bool pre_processing_impl() override {
-   input_ = reinterpret_cast<T *>(taskData->inputs[0]);
-   output_ = reinterpret_cast<T *>(taskData->outputs[0]);
+ bool PreProcessingImpl() override {
+   input_ = reinterpret_cast<T *>(task_data->inputs[0]);
+   output_ = reinterpret_cast<T *>(task_data->outputs[0]);
output_[0] = 0;
return true;
}

- bool validation_impl() override { return taskData->outputs_count[0] == 1; }
+ bool ValidationImpl() override { return task_data->outputs_count[0] == 1; }

- bool run_impl() override {
-   for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+ bool RunImpl() override {
+   for (unsigned i = 0; i < task_data->inputs_count[0]; i++) {
output_[0] += input_[i];
}
return true;
}

- bool post_processing_impl() override { return true; }
+ bool PostProcessingImpl() override { return true; }

private:
T *input_{};
Expand All @@ -44,11 +44,11 @@ class TestTask : public ppc::core::Task {
template <class T>
class FakePerfTask : public TestTask<T> {
public:
- explicit FakePerfTask(ppc::core::TaskDataPtr perfTaskData_) : TestTask<T>(perfTaskData_) {}
+ explicit FakePerfTask(ppc::core::TaskDataPtr perf_task_data) : TestTask<T>(perf_task_data) {}

- bool run_impl() override {
+ bool RunImpl() override {
std::this_thread::sleep_for(20000ms);
-   return TestTask<T>::run_impl();
+   return TestTask<T>::RunImpl();
}
};

18 changes: 8 additions & 10 deletions modules/core/perf/include/perf.hpp
@@ -8,8 +8,7 @@

#include "core/task/include/task.hpp"

- namespace ppc {
- namespace core {
+ namespace ppc::core {

struct PerfAttr {
// count of task's running
@@ -20,8 +19,8 @@ struct PerfAttr {
struct PerfResults {
// measurement of task's time (in seconds)
double time_sec = 0.0;
- enum TypeOfRunning { PIPELINE, TASK_RUN, NONE } type_of_running = NONE;
- constexpr const static double MAX_TIME = 10.0;
+ enum TypeOfRunning : uint8_t { kPipeline, kTaskRun, kNone } type_of_running = kNone;
+ constexpr const static double kMaxTime = 10.0;
};

class Perf {
@@ -31,22 +30,21 @@ class Perf {
// Set task with initialized task and initialized data for performance
// analysis
void SetTask(const std::shared_ptr<Task>& task_ptr);
- // Check performance of full task's pipeline: pre_processing() ->
- // validation() -> run() -> post_processing()
+ // Check performance of full task's pipeline: PreProcessing() ->
+ // Validation() -> Run() -> PostProcessing()
void PipelineRun(const std::shared_ptr<PerfAttr>& perf_attr,
const std::shared_ptr<ppc::core::PerfResults>& perf_results);
- // Check performance of task's run() function
+ // Check performance of task's Run() function
void TaskRun(const std::shared_ptr<PerfAttr>& perf_attr, const std::shared_ptr<ppc::core::PerfResults>& perf_results);
// Print results for automation checkers
static void PrintPerfStatistic(const std::shared_ptr<PerfResults>& perf_results);

private:
- std::shared_ptr<Task> task;
+ std::shared_ptr<Task> task_;
static void CommonRun(const std::shared_ptr<PerfAttr>& perf_attr, const std::function<void()>& pipeline,
const std::shared_ptr<ppc::core::PerfResults>& perf_results);
};

- }  // namespace core
- }  // namespace ppc
+ }  // namespace ppc::core

#endif // MODULES_CORE_INCLUDE_PERF_HPP_
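For orientation, a minimal usage sketch of the renamed API, assembled from the calls exercised in perf_tests.cpp above. PerfAttr is left at its defaults because its field names are not visible in this diff, and the include paths are assumptions:

```cpp
#include <memory>

#include "core/perf/include/perf.hpp"
#include "core/task/include/task.hpp"

// `task` is any initialized ppc::core::Task subclass, e.g. the TestTask
// template from test_task.hpp above.
void MeasurePipeline(const std::shared_ptr<ppc::core::Task>& task) {
  auto perf_attr = std::make_shared<ppc::core::PerfAttr>();
  auto perf_results = std::make_shared<ppc::core::PerfResults>();

  ppc::core::Perf perf_analyzer(task);
  perf_analyzer.PipelineRun(perf_attr, perf_results);

  // Prints "<path>:pipeline:<seconds>"; per the slow test above, this
  // throws when perf_results->time_sec exceeds PerfResults::kMaxTime.
  ppc::core::Perf::PrintPerfStatistic(perf_results);
}
```

TaskRun() instead times only Run(), wrapping it in its own Validation/PreProcessing/PostProcessing calls, as the perf.cpp diff below shows.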
42 changes: 21 additions & 21 deletions modules/core/perf/src/perf.cpp
@@ -10,38 +10,38 @@
ppc::core::Perf::Perf(const std::shared_ptr<Task>& task_ptr) { SetTask(task_ptr); }

void ppc::core::Perf::SetTask(const std::shared_ptr<Task>& task_ptr) {
- task_ptr->get_data()->state_of_testing = TaskData::StateOfTesting::PERF;
- this->task = task_ptr;
+ task_ptr->GetData()->state_of_testing = TaskData::StateOfTesting::kPerf;
+ this->task_ = task_ptr;
}

void ppc::core::Perf::PipelineRun(const std::shared_ptr<PerfAttr>& perf_attr,
const std::shared_ptr<ppc::core::PerfResults>& perf_results) {
- perf_results->type_of_running = PerfResults::TypeOfRunning::PIPELINE;
+ perf_results->type_of_running = PerfResults::TypeOfRunning::kPipeline;

CommonRun(
perf_attr,
[&]() {
-       task->validation();
-       task->pre_processing();
-       task->run();
-       task->post_processing();
+       task_->Validation();
+       task_->PreProcessing();
+       task_->Run();
+       task_->PostProcessing();
},
perf_results);
}

void ppc::core::Perf::TaskRun(const std::shared_ptr<PerfAttr>& perf_attr,
const std::shared_ptr<ppc::core::PerfResults>& perf_results) {
- perf_results->type_of_running = PerfResults::TypeOfRunning::TASK_RUN;
+ perf_results->type_of_running = PerfResults::TypeOfRunning::kTaskRun;

- task->validation();
- task->pre_processing();
- CommonRun(perf_attr, [&]() { task->run(); }, perf_results);
- task->post_processing();
+ task_->Validation();
+ task_->PreProcessing();
+ CommonRun(perf_attr, [&]() { task_->Run(); }, perf_results);
+ task_->PostProcessing();

- task->validation();
- task->pre_processing();
- task->run();
- task->post_processing();
+ task_->Validation();
+ task_->PreProcessing();
+ task_->Run();
+ task_->PostProcessing();
}

void ppc::core::Perf::CommonRun(const std::shared_ptr<PerfAttr>& perf_attr, const std::function<void()>& pipeline,
@@ -62,11 +62,11 @@ void ppc::core::Perf::PrintPerfStatistic(const std::shared_ptr<PerfResults>& per

auto time_secs = perf_results->time_sec;

- if (perf_results->type_of_running == PerfResults::TypeOfRunning::TASK_RUN) {
+ if (perf_results->type_of_running == PerfResults::TypeOfRunning::kTaskRun) {
type_test_name = "task_run";
- } else if (perf_results->type_of_running == PerfResults::TypeOfRunning::PIPELINE) {
+ } else if (perf_results->type_of_running == PerfResults::TypeOfRunning::kPipeline) {
type_test_name = "pipeline";
- } else if (perf_results->type_of_running == PerfResults::TypeOfRunning::NONE) {
+ } else if (perf_results->type_of_running == PerfResults::TypeOfRunning::kNone) {
type_test_name = "none";
}

@@ -77,13 +77,13 @@ void ppc::core::Perf::PrintPerfStatistic(const std::shared_ptr<PerfResults>& per
relative_path.erase(last_found_position, relative_path.length() - 1);

std::stringstream perf_res_str;
- if (time_secs < PerfResults::MAX_TIME) {
+ if (time_secs < PerfResults::kMaxTime) {
perf_res_str << std::fixed << std::setprecision(10) << time_secs;
std::cout << relative_path << ":" << type_test_name << ":" << perf_res_str.str() << '\n';
} else {
std::stringstream err_msg;
err_msg << '\n' << "Task execute time need to be: ";
- err_msg << "time < " << PerfResults::MAX_TIME << " secs." << '\n';
+ err_msg << "time < " << PerfResults::kMaxTime << " secs." << '\n';
err_msg << "Original time in secs: " << time_secs << '\n';
perf_res_str << std::fixed << std::setprecision(10) << -1.0;
std::cout << relative_path << ":" << type_test_name << ":" << perf_res_str.str() << '\n';