From b574a41a64fbb977766b58638986a9f4347216b1 Mon Sep 17 00:00:00 2001 From: Bartek Kryza Date: Wed, 1 May 2024 19:01:25 +0200 Subject: [PATCH] Added test case for cuda_kernel and cuda_device callee types in callee_type filter (#263) --- docs/diagram_filters.md | 2 + src/common/model/diagram_filter.cc | 14 ++++++ src/config/config.cc | 4 ++ src/config/config.h | 4 +- src/config/schema.h | 2 + src/config/yaml_decoders.cc | 4 ++ tests/CMakeLists.txt | 2 +- tests/t20051/.clang-uml | 14 ++++++ tests/t20051/t20051.cu | 38 ++++++++++++++ tests/t20051/t20051.cuh | 9 ++++ tests/t20051/test_case.h | 81 ++++++++++++++++++++++++++++++ tests/test_cases.cc | 1 + 12 files changed, 173 insertions(+), 2 deletions(-) create mode 100644 tests/t20051/.clang-uml create mode 100644 tests/t20051/t20051.cu create mode 100644 tests/t20051/t20051.cuh create mode 100644 tests/t20051/test_case.h diff --git a/docs/diagram_filters.md b/docs/diagram_filters.md index 4e996f4f..d312ab7b 100644 --- a/docs/diagram_filters.md +++ b/docs/diagram_filters.md @@ -258,6 +258,8 @@ The following callee types are supported: * `function` * `function_template` * `lambda` + * `cuda_kernel` + * `cuda_device` ## dependants and dependencies diff --git a/src/common/model/diagram_filter.cc b/src/common/model/diagram_filter.cc index 3e807d92..431fc2b5 100644 --- a/src/common/model/diagram_filter.cc +++ b/src/common/model/diagram_filter.cc @@ -455,6 +455,16 @@ tvl::value_t callee_filter::match( return dynamic_cast<const function_template *>(p) != nullptr; }; + auto is_cuda_kernel = [](const participant *p) { + const auto *f = dynamic_cast<const function *>(p); + return (f != nullptr) && (f->is_cuda_kernel()); + }; + + auto is_cuda_device = [](const participant *p) { + const auto *f = dynamic_cast<const function *>(p); + return (f != nullptr) && (f->is_cuda_device()); + }; + switch (ct) { case config::callee_type::method: return p.type_name() == "method"; @@ -477,6 +487,10 @@ tvl::value_t callee_filter::match( return p.type_name() == "function_template"; case 
config::callee_type::lambda: return p.type_name() == "method" && is_lambda((method &)p); + case config::callee_type::cuda_kernel: + return is_cuda_kernel(&p); + case config::callee_type::cuda_device: + return is_cuda_device(&p); } return false; diff --git a/src/config/config.cc b/src/config/config.cc index 0eb83f91..627b8ce3 100644 --- a/src/config/config.cc +++ b/src/config/config.cc @@ -106,6 +106,10 @@ std::string to_string(callee_type mt) return "function_template"; case callee_type::lambda: return "lambda"; + case callee_type::cuda_kernel: + return "cuda_kernel"; + case callee_type::cuda_device: + return "cuda_device"; } assert(false); diff --git a/src/config/config.h b/src/config/config.h index e76a8bef..ee69c0a1 100644 --- a/src/config/config.h +++ b/src/config/config.h @@ -78,7 +78,9 @@ enum class callee_type { method, function, function_template, - lambda + lambda, + cuda_kernel, + cuda_device }; std::string to_string(callee_type mt); diff --git a/src/config/schema.h b/src/config/schema.h index 2b0eabb1..1768774c 100644 --- a/src/config/schema.h +++ b/src/config/schema.h @@ -112,6 +112,8 @@ types: - function - function_template - lambda + - cuda_kernel + - cuda_device context_filter_match_t: match: radius: int diff --git a/src/config/yaml_decoders.cc b/src/config/yaml_decoders.cc index a63271d0..6f4e4c1b 100644 --- a/src/config/yaml_decoders.cc +++ b/src/config/yaml_decoders.cc @@ -312,6 +312,10 @@ template <> struct convert<callee_type> { rhs = callee_type::method; else if (val == to_string(callee_type::lambda)) rhs = callee_type::lambda; + else if (val == to_string(callee_type::cuda_kernel)) + rhs = callee_type::cuda_kernel; + else if (val == to_string(callee_type::cuda_device)) + rhs = callee_type::cuda_device; else return false; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3ab3cfce..f2c94d8e 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -9,7 +9,7 @@ file(GLOB_RECURSE TEST_CONFIG_YMLS test_config_data/*.yml
set(TEST_CASES_REQUIRING_CXX20 t00056 t00058 t00059 t00065 t00069) set(TEST_CASES_REQUIRING_CXX20_MODULES t00070 t00071 t00072 t30012 t30013 t30014 t30015) -set(TEST_CASES_REQUIRING_CUDA t20049 t20050) +set(TEST_CASES_REQUIRING_CUDA t20049 t20050 t20051) if(ENABLE_CXX_MODULES_TEST_CASES) message(STATUS "Enabling C++ modules test cases") diff --git a/tests/t20051/.clang-uml b/tests/t20051/.clang-uml new file mode 100644 index 00000000..51b64589 --- /dev/null +++ b/tests/t20051/.clang-uml @@ -0,0 +1,14 @@ +diagrams: + t20051_sequence: + type: sequence + glob: + - t20051.cu + include: + namespaces: + - clanguml::t20051 + exclude: + callee_types: + - cuda_device + using_namespace: clanguml::t20051 + from: + - function: "clanguml::t20051::tmain()" \ No newline at end of file diff --git a/tests/t20051/t20051.cu b/tests/t20051/t20051.cu new file mode 100644 index 00000000..1935d3c0 --- /dev/null +++ b/tests/t20051/t20051.cu @@ -0,0 +1,38 @@ +#include "t20051.cuh" + +namespace clanguml { +namespace t20051 { + +constexpr unsigned long N{1000}; + +template <typename T> __device__ T add(T a, T b) { return a + b; } + +__device__ float square(float a) { return a * a; } + +__global__ void vector_square_add(float *out, float *a, float *b, int n) +{ + for (int i = 0; i < n; i++) { + out[i] = add(square(a[i]), square(b[i])); + } +} + +int tmain() +{ + float *a, *b, *out; + + a = (float *)malloc(sizeof(float) * N); + b = (float *)malloc(sizeof(float) * N); + out = (float *)malloc(sizeof(float) * N); + + for (int i = 0; i < N; i++) { + a[i] = 1.0f; + b[i] = 2.0f; + } + + vector_square_add<<<1, 1>>>(out, a, b, N); + + return 0; +} + +} +} \ No newline at end of file diff --git a/tests/t20051/t20051.cuh b/tests/t20051/t20051.cuh new file mode 100644 index 00000000..cdf0460e --- /dev/null +++ b/tests/t20051/t20051.cuh @@ -0,0 +1,9 @@ +namespace clanguml { +namespace t20051 { + +__device__ float square(float a); + +__global__ void vector_square_add(float *out, float *a, float *b, int n); + +} +} \ No
newline at end of file diff --git a/tests/t20051/test_case.h b/tests/t20051/test_case.h new file mode 100644 index 00000000..0c3806f0 --- /dev/null +++ b/tests/t20051/test_case.h @@ -0,0 +1,81 @@ +/** + * tests/t20051/test_case.h + * + * Copyright (c) 2021-2024 Bartek Kryza + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +TEST_CASE("t20051", "[test-case][sequence]") +{ + auto [config, db] = load_config("t20051"); + + auto diagram = config.diagrams["t20051_sequence"]; + + REQUIRE(diagram->name == "t20051_sequence"); + + auto model = generate_sequence_diagram(*db, diagram); + + REQUIRE(model->name() == "t20051_sequence"); + + { + auto src = generate_sequence_puml(diagram, *model); + AliasMatcher _A(src); + + REQUIRE_THAT(src, StartsWith("@startuml")); + REQUIRE_THAT(src, EndsWith("@enduml\n")); + + REQUIRE_THAT(src, + HasCall(_A("tmain()"), + _A("vector_square_add(float *,float *,float *,int)"), "")); + REQUIRE_THAT(src, + !HasCall(_A("vector_square_add(float *,float *,float *,int)"), + _A("square(float)"), "")); + REQUIRE_THAT(src, + !HasCall(_A("vector_square_add(float *,float *,float *,int)"), + _A("add(float,float)"), "")); + + save_puml(config.output_directory(), diagram->name + ".puml", src); + } + + { + auto j = generate_sequence_json(diagram, *model); + + using namespace json; + + save_json(config.output_directory(), diagram->name + ".json", j); + } + + { + auto src = generate_sequence_mermaid(diagram, *model); + + 
mermaid::SequenceDiagramAliasMatcher _A(src); + using mermaid::HasCall; + + REQUIRE_THAT(src, + HasCall(_A("tmain()"), + _A("<< CUDA Kernel >><br>vector_square_add(float *,float " + "*,float *,int)"), + "")); + REQUIRE_THAT(src, + !HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float " + "*,float *,int)"), + _A("<< CUDA Device >><br>square(float)"), "")); + REQUIRE_THAT(src, + !HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float " + "*,float *,int)"), + _A("<< CUDA Device >><br>add(float,float)"), "")); + + save_mermaid(config.output_directory(), diagram->name + ".mmd", src); + } +} \ No newline at end of file diff --git a/tests/test_cases.cc b/tests/test_cases.cc index 983dbccd..080737f3 100644 --- a/tests/test_cases.cc +++ b/tests/test_cases.cc @@ -477,6 +477,7 @@ using namespace clanguml::test::matchers; #include "t20048/test_case.h" #include "t20049/test_case.h" #include "t20050/test_case.h" +#include "t20051/test_case.h" /// /// Package diagram tests