Added test case for cuda_kernel and cuda_device callee types in callee_type filter (#263)

Bartek Kryza
2024-05-01 19:01:25 +02:00
parent 67363013fe
commit b574a41a64
12 changed files with 173 additions and 2 deletions


@@ -258,6 +258,8 @@ The following callee types are supported:
 * `function`
 * `function_template`
 * `lambda`
+* `cuda_kernel`
+* `cuda_device`
 
 ## dependants and dependencies
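For reference, the two new callee types correspond to the two kinds of device-side functions in CUDA: `cuda_kernel` matches `__global__` functions (kernels, launchable from the host), while `cuda_device` matches `__device__` functions (callable only from device code). A minimal illustration, not part of this commit:

    __device__ float square(float v) { return v * v; } // matched by `cuda_device`

    __global__ void square_all(float *data, int n) // matched by `cuda_kernel`
    {
        for (int i = 0; i < n; i++)
            data[i] = square(data[i]);
    }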


@@ -455,6 +455,16 @@ tvl::value_t callee_filter::match(
         return dynamic_cast<const function *>(p) != nullptr;
     };
 
+    auto is_cuda_kernel = [](const participant *p) {
+        const auto *f = dynamic_cast<const function *>(p);
+        return (f != nullptr) && (f->is_cuda_kernel());
+    };
+
+    auto is_cuda_device = [](const participant *p) {
+        const auto *f = dynamic_cast<const function *>(p);
+        return (f != nullptr) && (f->is_cuda_device());
+    };
+
     switch (ct) {
     case config::callee_type::method:
         return p.type_name() == "method";
@@ -477,6 +487,10 @@ tvl::value_t callee_filter::match(
         return p.type_name() == "function_template";
     case config::callee_type::lambda:
         return p.type_name() == "method" && is_lambda((method &)p);
+    case config::callee_type::cuda_kernel:
+        return is_cuda_kernel(&p);
+    case config::callee_type::cuda_device:
+        return is_cuda_device(&p);
     }
 
     return false;
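The two new predicates follow the same downcast-and-check pattern as the existing helper above: `dynamic_cast` the participant to the sequence-model `function` type, and only if that succeeds query the CUDA flag, so non-function participants safely yield `false`. A self-contained sketch of the pattern, using hypothetical stand-in types (clang-uml's actual model classes are richer):

    #include <iostream>

    struct participant {
        virtual ~participant() = default;
    };

    struct function : participant {
        bool cuda_kernel{false};
        bool is_cuda_kernel() const { return cuda_kernel; }
    };

    // False both for non-function participants (the cast fails) and for
    // ordinary host functions (the flag is unset).
    bool is_cuda_kernel(const participant *p)
    {
        const auto *f = dynamic_cast<const function *>(p);
        return (f != nullptr) && f->is_cuda_kernel();
    }

    int main()
    {
        function kernel;
        kernel.cuda_kernel = true;
        participant plain;
        std::cout << is_cuda_kernel(&kernel) << is_cuda_kernel(&plain); // prints 10
    }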


@@ -106,6 +106,10 @@ std::string to_string(callee_type mt)
         return "function_template";
     case callee_type::lambda:
         return "lambda";
+    case callee_type::cuda_kernel:
+        return "cuda_kernel";
+    case callee_type::cuda_device:
+        return "cuda_device";
     }
 
     assert(false);


@@ -78,7 +78,9 @@ enum class callee_type {
     method,
     function,
     function_template,
-    lambda
+    lambda,
+    cuda_kernel,
+    cuda_device
 };
 
 std::string to_string(callee_type mt);


@@ -112,6 +112,8 @@ types:
       - function
      - function_template
       - lambda
+      - cuda_kernel
+      - cuda_device
   context_filter_match_t:
     match:
       radius: int


@@ -312,6 +312,10 @@ template <> struct convert<callee_type> {
             rhs = callee_type::method;
         else if (val == to_string(callee_type::lambda))
             rhs = callee_type::lambda;
+        else if (val == to_string(callee_type::cuda_kernel))
+            rhs = callee_type::cuda_kernel;
+        else if (val == to_string(callee_type::cuda_device))
+            rhs = callee_type::cuda_device;
         else
             return false;
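This decoder hooks into yaml-cpp's `convert<>` customization point: `Node::as<T>()` default-constructs a `T` and calls `convert<T>::decode`, and returning `false` for an unrecognized scalar surfaces as a conversion error, so a misspelled callee type in a config can be rejected rather than silently ignored. A self-contained sketch of the same mechanism, assuming yaml-cpp and a trimmed-down enum:

    #include <yaml-cpp/yaml.h>

    #include <string>

    enum class callee_type { function, cuda_kernel, cuda_device };

    namespace YAML {
    template <> struct convert<callee_type> {
        static bool decode(const Node &node, callee_type &rhs)
        {
            const auto val = node.as<std::string>();
            if (val == "function")
                rhs = callee_type::function;
            else if (val == "cuda_kernel")
                rhs = callee_type::cuda_kernel;
            else if (val == "cuda_device")
                rhs = callee_type::cuda_device;
            else
                return false; // unknown scalar -> conversion failure
            return true;
        }
    };
    } // namespace YAML

    int main()
    {
        // Decodes the scalar "cuda_kernel" through the specialization above.
        return YAML::Load("cuda_kernel").as<callee_type>() ==
                callee_type::cuda_kernel
            ? 0
            : 1;
    }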


@@ -9,7 +9,7 @@ file(GLOB_RECURSE TEST_CONFIG_YMLS test_config_data/*.yml
 set(TEST_CASES_REQUIRING_CXX20 t00056 t00058 t00059 t00065 t00069)
 set(TEST_CASES_REQUIRING_CXX20_MODULES t00070 t00071 t00072
     t30012 t30013 t30014 t30015)
-set(TEST_CASES_REQUIRING_CUDA t20049 t20050)
+set(TEST_CASES_REQUIRING_CUDA t20049 t20050 t20051)
 
 if(ENABLE_CXX_MODULES_TEST_CASES)
     message(STATUS "Enabling C++ modules test cases")

tests/t20051/.clang-uml (new file, 14 lines)

@@ -0,0 +1,14 @@
+diagrams:
+  t20051_sequence:
+    type: sequence
+    glob:
+      - t20051.cu
+    include:
+      namespaces:
+        - clanguml::t20051
+    exclude:
+      callee_types:
+        - cuda_device
+    using_namespace: clanguml::t20051
+    from:
+      - function: "clanguml::t20051::tmain()"
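The `exclude.callee_types` entry is the crux of this test case: with `cuda_device` excluded, calls into the `__device__` helpers (`square()` and `add<float>()`) should be filtered out of the diagram, while the `__global__` kernel remains visible as a callee of `tmain()`. The assertions in `test_case.h` below check exactly this for both the PlantUML and MermaidJS generators.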

tests/t20051/t20051.cu (new file, 38 lines)

@@ -0,0 +1,38 @@
+#include "t20051.cuh"
+#include <cstdlib>
+
+namespace clanguml {
+namespace t20051 {
+
+constexpr unsigned long N{1000};
+
+template <typename T> __device__ T add(T a, T b) { return a + b; }
+
+__device__ float square(float a) { return a * a; }
+
+__global__ void vector_square_add(float *out, float *a, float *b, int n)
+{
+    for (int i = 0; i < n; i++) {
+        out[i] = add(square(a[i]), square(b[i]));
+    }
+}
+
+int tmain()
+{
+    float *a, *b, *out;
+
+    a = (float *)malloc(sizeof(float) * N);
+    b = (float *)malloc(sizeof(float) * N);
+    out = (float *)malloc(sizeof(float) * N);
+
+    for (int i = 0; i < N; i++) {
+        a[i] = 1.0f;
+        b[i] = 2.0f;
+    }
+
+    vector_square_add<<<1, 1>>>(out, a, b, N);
+
+    return 0;
+}
+}
+}
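Note that the `<<<1, 1>>>` launch configuration runs the kernel on a single thread in a single block; the launch geometry is irrelevant here, since only the static call chain `tmain()` → `vector_square_add()` → `square()` / `add<float>()` matters for the generated sequence diagram.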

tests/t20051/t20051.cuh (new file, 9 lines)

@@ -0,0 +1,9 @@
+namespace clanguml {
+namespace t20051 {
+
+__device__ float square(float a);
+
+__global__ void vector_square_add(float *out, float *a, float *b, int n);
+
+}
+}

tests/t20051/test_case.h (new file, 81 lines)

@@ -0,0 +1,81 @@
+/**
+ * tests/t20051/test_case.h
+ *
+ * Copyright (c) 2021-2024 Bartek Kryza <bkryza@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+TEST_CASE("t20051", "[test-case][sequence]")
+{
+    auto [config, db] = load_config("t20051");
+
+    auto diagram = config.diagrams["t20051_sequence"];
+
+    REQUIRE(diagram->name == "t20051_sequence");
+
+    auto model = generate_sequence_diagram(*db, diagram);
+
+    REQUIRE(model->name() == "t20051_sequence");
+
+    {
+        auto src = generate_sequence_puml(diagram, *model);
+        AliasMatcher _A(src);
+
+        REQUIRE_THAT(src, StartsWith("@startuml"));
+        REQUIRE_THAT(src, EndsWith("@enduml\n"));
+
+        REQUIRE_THAT(src,
+            HasCall(_A("tmain()"),
+                _A("vector_square_add(float *,float *,float *,int)"), ""));
+        REQUIRE_THAT(src,
+            !HasCall(_A("vector_square_add(float *,float *,float *,int)"),
+                _A("square(float)"), ""));
+        REQUIRE_THAT(src,
+            !HasCall(_A("vector_square_add(float *,float *,float *,int)"),
+                _A("add<float>(float,float)"), ""));
+
+        save_puml(config.output_directory(), diagram->name + ".puml", src);
+    }
+
+    {
+        auto j = generate_sequence_json(diagram, *model);
+
+        using namespace json;
+
+        save_json(config.output_directory(), diagram->name + ".json", j);
+    }
+
+    {
+        auto src = generate_sequence_mermaid(diagram, *model);
+        mermaid::SequenceDiagramAliasMatcher _A(src);
+        using mermaid::HasCall;
+
+        REQUIRE_THAT(src,
+            HasCall(_A("tmain()"),
+                _A("<< CUDA Kernel >><br>vector_square_add(float *,float "
+                   "*,float *,int)"),
+                ""));
+        REQUIRE_THAT(src,
+            !HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float "
+                        "*,float *,int)"),
+                _A("<< CUDA Device >><br>square(float)"), ""));
+        REQUIRE_THAT(src,
+            !HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float "
+                        "*,float *,int)"),
+                _A("<< CUDA Device >><br>add<float>(float,float)"), ""));
+
+        save_mermaid(config.output_directory(), diagram->name + ".mmd", src);
+    }
+}


@@ -477,6 +477,7 @@ using namespace clanguml::test::matchers;
 #include "t20048/test_case.h"
 #include "t20049/test_case.h"
 #include "t20050/test_case.h"
+#include "t20051/test_case.h"
 
 ///
 /// Package diagram tests