Added support for CUDA calls in sequence diagrams (#263)

Bartek Kryza
2024-05-01 18:18:23 +02:00
parent dfb4f38ded
commit 67363013fe
25 changed files with 543 additions and 46 deletions

View File

@@ -154,7 +154,20 @@ add_subdirectory(src)
#
option(BUILD_TESTS "" ON)
option(ENABLE_CXX_MODULES_TEST_CASES "" OFF)
+option(ENABLE_CUDA_TEST_CASES "" OFF)
+
+#
+# Setup CUDA if available
+#
+if(ENABLE_CUDA_TEST_CASES)
+    include(CheckLanguage)
+    check_language(CUDA)
+    if(DEFINED CMAKE_CUDA_COMPILER)
+        set(ENABLE_CUDA_TEST_CASES ON)
+    endif(DEFINED CMAKE_CUDA_COMPILER)
+endif(ENABLE_CUDA_TEST_CASES)
+
if(BUILD_TESTS)
    enable_testing()
    add_subdirectory(tests)
endif(BUILD_TESTS)

View File

@@ -39,6 +39,7 @@ CMAKE_EXE_LINKER_FLAGS ?=
CMAKE_GENERATOR ?= Unix Makefiles
ENABLE_CXX_MODULES_TEST_CASES ?= OFF
+ENABLE_CUDA_TEST_CASES ?= OFF
GIT_VERSION ?= $(shell git describe --tags --always --abbrev=7)
PKG_VERSION ?= $(shell git describe --tags --always --abbrev=7 | tr - .)
@@ -63,6 +64,7 @@ debug/CMakeLists.txt:
		-DLLVM_CONFIG_PATH=${LLVM_CONFIG_PATH} \
		-DLINK_LLVM_SHARED=${LLVM_SHARED} \
		-DCMAKE_PREFIX=${CMAKE_PREFIX} \
+		-DENABLE_CUDA_TEST_CASES=$(ENABLE_CUDA_TEST_CASES) \
		-DENABLE_CXX_MODULES_TEST_CASES=$(ENABLE_CXX_MODULES_TEST_CASES)

release/CMakeLists.txt:
@@ -77,6 +79,7 @@ release/CMakeLists.txt:
		-DLLVM_CONFIG_PATH=${LLVM_CONFIG_PATH} \
		-DLINK_LLVM_SHARED=${LLVM_SHARED} \
		-DCMAKE_PREFIX=${CMAKE_PREFIX} \
+		-DENABLE_CUDA_TEST_CASES=$(ENABLE_CUDA_TEST_CASES) \
		-DENABLE_CXX_MODULES_TEST_CASES=$(ENABLE_CXX_MODULES_TEST_CASES)

debug_tidy/CMakeLists.txt:
@@ -92,6 +95,7 @@ debug_tidy/CMakeLists.txt:
		-DLLVM_CONFIG_PATH=${LLVM_CONFIG_PATH} \
		-DLINK_LLVM_SHARED=${LLVM_SHARED} \
		-DCMAKE_PREFIX=${CMAKE_PREFIX} \
+		-DENABLE_CUDA_TEST_CASES=$(ENABLE_CUDA_TEST_CASES) \
		-DENABLE_CXX_MODULES_TEST_CASES=$(ENABLE_CXX_MODULES_TEST_CASES)

debug: debug/CMakeLists.txt

View File

@@ -52,6 +52,7 @@ Main features supported so far include:
  * Handling of template code including constexpr conditionals - [_example_](docs/test_cases/t20018.md)
  * Handling of lambda expressions - [_example_](docs/test_cases/t20012.md)
  * Interactive links to online code to classes and call expressions - [_example_](https://raw.githubusercontent.com/bkryza/clang-uml/master/docs/test_cases/t20021_sequence.svg)
+  * Support for CUDA Kernel and CUDA Device function calls - [_example_](docs/test_cases/t20050.md)
* **Package diagram generation**
  * Generation of package diagram based on C++ namespaces - [_example_](docs/test_cases/t30001.md)
  * Generation of package diagram based on subdirectories - [_example_](docs/test_cases/t30010.md)

View File

@@ -944,4 +944,13 @@ bool is_struct(const clang::NamedDecl *decl)
    return false;
}

+bool has_attr(const clang::FunctionDecl *decl, clang::attr::Kind function_attr)
+{
+    for (const auto &attr : decl->attrs()) {
+        if (attr->getKind() == function_attr)
+            return true;
+    }
+
+    return false;
+}
} // namespace clanguml::common
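A minimal usage sketch (hypothetical call site, not part of this commit): given a clang::FunctionDecl obtained during AST traversal, the helper classifies CUDA functions via their implicit attributes:

    // Sketch only: decl is a const clang::FunctionDecl* from an AST visitor
    const bool is_kernel =
        clanguml::common::has_attr(decl, clang::attr::CUDAGlobal); // __global__
    const bool is_device =
        clanguml::common::has_attr(decl, clang::attr::CUDADevice); // __device__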

View File

@@ -315,4 +315,13 @@ bool is_coroutine(const clang::FunctionDecl &decl);
 */
bool is_struct(const clang::NamedDecl *decl);

+/**
+ * Check if a function declaration carries the specified attribute
+ *
+ * @param decl Function declaration
+ * @param function_attr Clang function attribute
+ * @return True, if decl contains the specified function attribute
+ */
+bool has_attr(const clang::FunctionDecl *decl, clang::attr::Kind function_attr);
} // namespace clanguml::common

View File

@@ -39,6 +39,14 @@ void to_json(nlohmann::json &j, const participant &c)
    if (c.type_name() == "method") {
        j["name"] = dynamic_cast<const method &>(c).method_name();
    }
+    if (c.type_name() == "function" || c.type_name() == "function_template") {
+        const auto &f = dynamic_cast<const function &>(c);
+        if (f.is_cuda_kernel())
+            j["is_cuda_kernel"] = true;
+        if (f.is_cuda_device())
+            j["is_cuda_device"] = true;
+    }
}

void to_json(nlohmann::json &j, const activity &c)
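With these flags, a CUDA kernel participant in the generated JSON carries an extra boolean field; a hedged sketch of the relevant fragment (other fields omitted, name taken from the t20049 test case below):

    { "name": "vector_square_add", "type": "function", "is_cuda_kernel": true }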

View File

@@ -119,12 +119,23 @@ void generator::generate_call(const message &m, std::ostream &ostr) const
    }
    else if (config().combine_free_functions_into_file_participants()) {
        if (to.value().type_name() == "function") {
-            message = dynamic_cast<const model::function &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >><br>{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >><br>{}", message);
        }
        else if (to.value().type_name() == "function_template") {
-            message = dynamic_cast<const model::function_template &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >><br>{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >><br>{}", message);
        }
    }
@@ -397,11 +408,10 @@ void generator::generate_participant(
        config().combine_free_functions_into_file_participants()) {
        // Create a single participant for all functions declared in a
        // single file
-        const auto &file_path =
-            model()
-                .get_participant<model::function>(participant_id)
-                .value()
-                .file();
+        const auto &f =
+            model().get_participant<model::function>(participant_id).value();
+
+        const auto &file_path = f.file();

        assert(!file_path.empty());
@@ -427,8 +437,22 @@ void generator::generate_participant(
        config().simplify_template_type(participant.full_name(false)));

    common::ensure_lambda_type_is_relative(config(), participant_name);

-    ostr << indent(1) << "participant " << participant.alias() << " as "
-         << render_participant_name(participant_name);
+    ostr << indent(1) << "participant " << participant.alias() << " as ";
+
+    if (participant.type_name() == "function" ||
+        participant.type_name() == "function_template") {
+        const auto &f =
+            model().get_participant<model::function>(participant_id).value();
+        if (f.is_cuda_kernel())
+            ostr << "<< CUDA Kernel >><br>";
+        else if (f.is_cuda_device())
+            ostr << "<< CUDA Device >><br>";
+    }
+
+    ostr << render_participant_name(participant_name);

    ostr << '\n';

    generated_participants_.emplace(participant_id);
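For the kernel from the t20049 test case below, the emitted MermaidJS participant line would look roughly like this (the alias is illustrative):

    participant A_0001 as << CUDA Kernel >><br>vector_square_add(float *,float *,float *,int)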

View File

@@ -69,12 +69,22 @@ void generator::generate_call(const message &m, std::ostream &ostr) const
    }
    else if (config().combine_free_functions_into_file_participants()) {
        if (to.value().type_name() == "function") {
-            message = dynamic_cast<const model::function &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >>\\n{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >>\\n{}", message);
        }
        else if (to.value().type_name() == "function_template") {
-            message = dynamic_cast<const model::function_template &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >>\\n{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >>\\n{}", message);
        }
    }
@@ -432,6 +442,15 @@ void generator::generate_participant(
ostr << "participant \"" << render_name(participant_name) << "\" as " ostr << "participant \"" << render_name(participant_name) << "\" as "
<< participant.alias(); << participant.alias();
if (const auto *function_ptr =
dynamic_cast<const model::function *>(&participant);
function_ptr) {
if (function_ptr->is_cuda_kernel())
ostr << " << CUDA Kernel >>";
else if (function_ptr->is_cuda_device())
ostr << " << CUDA Device >>";
}
if (config().generate_links) { if (config().generate_links) {
common_generator<diagram_config, diagram_model>::generate_link( common_generator<diagram_config, diagram_model>::generate_link(
ostr, participant); ostr, participant);
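In the PlantUML output the stereotype lands after the participant alias instead; roughly (the alias is illustrative):

    participant "vector_square_add(float *,float *,float *,int)" as C_0002 << CUDA Kernel >>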

View File

@@ -150,6 +150,14 @@ bool function::is_operator() const { return is_operator_; }
void function::is_operator(bool o) { is_operator_ = o; }

+bool function::is_cuda_kernel() const { return is_cuda_kernel_; }
+
+void function::is_cuda_kernel(bool c) { is_cuda_kernel_ = c; }
+
+bool function::is_cuda_device() const { return is_cuda_device_; }
+
+void function::is_cuda_device(bool c) { is_cuda_device_ = c; }
+
void function::return_type(const std::string &rt) { return_type_ = rt; }

const std::string &function::return_type() const { return return_type_; }

View File

@@ -303,6 +303,34 @@ struct function : public participant {
     */
    void is_operator(bool o);

+    /**
+     * @brief Check whether the function is a CUDA kernel
+     *
+     * @return True, if the function is a CUDA kernel
+     */
+    bool is_cuda_kernel() const;
+
+    /**
+     * @brief Set whether the function is a CUDA kernel
+     *
+     * @param c True, if the function is a CUDA kernel
+     */
+    void is_cuda_kernel(bool c);
+
+    /**
+     * @brief Check whether the function is a CUDA device function
+     *
+     * @return True, if the function is a CUDA device function
+     */
+    bool is_cuda_device() const;
+
+    /**
+     * @brief Set whether the function is a CUDA device function
+     *
+     * @param c True, if the function is a CUDA device function
+     */
+    void is_cuda_device(bool c);
+
    /**
     * @brief Set functions return type
     *
@@ -339,6 +367,8 @@ private:
    bool is_void_{false};
    bool is_static_{false};
    bool is_operator_{false};
+    bool is_cuda_kernel_{false};
+    bool is_cuda_device_{false};
    std::string return_type_;
    std::vector<std::string> parameters_;
};

View File

@@ -360,6 +360,12 @@ bool translation_unit_visitor::VisitFunctionDecl(
    function_model_ptr->is_operator(declaration->isOverloadedOperator());

+    function_model_ptr->is_cuda_kernel(
+        common::has_attr(declaration, clang::attr::CUDAGlobal));
+    function_model_ptr->is_cuda_device(
+        common::has_attr(declaration, clang::attr::CUDADevice));
+
    context().update(declaration);

    context().set_caller_id(function_model_ptr->id());
@@ -531,6 +537,29 @@ bool translation_unit_visitor::TraverseCallExpr(clang::CallExpr *expr)
    return true;
}

+bool translation_unit_visitor::TraverseCUDAKernelCallExpr(
+    clang::CUDAKernelCallExpr *expr)
+{
+    if (source_manager().isInSystemHeader(expr->getSourceRange().getBegin()))
+        return true;
+
+    LOG_TRACE("Entering CUDA kernel call expression at {}",
+        expr->getBeginLoc().printToString(source_manager()));
+
+    context().enter_callexpr(expr);
+
+    RecursiveASTVisitor<translation_unit_visitor>::TraverseCallExpr(expr);
+
+    LOG_TRACE("Leaving CUDA kernel call expression at {}",
+        expr->getBeginLoc().printToString(source_manager()));
+
+    context().leave_callexpr();
+
+    pop_message_to_diagram(expr);
+
+    return true;
+}
+
bool translation_unit_visitor::TraverseCXXMemberCallExpr(
    clang::CXXMemberCallExpr *expr)
{
@@ -1067,6 +1096,15 @@ bool translation_unit_visitor::VisitCallExpr(clang::CallExpr *expr)
"Message for this call expression is taken from comment directive"); "Message for this call expression is taken from comment directive");
} }
// //
// Call to a CUDA kernel function
//
else if (const auto *cuda_call_expr =
clang::dyn_cast_or_null<clang::CUDAKernelCallExpr>(expr);
cuda_call_expr != nullptr) {
if (!process_cuda_kernel_call_expression(m, cuda_call_expr))
return true;
}
//
// Call to an overloaded operator // Call to an overloaded operator
// //
else if (const auto *operator_call_expr = else if (const auto *operator_call_expr =
@@ -1250,6 +1288,43 @@ bool translation_unit_visitor::VisitCXXConstructExpr(
    return true;
}

+bool translation_unit_visitor::process_cuda_kernel_call_expression(
+    model::message &m, const clang::CUDAKernelCallExpr *expr)
+{
+    const auto *callee_decl = expr->getCalleeDecl();
+
+    if (callee_decl == nullptr)
+        return false;
+
+    const auto *callee_function = callee_decl->getAsFunction();
+
+    if (callee_function == nullptr)
+        return false;
+
+    if (!should_include(callee_function))
+        return false;
+
+    // Skip free functions declared in files outside of included paths
+    if (config().combine_free_functions_into_file_participants() &&
+        !diagram().should_include(common::model::source_file{m.file()}))
+        return false;
+
+    auto callee_name = callee_function->getQualifiedNameAsString() + "()";
+
+    const auto maybe_id = get_unique_id(callee_function->getID());
+    if (!maybe_id.has_value()) {
+        // This is hopefully not an interesting call...
+        m.set_to(callee_function->getID());
+    }
+    else {
+        m.set_to(maybe_id.value());
+    }
+
+    m.set_message_name(callee_name.substr(0, callee_name.size() - 2));
+
+    return true;
+}
+
bool translation_unit_visitor::process_operator_call_expression(
    model::message &m, const clang::CXXOperatorCallExpr *operator_call_expr)
{
@@ -1469,8 +1544,6 @@ bool translation_unit_visitor::process_function_call_expression(
    auto callee_name = callee_function->getQualifiedNameAsString() + "()";

-    std::unique_ptr<model::function_template> f_ptr;
-
    const auto maybe_id = get_unique_id(callee_function->getID());
    if (!maybe_id.has_value()) {
        // This is hopefully not an interesting call...
@@ -1482,9 +1555,6 @@ bool translation_unit_visitor::process_function_call_expression(
    m.set_message_name(callee_name.substr(0, callee_name.size() - 2));

-    if (f_ptr)
-        diagram().add_participant(std::move(f_ptr));
-
    return true;
}

View File

@@ -77,9 +77,11 @@ public:
    bool VisitCallExpr(clang::CallExpr *expr);

+    bool TraverseVarDecl(clang::VarDecl *VD);
+
    bool TraverseCallExpr(clang::CallExpr *expr);

-    bool TraverseVarDecl(clang::VarDecl *VD);
+    bool TraverseCUDAKernelCallExpr(clang::CUDAKernelCallExpr *expr);

    bool TraverseCXXMemberCallExpr(clang::CXXMemberCallExpr *expr);
@@ -395,6 +397,9 @@ private:
    bool process_operator_call_expression(model::message &m,
        const clang::CXXOperatorCallExpr *operator_call_expr);

+    bool process_cuda_kernel_call_expression(
+        model::message &m, const clang::CUDAKernelCallExpr *cuda_call_expr);
+
    /**
     * @brief Handle a class method call expression
     *

View File

@@ -9,13 +9,18 @@ file(GLOB_RECURSE TEST_CONFIG_YMLS test_config_data/*.yml
set(TEST_CASES_REQUIRING_CXX20 t00056 t00058 t00059 t00065 t00069)
set(TEST_CASES_REQUIRING_CXX20_MODULES t00070 t00071 t00072
    t30012 t30013 t30014 t30015)
+set(TEST_CASES_REQUIRING_CUDA t20049 t20050)

if(ENABLE_CXX_MODULES_TEST_CASES)
+    message(STATUS "Enabling C++ modules test cases")
    foreach(CXX20_MOD_TC ${TEST_CASES_REQUIRING_CXX20_MODULES})
        list(APPEND TEST_CASES_REQUIRING_CXX20 ${CXX20_MOD_TC})
    endforeach()
    set(CMAKE_CXX_SCAN_FOR_MODULES ON)
else()
+    message(STATUS "Disabling C++ modules test cases")
    foreach(CXX20_MOD_TC ${TEST_CASES_REQUIRING_CXX20_MODULES})
        list(FILTER TEST_CASE_SOURCES
             EXCLUDE
@@ -23,10 +28,23 @@ else()
        list(FILTER TEST_CASE_CONFIGS
             EXCLUDE
             REGEX ".*${CXX20_MOD_TC}.*")
    endforeach()
endif(ENABLE_CXX_MODULES_TEST_CASES)

+if(NOT ENABLE_CUDA_TEST_CASES)
+    message(STATUS "Disabling CUDA test cases")
+    foreach(CUDA_TC ${TEST_CASES_REQUIRING_CUDA})
+        list(FILTER TEST_CASE_SOURCES
+             EXCLUDE
+             REGEX ".*${CUDA_TC}.*")
+        list(FILTER TEST_CASE_CONFIGS
+             EXCLUDE
+             REGEX ".*${CUDA_TC}.*")
+    endforeach()
+else()
+    message(STATUS "Enabling CUDA test cases")
+endif(NOT ENABLE_CUDA_TEST_CASES)
+
set(CLANG_UML_TEST_LIBRARIES
    clang-umllib
    ${YAML_CPP_LIBRARIES}
@@ -39,7 +57,6 @@ endif(MSVC)
list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_20 SUPPORTS_CXX_STD_20)

-message(STATUS "Enabling C++20 test cases")
-
# Remove test cases which require C++20 if they are not supported here
if(SUPPORTS_CXX_STD_20 EQUAL -1
@@ -53,8 +70,10 @@ if(SUPPORTS_CXX_STD_20 EQUAL -1
             EXCLUDE
             REGEX ".*${CXX20_TC}.*")
    endforeach()
+    message(STATUS "Disabling C++20 test cases")
else()
    set(ENABLE_CXX_STD_20_TEST_CASES 1)
+    message(STATUS "Enabling C++20 test cases")
endif()

if(APPLE)

tests/t20049/.clang-uml (new file)

@@ -0,0 +1,11 @@
diagrams:
  t20049_sequence:
    type: sequence
    glob:
      - t20049.cu
    include:
      namespaces:
        - clanguml::t20049
    using_namespace: clanguml::t20049
    from:
      - function: "clanguml::t20049::tmain()"

tests/t20049/t20049.cu (new file)

@@ -0,0 +1,36 @@
#include "t20049.cuh"
namespace clanguml {
namespace t20049 {
constexpr unsigned long N{1000};
__device__ float square(float a) { return a * a; }
__global__ void vector_square_add(float *out, float *a, float *b, int n)
{
for (int i = 0; i < n; i++) {
out[i] = add(square(a[i]), square(b[i]));
}
}
int tmain()
{
float *a, *b, *out;
a = (float *)malloc(sizeof(float) * N);
b = (float *)malloc(sizeof(float) * N);
out = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
vector_square_add<<<1, 1>>>(out, a, b, N);
return 0;
}
}
}
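Per the assertions in test_case.h below, the sequence recovered from this file boils down to (an informal sketch, not literal generator output):

    tmain() -> vector_square_add(float *,float *,float *,int)
    vector_square_add(float *,float *,float *,int) -> square(float)
    vector_square_add(float *,float *,float *,int) -> add<float>(float,float)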

tests/t20049/t20049.cuh (new file)

@@ -0,0 +1,11 @@
namespace clanguml {
namespace t20049 {

template <typename T> __device__ T add(T a, T b) { return a + b; }

__device__ float square(float a);

__global__ void vector_add(float *out, float *a, float *b, int n);
}
}

tests/t20049/test_case.h (new file)

@@ -0,0 +1,81 @@
/**
* tests/t20049/test_case.h
*
* Copyright (c) 2021-2024 Bartek Kryza <bkryza@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
TEST_CASE("t20049", "[test-case][sequence]")
{
auto [config, db] = load_config("t20049");
auto diagram = config.diagrams["t20049_sequence"];
REQUIRE(diagram->name == "t20049_sequence");
auto model = generate_sequence_diagram(*db, diagram);
REQUIRE(model->name() == "t20049_sequence");
{
auto src = generate_sequence_puml(diagram, *model);
AliasMatcher _A(src);
REQUIRE_THAT(src, StartsWith("@startuml"));
REQUIRE_THAT(src, EndsWith("@enduml\n"));
REQUIRE_THAT(src,
HasCall(_A("tmain()"),
_A("vector_square_add(float *,float *,float *,int)"), ""));
REQUIRE_THAT(src,
HasCall(_A("vector_square_add(float *,float *,float *,int)"),
_A("square(float)"), ""));
REQUIRE_THAT(src,
HasCall(_A("vector_square_add(float *,float *,float *,int)"),
_A("add<float>(float,float)"), ""));
save_puml(config.output_directory(), diagram->name + ".puml", src);
}
{
auto j = generate_sequence_json(diagram, *model);
using namespace json;
save_json(config.output_directory(), diagram->name + ".json", j);
}
{
auto src = generate_sequence_mermaid(diagram, *model);
mermaid::SequenceDiagramAliasMatcher _A(src);
using mermaid::HasCall;
REQUIRE_THAT(src,
HasCall(_A("tmain()"),
_A("<< CUDA Kernel >><br>vector_square_add(float *,float "
"*,float *,int)"),
""));
REQUIRE_THAT(src,
HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float "
"*,float *,int)"),
_A("<< CUDA Device >><br>square(float)"), ""));
REQUIRE_THAT(src,
HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float "
"*,float *,int)"),
_A("<< CUDA Device >><br>add<float>(float,float)"), ""));
save_mermaid(config.output_directory(), diagram->name + ".mmd", src);
}
}

tests/t20050/.clang-uml (new file)

@@ -0,0 +1,12 @@
diagrams:
  t20050_sequence:
    type: sequence
    glob:
      - t20050.cu
    include:
      namespaces:
        - clanguml::t20050
    using_namespace: clanguml::t20050
    combine_free_functions_into_file_participants: true
    from:
      - function: "clanguml::t20050::tmain()"

tests/t20050/t20050.cu (new file)

@@ -0,0 +1,38 @@
#include "t20050.cuh"
namespace clanguml {
namespace t20050 {
constexpr unsigned long N{1000};
template <typename T> __device__ T add(T a, T b) { return a + b; }
__device__ float square(float a) { return a * a; }
__global__ void vector_square_add(float *out, float *a, float *b, int n)
{
for (int i = 0; i < n; i++) {
out[i] = add(square(a[i]), square(b[i]));
}
}
int tmain()
{
float *a, *b, *out;
a = (float *)malloc(sizeof(float) * N);
b = (float *)malloc(sizeof(float) * N);
out = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
vector_square_add<<<1, 1>>>(out, a, b, N);
return 0;
}
}
}

tests/t20050/t20050.cuh (new file)

@@ -0,0 +1,9 @@
namespace clanguml {
namespace t20050 {

__device__ float square(float a);

__global__ void vector_add(float *out, float *a, float *b, int n);
}
}

tests/t20050/test_case.h (new file)

@@ -0,0 +1,80 @@
/**
* tests/t20050/test_case.h
*
* Copyright (c) 2021-2024 Bartek Kryza <bkryza@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
TEST_CASE("t20050", "[test-case][sequence]")
{
auto [config, db] = load_config("t20050");
auto diagram = config.diagrams["t20050_sequence"];
REQUIRE(diagram->name == "t20050_sequence");
auto model = generate_sequence_diagram(*db, diagram);
REQUIRE(model->name() == "t20050_sequence");
{
auto src = generate_sequence_puml(diagram, *model);
AliasMatcher _A(src);
REQUIRE_THAT(src, StartsWith("@startuml"));
REQUIRE_THAT(src, EndsWith("@enduml\n"));
// Check if all calls exist
REQUIRE_THAT(src,
HasCall(_A("t20050.cu"), _A("t20050.cu"),
"<< CUDA Kernel >>\\\\nvector_square_add(float *,float *,float "
"*,int)"));
REQUIRE_THAT(src,
HasCall(_A("t20050.cu"), _A("t20050.cu"),
"<< CUDA Device >>\\\\nsquare(float)"));
REQUIRE_THAT(src,
HasCall(_A("t20050.cu"), _A("t20050.cu"),
"<< CUDA Device >>\\\\nadd<float>(float,float)"));
save_puml(config.output_directory(), diagram->name + ".puml", src);
}
{
auto j = generate_sequence_json(diagram, *model);
using namespace json;
save_json(config.output_directory(), diagram->name + ".json", j);
}
{
auto src = generate_sequence_mermaid(diagram, *model);
mermaid::SequenceDiagramAliasMatcher _A(src);
using mermaid::HasCall;
REQUIRE_THAT(src,
HasCall(_A("t20050.cu"), _A("t20050.cu"),
"<< CUDA Kernel >><br>vector_square_add(float *,float *,float "
"*,int)"));
REQUIRE_THAT(src,
HasCall(_A("t20050.cu"), _A("t20050.cu"),
"<< CUDA Device >><br>square(float)"));
REQUIRE_THAT(src,
HasCall(_A("t20050.cu"), _A("t20050.cu"),
"<< CUDA Device >><br>add<float>(float,float)"));
save_mermaid(config.output_directory(), diagram->name + ".mmd", src);
}
}

View File

@@ -475,6 +475,8 @@ using namespace clanguml::test::matchers;
#include "t20046/test_case.h" #include "t20046/test_case.h"
#include "t20047/test_case.h" #include "t20047/test_case.h"
#include "t20048/test_case.h" #include "t20048/test_case.h"
#include "t20049/test_case.h"
#include "t20050/test_case.h"
/// ///
/// Package diagram tests /// Package diagram tests

View File

@@ -169,17 +169,11 @@ public:
    bool match(T const &in) const override
    {
-        std::istringstream fin(in);
-        std::string line;
-
        std::regex r{m_is_response ? response_pattern : call_pattern};

-        while (std::getline(fin, line)) {
-            std::smatch base_match;
-            std::regex_search(in, base_match, r);
-            if (base_match.size() > 0)
-                return true;
-        }
+        std::smatch base_match;
+        std::regex_search(in, base_match, r);
+        if (base_match.size() > 0)
+            return true;

        return false;
    }
@@ -255,17 +249,11 @@ public:
    bool match(T const &in) const override
    {
-        std::istringstream fin(in);
-        std::string line;
-
        std::regex r{m_is_response ? response_pattern : call_pattern};

-        while (std::getline(fin, line)) {
-            std::smatch base_match;
-            std::regex_search(in, base_match, r);
-            if (base_match.size() > 0)
-                return true;
-        }
+        std::smatch base_match;
+        std::regex_search(in, base_match, r);
+        if (base_match.size() > 0)
+            return true;

        return false;
    }
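The removed std::getline loop searched the full string in, not line, on every iteration, so it repeated the same work without changing the result; the rewrite keeps the single regex_search, making each matcher body equivalent to this sketch:

    std::smatch base_match;
    return std::regex_search(in, base_match, r);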

View File

@@ -364,6 +364,15 @@ test_cases:
  - name: t20047
    title: Test case for 'call' comment directive
    description:
+  - name: t20048
+    title: Test case for message comments
+    description:
+  - name: t20049
+    title: Test case for CUDA kernel calls
+    description:
+  - name: t20050
+    title: Test case for CUDA kernel calls with participants combined to file
+    description:
Package diagrams:
  - name: t30001
    title: Basic package diagram test case

View File

@@ -60,7 +60,8 @@ with open(r'tests/test_cases.yaml') as f:
tc.write("## Source code\n") tc.write("## Source code\n")
for root, dirs, files in os.walk(f'tests/{name}/'): for root, dirs, files in os.walk(f'tests/{name}/'):
for source_file in files: for source_file in files:
if source_file.endswith((".h", ".cc", ".c", ".cppm")): if source_file.endswith((
".h", ".cc", ".c", ".cppm", ".cu", ".cuh")):
if source_file == "test_case.h": if source_file == "test_case.h":
continue continue
file_path = os.path.join(root, source_file) file_path = os.path.join(root, source_file)