From 67363013fee465322b4700f9f96f2199bc1c2af9 Mon Sep 17 00:00:00 2001
From: Bartek Kryza
Date: Wed, 1 May 2024 18:18:23 +0200
Subject: [PATCH] Added support for CUDA calls in sequence diagrams (#263)

---
 CMakeLists.txt                             | 17 +++-
 Makefile                                   |  4 +
 README.md                                  |  1 +
 src/common/clang_utils.cc                  |  9 +++
 src/common/clang_utils.h                   |  9 +++
 .../json/sequence_diagram_generator.cc     |  8 ++
 .../mermaid/sequence_diagram_generator.cc  | 46 ++++++++---
 .../plantuml/sequence_diagram_generator.cc | 27 ++++++-
 src/sequence_diagram/model/participant.cc  |  8 ++
 src/sequence_diagram/model/participant.h   | 30 +++++++
 .../visitor/translation_unit_visitor.cc    | 80 ++++++++++++++++--
 .../visitor/translation_unit_visitor.h     |  7 +-
 tests/CMakeLists.txt                       | 23 +++++-
 tests/t20049/.clang-uml                    | 11 +++
 tests/t20049/t20049.cu                     | 36 +++++++++
 tests/t20049/t20049.cuh                    | 11 +++
 tests/t20049/test_case.h                   | 81 +++++++++++++++++++
 tests/t20050/.clang-uml                    | 12 +++
 tests/t20050/t20050.cu                     | 38 +++++++++
 tests/t20050/t20050.cuh                    |  9 +++
 tests/t20050/test_case.h                   | 80 ++++++++++++++++++
 tests/test_cases.cc                        |  2 +
 tests/test_cases.h                         | 28 ++-----
 tests/test_cases.yaml                      |  9 +++
 util/generate_test_cases_docs.py           |  3 +-
 25 files changed, 543 insertions(+), 46 deletions(-)
 create mode 100644 tests/t20049/.clang-uml
 create mode 100644 tests/t20049/t20049.cu
 create mode 100644 tests/t20049/t20049.cuh
 create mode 100644 tests/t20049/test_case.h
 create mode 100644 tests/t20050/.clang-uml
 create mode 100644 tests/t20050/t20050.cu
 create mode 100644 tests/t20050/t20050.cuh
 create mode 100644 tests/t20050/test_case.h

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7721ff83..6941ccbe 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -154,7 +154,20 @@ add_subdirectory(src)
 #
 option(BUILD_TESTS "" ON)
 option(ENABLE_CXX_MODULES_TEST_CASES "" OFF)
+option(ENABLE_CUDA_TEST_CASES "" OFF)
+
+#
+# Disable CUDA test cases if no CUDA compiler is available
+#
+if(ENABLE_CUDA_TEST_CASES)
+    include(CheckLanguage)
+    check_language(CUDA)
+    if(NOT DEFINED CMAKE_CUDA_COMPILER)
+        set(ENABLE_CUDA_TEST_CASES OFF)
+    endif(NOT DEFINED CMAKE_CUDA_COMPILER)
+endif(ENABLE_CUDA_TEST_CASES)
+
 if(BUILD_TESTS)
-  enable_testing()
-  add_subdirectory(tests)
+    enable_testing()
+    add_subdirectory(tests)
 endif(BUILD_TESTS)
diff --git a/Makefile b/Makefile
index 7c296dba..577b6153 100644
--- a/Makefile
+++ b/Makefile
@@ -39,6 +39,7 @@
 CMAKE_EXE_LINKER_FLAGS ?=
 CMAKE_GENERATOR ?= Unix Makefiles
 ENABLE_CXX_MODULES_TEST_CASES ?= OFF
+ENABLE_CUDA_TEST_CASES ?= OFF
 
 GIT_VERSION ?= $(shell git describe --tags --always --abbrev=7)
 PKG_VERSION ?= $(shell git describe --tags --always --abbrev=7 | tr - .)
@@ -63,6 +64,7 @@ debug/CMakeLists.txt:
 		-DLLVM_CONFIG_PATH=${LLVM_CONFIG_PATH} \
 		-DLINK_LLVM_SHARED=${LLVM_SHARED} \
 		-DCMAKE_PREFIX=${CMAKE_PREFIX} \
+		-DENABLE_CUDA_TEST_CASES=$(ENABLE_CUDA_TEST_CASES) \
 		-DENABLE_CXX_MODULES_TEST_CASES=$(ENABLE_CXX_MODULES_TEST_CASES)
 
 release/CMakeLists.txt:
@@ -77,6 +79,7 @@ release/CMakeLists.txt:
 		-DLLVM_CONFIG_PATH=${LLVM_CONFIG_PATH} \
 		-DLINK_LLVM_SHARED=${LLVM_SHARED} \
 		-DCMAKE_PREFIX=${CMAKE_PREFIX} \
+		-DENABLE_CUDA_TEST_CASES=$(ENABLE_CUDA_TEST_CASES) \
 		-DENABLE_CXX_MODULES_TEST_CASES=$(ENABLE_CXX_MODULES_TEST_CASES)
 
 debug_tidy/CMakeLists.txt:
@@ -92,6 +95,7 @@ debug_tidy/CMakeLists.txt:
 		-DLLVM_CONFIG_PATH=${LLVM_CONFIG_PATH} \
 		-DLINK_LLVM_SHARED=${LLVM_SHARED} \
 		-DCMAKE_PREFIX=${CMAKE_PREFIX} \
+		-DENABLE_CUDA_TEST_CASES=$(ENABLE_CUDA_TEST_CASES) \
 		-DENABLE_CXX_MODULES_TEST_CASES=$(ENABLE_CXX_MODULES_TEST_CASES)
 
 debug: debug/CMakeLists.txt
diff --git a/README.md b/README.md
index c6b697ae..b9378391 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,7 @@ Main features supported so far include:
   * Handling of template code including constexpr conditionals - [_example_](docs/test_cases/t20018.md)
   * Handling of lambda expressions - [_example_](docs/test_cases/t20012.md)
   * Interactive links to online code to classes and call expressions - [_example_](https://raw.githubusercontent.com/bkryza/clang-uml/master/docs/test_cases/t20021_sequence.svg)
+  * Support for CUDA Kernel and CUDA Device function calls - [_example_](docs/test_cases/t20050.md)
 * **Package diagram generation**
   * Generation of package diagram based on C++ namespaces - [_example_](docs/test_cases/t30001.md)
   * Generation of package diagram based on subdirectories - [_example_](docs/test_cases/t30010.md)
diff --git a/src/common/clang_utils.cc b/src/common/clang_utils.cc
index 8ce1a351..addaf544 100644
--- a/src/common/clang_utils.cc
+++ b/src/common/clang_utils.cc
@@ -944,4 +944,13 @@ bool is_struct(const clang::NamedDecl *decl)
     return false;
 }
 
+bool has_attr(const clang::FunctionDecl *decl, clang::attr::Kind function_attr)
+{
+    for (const auto &attr : decl->attrs()) {
+        if (attr->getKind() == function_attr)
+            return true;
+    }
+
+    return false;
+}
 } // namespace clanguml::common
diff --git a/src/common/clang_utils.h b/src/common/clang_utils.h
index 11e39488..e5bf04cf 100644
--- a/src/common/clang_utils.h
+++ b/src/common/clang_utils.h
@@ -315,4 +315,13 @@ bool is_coroutine(const clang::FunctionDecl &decl);
  */
 bool is_struct(const clang::NamedDecl *decl);
 
+/**
+ * Check if a function declaration contains the specified attribute
+ *
+ * @param decl Function declaration
+ * @param function_attr Clang function attribute
+ * @return True, if decl contains the specified function attribute
+ */
+bool has_attr(const clang::FunctionDecl *decl, clang::attr::Kind function_attr);
+
 } // namespace clanguml::common
diff --git a/src/sequence_diagram/generators/json/sequence_diagram_generator.cc b/src/sequence_diagram/generators/json/sequence_diagram_generator.cc
index 1aeef809..3c0bdfec 100644
--- a/src/sequence_diagram/generators/json/sequence_diagram_generator.cc
+++ b/src/sequence_diagram/generators/json/sequence_diagram_generator.cc
@@ -39,6 +39,14 @@ void to_json(nlohmann::json &j, const participant &c)
     if (c.type_name() == "method") {
         j["name"] = dynamic_cast<const method &>(c).method_name();
     }
+
+    if (c.type_name() == "function" || c.type_name() == "function_template") {
+        const auto &f = dynamic_cast<const function &>(c);
+        if (f.is_cuda_kernel())
+            j["is_cuda_kernel"] = true;
+        if (f.is_cuda_device())
+            j["is_cuda_device"] = true;
+    }
 }
 
 void to_json(nlohmann::json &j, const activity &c)
diff --git a/src/sequence_diagram/generators/mermaid/sequence_diagram_generator.cc b/src/sequence_diagram/generators/mermaid/sequence_diagram_generator.cc
index ce3d47bf..41bf9390 100644
--- a/src/sequence_diagram/generators/mermaid/sequence_diagram_generator.cc
+++ b/src/sequence_diagram/generators/mermaid/sequence_diagram_generator.cc
@@ -119,12 +119,23 @@ void generator::generate_call(const message &m, std::ostream &ostr) const
     }
     else if (config().combine_free_functions_into_file_participants()) {
         if (to.value().type_name() == "function") {
-            message = dynamic_cast<const model::function &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >><br>{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >><br>{}", message);
         }
         else if (to.value().type_name() == "function_template") {
-            message = dynamic_cast<const model::function_template &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function_template &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >><br>{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >><br>{}", message);
         }
     }
 
@@ -397,11 +408,10 @@ void generator::generate_participant(
         config().combine_free_functions_into_file_participants()) {
         // Create a single participant for all functions declared in a
         // single file
-        const auto &file_path =
-            model()
-                .get_participant<model::function>(participant_id)
-                .value()
-                .file();
+        const auto &f =
+            model().get_participant<model::function>(participant_id).value();
+
+        const auto &file_path = f.file();
 
         assert(!file_path.empty());
 
@@ -427,8 +437,22 @@ void generator::generate_participant(
         config().simplify_template_type(participant.full_name(false)));
     common::ensure_lambda_type_is_relative(config(), participant_name);
 
-    ostr << indent(1) << "participant " << participant.alias() << " as "
-         << render_participant_name(participant_name);
+    ostr << indent(1) << "participant " << participant.alias() << " as ";
+
+    if (participant.type_name() == "function" ||
+        participant.type_name() == "function_template") {
+        const auto &f =
+            model()
+                .get_participant<model::function>(participant_id)
+                .value();
+
+        if (f.is_cuda_kernel())
+            ostr << "<< CUDA Kernel >><br>";
+        else if (f.is_cuda_device())
+            ostr << "<< CUDA Device >><br>";
+    }
+
+    ostr << render_participant_name(participant_name);
 
     ostr << '\n';
 
     generated_participants_.emplace(participant_id);
diff --git a/src/sequence_diagram/generators/plantuml/sequence_diagram_generator.cc b/src/sequence_diagram/generators/plantuml/sequence_diagram_generator.cc
index 520abf9f..7134e891 100644
--- a/src/sequence_diagram/generators/plantuml/sequence_diagram_generator.cc
+++ b/src/sequence_diagram/generators/plantuml/sequence_diagram_generator.cc
@@ -69,12 +69,22 @@ void generator::generate_call(const message &m, std::ostream &ostr) const
     }
     else if (config().combine_free_functions_into_file_participants()) {
         if (to.value().type_name() == "function") {
-            message = dynamic_cast<const model::function &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >>\\n{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >>\\n{}", message);
         }
         else if (to.value().type_name() == "function_template") {
-            message = dynamic_cast<const model::function_template &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function_template &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >>\\n{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >>\\n{}", message);
         }
     }
 
@@ -432,6 +442,15 @@ void generator::generate_participant(
     ostr << "participant \"" << render_name(participant_name) << "\" as "
         << participant.alias();
 
+    if (const auto *function_ptr =
+            dynamic_cast<const model::function *>(&participant);
+        function_ptr) {
+        if (function_ptr->is_cuda_kernel())
+            ostr << " << CUDA Kernel >>";
+        else if (function_ptr->is_cuda_device())
+            ostr << " << CUDA Device >>";
+    }
+
     if (config().generate_links) {
         common_generator<diagram_config, diagram_model>::generate_link(
             ostr, participant);
diff --git a/src/sequence_diagram/model/participant.cc b/src/sequence_diagram/model/participant.cc
index 6e393c7a..cf183edb 100644
--- a/src/sequence_diagram/model/participant.cc
+++ b/src/sequence_diagram/model/participant.cc
@@ -150,6 +150,14 @@ bool function::is_operator() const { return is_operator_; }
 
 void function::is_operator(bool o) { is_operator_ = o; }
 
+bool function::is_cuda_kernel() const { return is_cuda_kernel_; }
+
+void function::is_cuda_kernel(bool c) { is_cuda_kernel_ = c; }
+
+bool function::is_cuda_device() const { return is_cuda_device_; }
+
+void function::is_cuda_device(bool c) { is_cuda_device_ = c; }
+
 void function::return_type(const std::string &rt) { return_type_ = rt; }
 
 const std::string &function::return_type() const { return return_type_; }
diff --git a/src/sequence_diagram/model/participant.h b/src/sequence_diagram/model/participant.h
index 68eaeade..92de35c8 100644
--- a/src/sequence_diagram/model/participant.h
+++ b/src/sequence_diagram/model/participant.h
@@ -303,6 +303,34 @@ struct function : public participant {
      */
    void is_operator(bool o);
 
+    /**
+     * @brief Check if the function is a CUDA kernel
+     *
+     * @return True, if the function is a CUDA kernel call
+     */
+    bool is_cuda_kernel() const;
+
+    /**
+     * @brief Set whether the function is a CUDA kernel call
+     *
+     * @param c True, if the function is a CUDA kernel call
+     */
+    void is_cuda_kernel(bool c);
+
+    /**
+     * @brief Check if the function is a CUDA device function
+     *
+     * @return True, if the function is a CUDA device call
+     */
+    bool is_cuda_device() const;
+
+    /**
+     * @brief Set whether the function is a CUDA device call
+     *
+     * @param c True, if the function is a CUDA device call
+     */
+    void is_cuda_device(bool c);
+
     /**
      * @brief Set functions return type
      *
@@ -339,6 +367,8 @@ private:
     bool is_void_{false};
     bool is_static_{false};
     bool is_operator_{false};
+    bool is_cuda_kernel_{false};
+    bool is_cuda_device_{false};
     std::string return_type_;
     std::vector<function_parameter> parameters_;
 };
diff --git a/src/sequence_diagram/visitor/translation_unit_visitor.cc b/src/sequence_diagram/visitor/translation_unit_visitor.cc
index 14b5c8e1..af8454be 100644
--- a/src/sequence_diagram/visitor/translation_unit_visitor.cc
+++ b/src/sequence_diagram/visitor/translation_unit_visitor.cc
@@ -360,6 +360,12 @@ bool translation_unit_visitor::VisitFunctionDecl(
 
     function_model_ptr->is_operator(declaration->isOverloadedOperator());
 
+    function_model_ptr->is_cuda_kernel(
+        common::has_attr(declaration, clang::attr::CUDAGlobal));
+
+    function_model_ptr->is_cuda_device(
+        common::has_attr(declaration, clang::attr::CUDADevice));
+
     context().update(declaration);
 
     context().set_caller_id(function_model_ptr->id());
@@ -531,6 +537,29 @@ bool translation_unit_visitor::TraverseCallExpr(clang::CallExpr *expr)
     return true;
 }
 
+bool translation_unit_visitor::TraverseCUDAKernelCallExpr(
+    clang::CUDAKernelCallExpr *expr)
+{
+    if (source_manager().isInSystemHeader(expr->getSourceRange().getBegin()))
+        return true;
+
+    LOG_TRACE("Entering CUDA kernel call expression at {}",
+        expr->getBeginLoc().printToString(source_manager()));
+
+    context().enter_callexpr(expr);
+
+    RecursiveASTVisitor<translation_unit_visitor>::TraverseCallExpr(expr);
+
+    LOG_TRACE("Leaving CUDA kernel call expression at {}",
+        expr->getBeginLoc().printToString(source_manager()));
+
+    context().leave_callexpr();
+
+    pop_message_to_diagram(expr);
+
+    return true;
+}
+
 bool translation_unit_visitor::TraverseCXXMemberCallExpr(
     clang::CXXMemberCallExpr *expr)
 {
@@ -1067,6 +1096,15 @@ bool translation_unit_visitor::VisitCallExpr(clang::CallExpr *expr)
             "Message for this call expression is taken from comment directive");
     }
     //
+    // Call to a CUDA kernel function
+    //
+    else if (const auto *cuda_call_expr =
+                 clang::dyn_cast_or_null<clang::CUDAKernelCallExpr>(expr);
+             cuda_call_expr != nullptr) {
+        if (!process_cuda_kernel_call_expression(m, cuda_call_expr))
+            return true;
+    }
+    //
     // Call to an overloaded operator
     //
     else if (const auto *operator_call_expr =
@@ -1250,6 +1288,43 @@ bool translation_unit_visitor::VisitCXXConstructExpr(
     return true;
 }
 
+bool translation_unit_visitor::process_cuda_kernel_call_expression(
+    model::message &m, const clang::CUDAKernelCallExpr *expr)
+{
+    const auto *callee_decl = expr->getCalleeDecl();
+
+    if (callee_decl == nullptr)
+        return false;
+
+    const auto *callee_function = callee_decl->getAsFunction();
+
+    if (callee_function == nullptr)
+        return false;
+
+    if (!should_include(callee_function))
+        return false;
+
+    // Skip free functions declared in files outside of included paths
+    if (config().combine_free_functions_into_file_participants() &&
+        !diagram().should_include(common::model::source_file{m.file()}))
+        return false;
+
+    auto callee_name = callee_function->getQualifiedNameAsString() + "()";
+
+    const auto maybe_id = get_unique_id(callee_function->getID());
+    if (!maybe_id.has_value()) {
+        // This is hopefully not an interesting call...
+        m.set_to(callee_function->getID());
+    }
+    else {
+        m.set_to(maybe_id.value());
+    }
+
+    m.set_message_name(callee_name.substr(0, callee_name.size() - 2));
+
+    return true;
+}
+
 bool translation_unit_visitor::process_operator_call_expression(
     model::message &m, const clang::CXXOperatorCallExpr *operator_call_expr)
 {
@@ -1469,8 +1544,6 @@ bool translation_unit_visitor::process_function_call_expression(
 
     auto callee_name = callee_function->getQualifiedNameAsString() + "()";
 
-    std::unique_ptr<model::function> f_ptr;
-
     const auto maybe_id = get_unique_id(callee_function->getID());
     if (!maybe_id.has_value()) {
         // This is hopefully not an interesting call...
@@ -1482,9 +1555,6 @@ bool translation_unit_visitor::process_function_call_expression(
 
     m.set_message_name(callee_name.substr(0, callee_name.size() - 2));
 
-    if (f_ptr)
-        diagram().add_participant(std::move(f_ptr));
-
     return true;
 }
 
diff --git a/src/sequence_diagram/visitor/translation_unit_visitor.h b/src/sequence_diagram/visitor/translation_unit_visitor.h
index 98944838..7678fb98 100644
--- a/src/sequence_diagram/visitor/translation_unit_visitor.h
+++ b/src/sequence_diagram/visitor/translation_unit_visitor.h
@@ -77,9 +77,11 @@ public:
 
     bool VisitCallExpr(clang::CallExpr *expr);
 
+    bool TraverseVarDecl(clang::VarDecl *VD);
+
     bool TraverseCallExpr(clang::CallExpr *expr);
 
-    bool TraverseVarDecl(clang::VarDecl *VD);
+    bool TraverseCUDAKernelCallExpr(clang::CUDAKernelCallExpr *expr);
 
     bool TraverseCXXMemberCallExpr(clang::CXXMemberCallExpr *expr);
 
@@ -395,6 +397,9 @@ private:
     bool process_operator_call_expression(model::message &m,
         const clang::CXXOperatorCallExpr *operator_call_expr);
 
+    bool process_cuda_kernel_call_expression(
+        model::message &m, const clang::CUDAKernelCallExpr *cuda_call_expr);
+
     /**
      * @brief Handle a class method call expression
      *
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index ffc38e33..3ab3cfce 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -9,13 +9,18 @@ file(GLOB_RECURSE TEST_CONFIG_YMLS test_config_data/*.yml
 
 set(TEST_CASES_REQUIRING_CXX20 t00056 t00058 t00059 t00065 t00069)
 set(TEST_CASES_REQUIRING_CXX20_MODULES t00070 t00071 t00072 t30012 t30013 t30014 t30015)
+set(TEST_CASES_REQUIRING_CUDA t20049 t20050)
 
 if(ENABLE_CXX_MODULES_TEST_CASES)
+    message(STATUS "Enabling C++ modules test cases")
+
     foreach(CXX20_MOD_TC ${TEST_CASES_REQUIRING_CXX20_MODULES})
         list(APPEND TEST_CASES_REQUIRING_CXX20 ${CXX20_MOD_TC})
     endforeach()
     set(CMAKE_CXX_SCAN_FOR_MODULES ON)
 else()
+    message(STATUS "Disabling C++ modules test cases")
+
     foreach(CXX20_MOD_TC ${TEST_CASES_REQUIRING_CXX20_MODULES})
         list(FILTER TEST_CASE_SOURCES
                 EXCLUDE
@@ -23,10 +28,23 @@ else()
         list(FILTER TEST_CASE_CONFIGS
                 EXCLUDE
                 REGEX ".*${CXX20_MOD_TC}.*")
-
     endforeach()
 endif(ENABLE_CXX_MODULES_TEST_CASES)
 
+if(NOT ENABLE_CUDA_TEST_CASES)
+    message(STATUS "Disabling CUDA test cases")
+    foreach(CUDA_TC ${TEST_CASES_REQUIRING_CUDA})
+        list(FILTER TEST_CASE_SOURCES
+                EXCLUDE
+                REGEX ".*${CUDA_TC}.*")
+        list(FILTER TEST_CASE_CONFIGS
+                EXCLUDE
+                REGEX ".*${CUDA_TC}.*")
+    endforeach()
+else()
+    message(STATUS "Enabling CUDA test cases")
+endif(NOT ENABLE_CUDA_TEST_CASES)
+
 set(CLANG_UML_TEST_LIBRARIES
         clang-umllib
         ${YAML_CPP_LIBRARIES}
@@ -39,7 +57,6 @@ endif(MSVC)
 
 list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_20 SUPPORTS_CXX_STD_20)
 
-message(STATUS "Enabling C++20 test cases")
 # Remove test cases which require C++20 if they are not supported here
 if(SUPPORTS_CXX_STD_20 EQUAL -1
@@ -53,8 +70,10 @@ if(SUPPORTS_CXX_STD_20 EQUAL -1
                 EXCLUDE
                 REGEX ".*${CXX20_TC}.*")
     endforeach()
+    message(STATUS "Disabling C++20 test cases")
 else()
     set(ENABLE_CXX_STD_20_TEST_CASES 1)
+    message(STATUS "Enabling C++20 test cases")
 endif()
 
 if(APPLE)
diff --git a/tests/t20049/.clang-uml b/tests/t20049/.clang-uml
new file mode 100644
index 00000000..2061996e
--- /dev/null
+++ b/tests/t20049/.clang-uml
@@ -0,0 +1,11 @@
+diagrams:
+  t20049_sequence:
+    type: sequence
+    glob:
+      - t20049.cu
+    include:
+      namespaces:
+        - clanguml::t20049
+    using_namespace: clanguml::t20049
+    from:
+      - function: "clanguml::t20049::tmain()"
diff --git a/tests/t20049/t20049.cu b/tests/t20049/t20049.cu
new file mode 100644
index 00000000..5a2106d9
--- /dev/null
+++ b/tests/t20049/t20049.cu
@@ -0,0 +1,36 @@
+#include "t20049.cuh"
+
+namespace clanguml {
+namespace t20049 {
+
+constexpr unsigned long N{1000};
+
+__device__ float square(float a) { return a * a; }
+
+__global__ void vector_square_add(float *out, float *a, float *b, int n)
+{
+    for (int i = 0; i < n; i++) {
+        out[i] = add(square(a[i]), square(b[i]));
+    }
+}
+
+int tmain()
+{
+    float *a, *b, *out;
+
+    a = (float *)malloc(sizeof(float) * N);
+    b = (float *)malloc(sizeof(float) * N);
+    out = (float *)malloc(sizeof(float) * N);
+
+    for (int i = 0; i < N; i++) {
+        a[i] = 1.0f;
+        b[i] = 2.0f;
+    }
+
+    vector_square_add<<<1, 1>>>(out, a, b, N);
+
+    return 0;
+}
+
+}
+}
\ No newline at end of file
diff --git a/tests/t20049/t20049.cuh b/tests/t20049/t20049.cuh
new file mode 100644
index 00000000..d006e517
--- /dev/null
+++ b/tests/t20049/t20049.cuh
@@ -0,0 +1,11 @@
+namespace clanguml {
+namespace t20049 {
+
+template <typename T> __device__ T add(T a, T b) { return a + b; }
+
+__device__ float square(float a);
+
+__global__ void vector_square_add(float *out, float *a, float *b, int n);
+
+}
+}
\ No newline at end of file
diff --git a/tests/t20049/test_case.h b/tests/t20049/test_case.h
new file mode 100644
index 00000000..f73722b9
--- /dev/null
+++ b/tests/t20049/test_case.h
@@ -0,0 +1,81 @@
+/**
+ * tests/t20049/test_case.h
+ *
+ * Copyright (c) 2021-2024 Bartek Kryza
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+TEST_CASE("t20049", "[test-case][sequence]")
+{
+    auto [config, db] = load_config("t20049");
+
+    auto diagram = config.diagrams["t20049_sequence"];
+
+    REQUIRE(diagram->name == "t20049_sequence");
+
+    auto model = generate_sequence_diagram(*db, diagram);
+
+    REQUIRE(model->name() == "t20049_sequence");
+
+    {
+        auto src = generate_sequence_puml(diagram, *model);
+        AliasMatcher _A(src);
+
+        REQUIRE_THAT(src, StartsWith("@startuml"));
+        REQUIRE_THAT(src, EndsWith("@enduml\n"));
+
+        REQUIRE_THAT(src,
+            HasCall(_A("tmain()"),
+                _A("vector_square_add(float *,float *,float *,int)"), ""));
+        REQUIRE_THAT(src,
+            HasCall(_A("vector_square_add(float *,float *,float *,int)"),
+                _A("square(float)"), ""));
+        REQUIRE_THAT(src,
+            HasCall(_A("vector_square_add(float *,float *,float *,int)"),
+                _A("add(float,float)"), ""));
+
+        save_puml(config.output_directory(), diagram->name + ".puml", src);
+    }
+
+    {
+        auto j = generate_sequence_json(diagram, *model);
+
+        using namespace json;
+
+        save_json(config.output_directory(), diagram->name + ".json", j);
+    }
+
+    {
+        auto src = generate_sequence_mermaid(diagram, *model);
+
+        mermaid::SequenceDiagramAliasMatcher _A(src);
+        using mermaid::HasCall;
+
+        REQUIRE_THAT(src,
+            HasCall(_A("tmain()"),
+                _A("<< CUDA Kernel >><br>vector_square_add(float *,float "
+                   "*,float *,int)"),
+                ""));
+        REQUIRE_THAT(src,
+            HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float "
+                       "*,float *,int)"),
+                _A("<< CUDA Device >><br>square(float)"), ""));
+        REQUIRE_THAT(src,
+            HasCall(_A("<< CUDA Kernel >><br>vector_square_add(float *,float "
+                       "*,float *,int)"),
+                _A("<< CUDA Device >><br>add(float,float)"), ""));
+
+        save_mermaid(config.output_directory(), diagram->name + ".mmd", src);
+    }
+}
\ No newline at end of file
diff --git a/tests/t20050/.clang-uml b/tests/t20050/.clang-uml
new file mode 100644
index 00000000..8195bcc1
--- /dev/null
+++ b/tests/t20050/.clang-uml
@@ -0,0 +1,12 @@
+diagrams:
+  t20050_sequence:
+    type: sequence
+    glob:
+      - t20050.cu
+    include:
+      namespaces:
+        - clanguml::t20050
+    using_namespace: clanguml::t20050
+    combine_free_functions_into_file_participants: true
+    from:
+      - function: "clanguml::t20050::tmain()"
\ No newline at end of file
diff --git a/tests/t20050/t20050.cu b/tests/t20050/t20050.cu
new file mode 100644
index 00000000..83f147cb
--- /dev/null
+++ b/tests/t20050/t20050.cu
@@ -0,0 +1,38 @@
+#include "t20050.cuh"
+
+namespace clanguml {
+namespace t20050 {
+
+constexpr unsigned long N{1000};
+
+template <typename T> __device__ T add(T a, T b) { return a + b; }
+
+__device__ float square(float a) { return a * a; }
+
+__global__ void vector_square_add(float *out, float *a, float *b, int n)
+{
+    for (int i = 0; i < n; i++) {
+        out[i] = add(square(a[i]), square(b[i]));
+    }
+}
+
+int tmain()
+{
+    float *a, *b, *out;
+
+    a = (float *)malloc(sizeof(float) * N);
+    b = (float *)malloc(sizeof(float) * N);
+    out = (float *)malloc(sizeof(float) * N);
+
+    for (int i = 0; i < N; i++) {
+        a[i] = 1.0f;
+        b[i] = 2.0f;
+    }
+
+    vector_square_add<<<1, 1>>>(out, a, b, N);
+
+    return 0;
+}
+
+}
+}
\ No newline at end of file
diff --git a/tests/t20050/t20050.cuh b/tests/t20050/t20050.cuh
new file mode 100644
index 00000000..cafe8ca4
--- /dev/null
+++ b/tests/t20050/t20050.cuh
@@ -0,0 +1,9 @@
+namespace clanguml {
+namespace t20050 {
+
+__device__ float square(float a);
+
+__global__ void vector_square_add(float *out, float *a, float *b, int n);
+
+}
+}
\ No newline at end of file
diff --git a/tests/t20050/test_case.h b/tests/t20050/test_case.h
new file mode 100644
index 00000000..935f3e8a
--- /dev/null
+++ b/tests/t20050/test_case.h
@@ -0,0 +1,80 @@
+/**
+ * tests/t20050/test_case.h
+ *
+ * Copyright (c) 2021-2024 Bartek Kryza
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+TEST_CASE("t20050", "[test-case][sequence]")
+{
+    auto [config, db] = load_config("t20050");
+
+    auto diagram = config.diagrams["t20050_sequence"];
+
+    REQUIRE(diagram->name == "t20050_sequence");
+
+    auto model = generate_sequence_diagram(*db, diagram);
+
+    REQUIRE(model->name() == "t20050_sequence");
+
+    {
+        auto src = generate_sequence_puml(diagram, *model);
+        AliasMatcher _A(src);
+
+        REQUIRE_THAT(src, StartsWith("@startuml"));
+        REQUIRE_THAT(src, EndsWith("@enduml\n"));
+
+        // Check if all calls exist
+        REQUIRE_THAT(src,
+            HasCall(_A("t20050.cu"), _A("t20050.cu"),
+                "<< CUDA Kernel >>\\\\nvector_square_add(float *,float *,float "
+                "*,int)"));
+        REQUIRE_THAT(src,
+            HasCall(_A("t20050.cu"), _A("t20050.cu"),
+                "<< CUDA Device >>\\\\nsquare(float)"));
+        REQUIRE_THAT(src,
+            HasCall(_A("t20050.cu"), _A("t20050.cu"),
+                "<< CUDA Device >>\\\\nadd(float,float)"));
+
+        save_puml(config.output_directory(), diagram->name + ".puml", src);
+    }
+
+    {
+        auto j = generate_sequence_json(diagram, *model);
+
+        using namespace json;
+
+        save_json(config.output_directory(), diagram->name + ".json", j);
+    }
+
+    {
+        auto src = generate_sequence_mermaid(diagram, *model);
+
+        mermaid::SequenceDiagramAliasMatcher _A(src);
+        using mermaid::HasCall;
+
+        REQUIRE_THAT(src,
+            HasCall(_A("t20050.cu"), _A("t20050.cu"),
+                "<< CUDA Kernel >><br>vector_square_add(float *,float *,float "
+                "*,int)"));
+        REQUIRE_THAT(src,
+            HasCall(_A("t20050.cu"), _A("t20050.cu"),
+                "<< CUDA Device >><br>square(float)"));
+        REQUIRE_THAT(src,
+            HasCall(_A("t20050.cu"), _A("t20050.cu"),
+                "<< CUDA Device >><br>add(float,float)"));
+
+        save_mermaid(config.output_directory(), diagram->name + ".mmd", src);
+    }
+}
\ No newline at end of file
diff --git a/tests/test_cases.cc b/tests/test_cases.cc
index 1d24eec7..983dbccd 100644
--- a/tests/test_cases.cc
+++ b/tests/test_cases.cc
@@ -475,6 +475,8 @@ using namespace clanguml::test::matchers;
 #include "t20046/test_case.h"
 #include "t20047/test_case.h"
 #include "t20048/test_case.h"
+#include "t20049/test_case.h"
+#include "t20050/test_case.h"
 
 ///
 /// Package diagram tests
diff --git a/tests/test_cases.h b/tests/test_cases.h
index 6cc3cb06..b8e61102 100644
--- a/tests/test_cases.h
+++ b/tests/test_cases.h
@@ -169,17 +169,12 @@ public:
 
     bool match(T const &in) const override
     {
-        std::istringstream fin(in);
-        std::string line;
         std::regex r{m_is_response ? response_pattern : call_pattern};
 
-        while (std::getline(fin, line)) {
-            std::smatch base_match;
-            std::regex_search(in, base_match, r);
-            if (base_match.size() > 0)
-                return true;
-        }
+        std::smatch base_match;
+        std::regex_search(in, base_match, r);
+        if (base_match.size() > 0)
+            return true;
 
         return false;
     }
@@ -255,17 +250,12 @@ public:
 
     bool match(T const &in) const override
    {
-        std::istringstream fin(in);
-        std::string line;
         std::regex r{m_is_response ? response_pattern : call_pattern};
 
-        while (std::getline(fin, line)) {
-            std::smatch base_match;
-            std::regex_search(in, base_match, r);
-            if (base_match.size() > 0)
-                return true;
-        }
+        std::smatch base_match;
+        std::regex_search(in, base_match, r);
+        if (base_match.size() > 0)
+            return true;
 
         return false;
     }
diff --git a/tests/test_cases.yaml b/tests/test_cases.yaml
index a9f7954e..f2deb8ad 100644
--- a/tests/test_cases.yaml
+++ b/tests/test_cases.yaml
@@ -364,6 +364,15 @@ test_cases:
   - name: t20047
     title: Test case for 'call' comment directive
     description:
+  - name: t20048
+    title: Test case for message comments
+    description:
+  - name: t20049
+    title: Test case for CUDA kernel calls
+    description:
+  - name: t20050
+    title: Test case for CUDA kernel calls with participants combined to file
+    description:
 Package diagrams:
   - name: t30001
     title: Basic package diagram test case
diff --git a/util/generate_test_cases_docs.py b/util/generate_test_cases_docs.py
index 03fcd97a..70fc4076 100755
--- a/util/generate_test_cases_docs.py
+++ b/util/generate_test_cases_docs.py
@@ -60,7 +60,8 @@ with open(r'tests/test_cases.yaml') as f:
             tc.write("## Source code\n")
             for root, dirs, files in os.walk(f'tests/{name}/'):
                 for source_file in files:
-                    if source_file.endswith((".h", ".cc", ".c", ".cppm")):
+                    if source_file.endswith((
+                            ".h", ".cc", ".c", ".cppm", ".cu", ".cuh")):
                        if source_file == "test_case.h":
                             continue
                         file_path = os.path.join(root, source_file)