Added support for CUDA calls in sequence diagrams (#263)
@@ -944,4 +944,13 @@ bool is_struct(const clang::NamedDecl *decl)
     return false;
 }
 
+bool has_attr(const clang::FunctionDecl *decl, clang::attr::Kind function_attr)
+{
+    for (const auto &attr : decl->attrs()) {
+        if (attr->getKind() == function_attr)
+            return true;
+    }
+
+    return false;
+}
 } // namespace clanguml::common
@@ -315,4 +315,13 @@ bool is_coroutine(const clang::FunctionDecl &decl);
  */
 bool is_struct(const clang::NamedDecl *decl);
 
+/**
+ * Check if function declaration contains specified attribute
+ *
+ * @param decl Function declaration
+ * @param function_attr Clang function attribute
+ * @return True, if decl contains specified function attribute
+ */
+bool has_attr(const clang::FunctionDecl *decl, clang::attr::Kind function_attr);
+
 } // namespace clanguml::common
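
For illustration only, the new helper can be used to classify a free function by its CUDA attribute. The call pattern mirrors the call sites added further down in VisitFunctionDecl; the wrapper, enum and function names below are not part of the commit and assume clang_utils.h is included:

    #include <clang/AST/Attr.h>
    #include <clang/AST/Decl.h>

    // Sketch: map a function declaration to its CUDA role via has_attr().
    enum class cuda_kind { none, kernel, device };

    cuda_kind classify_cuda(const clang::FunctionDecl *decl)
    {
        // __global__ functions carry clang::attr::CUDAGlobal,
        // __device__ functions carry clang::attr::CUDADevice.
        if (clanguml::common::has_attr(decl, clang::attr::CUDAGlobal))
            return cuda_kind::kernel;
        if (clanguml::common::has_attr(decl, clang::attr::CUDADevice))
            return cuda_kind::device;
        return cuda_kind::none;
    }
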
@@ -39,6 +39,14 @@ void to_json(nlohmann::json &j, const participant &c)
     if (c.type_name() == "method") {
         j["name"] = dynamic_cast<const method &>(c).method_name();
     }
+
+    if (c.type_name() == "function" || c.type_name() == "function_template") {
+        const auto &f = dynamic_cast<const function &>(c);
+        if (f.is_cuda_kernel())
+            j["is_cuda_kernel"] = true;
+        if (f.is_cuda_device())
+            j["is_cuda_device"] = true;
+    }
 }
 
 void to_json(nlohmann::json &j, const activity &c)
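
The net effect on the JSON output is an extra boolean flag on such participants, emitted only when true; roughly (flag shown in isolation, the other participant fields are unchanged):

    "is_cuda_kernel": true    // or "is_cuda_device": true for __device__ functions
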
@@ -119,12 +119,23 @@ void generator::generate_call(const message &m, std::ostream &ostr) const
     }
     else if (config().combine_free_functions_into_file_participants()) {
         if (to.value().type_name() == "function") {
-            message = dynamic_cast<const model::function &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >><br>{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >><br>{}", message);
         }
         else if (to.value().type_name() == "function_template") {
-            message = dynamic_cast<const model::function_template &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >><br>{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >><br>{}", message);
         }
     }
 
@@ -397,11 +408,10 @@ void generator::generate_participant(
         config().combine_free_functions_into_file_participants()) {
         // Create a single participant for all functions declared in a
        // single file
-        const auto &file_path =
-            model()
-                .get_participant<model::function>(participant_id)
-                .value()
-                .file();
+        const auto &f =
+            model().get_participant<model::function>(participant_id).value();
+
+        const auto &file_path = f.file();
 
         assert(!file_path.empty());
 
@@ -427,8 +437,22 @@ void generator::generate_participant(
         config().simplify_template_type(participant.full_name(false)));
     common::ensure_lambda_type_is_relative(config(), participant_name);
 
-    ostr << indent(1) << "participant " << participant.alias() << " as "
-         << render_participant_name(participant_name);
+    ostr << indent(1) << "participant " << participant.alias() << " as ";
+
+    if (participant.type_name() == "function" ||
+        participant.type_name() == "function_template") {
+        const auto &f =
+            model()
+                .get_participant<model::function>(participant_id)
+                .value();
+
+        if (f.is_cuda_kernel())
+            ostr << "<< CUDA Kernel >><br>";
+        else if (f.is_cuda_device())
+            ostr << "<< CUDA Device >><br>";
+    }
+
+    ostr << render_participant_name(participant_name);
     ostr << '\n';
 
     generated_participants_.emplace(participant_id);
@@ -69,12 +69,22 @@ void generator::generate_call(const message &m, std::ostream &ostr) const
     }
     else if (config().combine_free_functions_into_file_participants()) {
         if (to.value().type_name() == "function") {
-            message = dynamic_cast<const model::function &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >>\\n{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >>\\n{}", message);
         }
         else if (to.value().type_name() == "function_template") {
-            message = dynamic_cast<const model::function_template &>(to.value())
-                          .message_name(render_mode);
+            const auto &f = dynamic_cast<const model::function &>(to.value());
+            message = f.message_name(render_mode);
+
+            if (f.is_cuda_kernel())
+                message = fmt::format("<< CUDA Kernel >>\\n{}", message);
+            else if (f.is_cuda_device())
+                message = fmt::format("<< CUDA Device >>\\n{}", message);
         }
     }
 
@@ -432,6 +442,15 @@ void generator::generate_participant(
     ostr << "participant \"" << render_name(participant_name) << "\" as "
          << participant.alias();
 
+    if (const auto *function_ptr =
+            dynamic_cast<const model::function *>(&participant);
+        function_ptr) {
+        if (function_ptr->is_cuda_kernel())
+            ostr << " << CUDA Kernel >>";
+        else if (function_ptr->is_cuda_device())
+            ostr << " << CUDA Device >>";
+    }
+
     if (config().generate_links) {
         common_generator<diagram_config, diagram_model>::generate_link(
             ostr, participant);
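
With this change, a PlantUML participant backed by a CUDA function gets the stereotype appended after its alias, along these lines (illustrative output only; the participant name and alias below are invented):

    participant "vector_scale(int *,int)" as C_0000000001 << CUDA Kernel >>
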
@@ -150,6 +150,14 @@ bool function::is_operator() const { return is_operator_; }
 
 void function::is_operator(bool o) { is_operator_ = o; }
 
+bool function::is_cuda_kernel() const { return is_cuda_kernel_; }
+
+void function::is_cuda_kernel(bool c) { is_cuda_kernel_ = c; }
+
+bool function::is_cuda_device() const { return is_cuda_device_; }
+
+void function::is_cuda_device(bool c) { is_cuda_device_ = c; }
+
 void function::return_type(const std::string &rt) { return_type_ = rt; }
 
 const std::string &function::return_type() const { return return_type_; }
@@ -303,6 +303,34 @@ struct function : public participant {
      */
     void is_operator(bool o);
 
+    /**
+     * @brief Check if the function is a CUDA kernel call
+     *
+     * @return True, if the method is a CUDA kernel call
+     */
+    bool is_cuda_kernel() const;
+
+    /**
+     * @brief Set whether the method is a CUDA kernel call
+     *
+     * @param c True, if the method is a CUDA kernel call
+     */
+    void is_cuda_kernel(bool c);
+
+    /**
+     * @brief Check if the function is a CUDA device call
+     *
+     * @return True, if the method is a CUDA device call
+     */
+    bool is_cuda_device() const;
+
+    /**
+     * @brief Set whether the method is a CUDA device call
+     *
+     * @param c True, if the method is a CUDA device call
+     */
+    void is_cuda_device(bool c);
+
     /**
      * @brief Set functions return type
      *
@@ -339,6 +367,8 @@ private:
     bool is_void_{false};
     bool is_static_{false};
     bool is_operator_{false};
+    bool is_cuda_kernel_{false};
+    bool is_cuda_device_{false};
     std::string return_type_;
     std::vector<std::string> parameters_;
 };
@@ -360,6 +360,12 @@ bool translation_unit_visitor::VisitFunctionDecl(
 
     function_model_ptr->is_operator(declaration->isOverloadedOperator());
 
+    function_model_ptr->is_cuda_kernel(
+        common::has_attr(declaration, clang::attr::CUDAGlobal));
+
+    function_model_ptr->is_cuda_device(
+        common::has_attr(declaration, clang::attr::CUDADevice));
+
     context().update(declaration);
 
     context().set_caller_id(function_model_ptr->id());
@@ -531,6 +537,29 @@ bool translation_unit_visitor::TraverseCallExpr(clang::CallExpr *expr)
     return true;
 }
 
+bool translation_unit_visitor::TraverseCUDAKernelCallExpr(
+    clang::CUDAKernelCallExpr *expr)
+{
+    if (source_manager().isInSystemHeader(expr->getSourceRange().getBegin()))
+        return true;
+
+    LOG_TRACE("Entering CUDA kernel call expression at {}",
+        expr->getBeginLoc().printToString(source_manager()));
+
+    context().enter_callexpr(expr);
+
+    RecursiveASTVisitor<translation_unit_visitor>::TraverseCallExpr(expr);
+
+    LOG_TRACE("Leaving CUDA kernel call expression at {}",
+        expr->getBeginLoc().printToString(source_manager()));
+
+    context().leave_callexpr();
+
+    pop_message_to_diagram(expr);
+
+    return true;
+}
+
 bool translation_unit_visitor::TraverseCXXMemberCallExpr(
     clang::CXXMemberCallExpr *expr)
 {
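
For reference, the construct this visitor handles is the triple-chevron kernel launch, which Clang represents as clang::CUDAKernelCallExpr when the translation unit is compiled as CUDA. A minimal input exercising both new stereotypes might look like the sketch below (not part of the commit; file and symbol names are invented):

    // example.cu -- compiled as CUDA (e.g. clang++ -x cuda)
    __device__ int scale(int v) { return 2 * v; }      // rendered << CUDA Device >>

    __global__ void vector_scale(int *data, int n)     // rendered << CUDA Kernel >>
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            data[i] = scale(data[i]);
    }

    void run(int *data, int n)
    {
        // Parsed as a CUDAKernelCallExpr; emitted as a message to the
        // vector_scale() participant in the sequence diagram.
        vector_scale<<<(n + 255) / 256, 256>>>(data, n);
    }
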
@@ -1067,6 +1096,15 @@ bool translation_unit_visitor::VisitCallExpr(clang::CallExpr *expr)
             "Message for this call expression is taken from comment directive");
     }
+    //
+    // Call to a CUDA kernel function
+    //
+    else if (const auto *cuda_call_expr =
+                 clang::dyn_cast_or_null<clang::CUDAKernelCallExpr>(expr);
+             cuda_call_expr != nullptr) {
+        if (!process_cuda_kernel_call_expression(m, cuda_call_expr))
+            return true;
+    }
     //
     // Call to an overloaded operator
     //
     else if (const auto *operator_call_expr =
@@ -1250,6 +1288,43 @@ bool translation_unit_visitor::VisitCXXConstructExpr(
     return true;
 }
 
+bool translation_unit_visitor::process_cuda_kernel_call_expression(
+    model::message &m, const clang::CUDAKernelCallExpr *expr)
+{
+    const auto *callee_decl = expr->getCalleeDecl();
+
+    if (callee_decl == nullptr)
+        return false;
+
+    const auto *callee_function = callee_decl->getAsFunction();
+
+    if (callee_function == nullptr)
+        return false;
+
+    if (!should_include(callee_function))
+        return false;
+
+    // Skip free functions declared in files outside of included paths
+    if (config().combine_free_functions_into_file_participants() &&
+        !diagram().should_include(common::model::source_file{m.file()}))
+        return false;
+
+    auto callee_name = callee_function->getQualifiedNameAsString() + "()";
+
+    const auto maybe_id = get_unique_id(callee_function->getID());
+    if (!maybe_id.has_value()) {
+        // This is hopefully not an interesting call...
+        m.set_to(callee_function->getID());
+    }
+    else {
+        m.set_to(maybe_id.value());
+    }
+
+    m.set_message_name(callee_name.substr(0, callee_name.size() - 2));
+
+    return true;
+}
+
 bool translation_unit_visitor::process_operator_call_expression(
     model::message &m, const clang::CXXOperatorCallExpr *operator_call_expr)
 {
@@ -1469,8 +1544,6 @@ bool translation_unit_visitor::process_function_call_expression(
 
     auto callee_name = callee_function->getQualifiedNameAsString() + "()";
 
-    std::unique_ptr<model::function_template> f_ptr;
-
     const auto maybe_id = get_unique_id(callee_function->getID());
     if (!maybe_id.has_value()) {
         // This is hopefully not an interesting call...
@@ -1482,9 +1555,6 @@ bool translation_unit_visitor::process_function_call_expression(
 
     m.set_message_name(callee_name.substr(0, callee_name.size() - 2));
 
-    if (f_ptr)
-        diagram().add_participant(std::move(f_ptr));
-
     return true;
 }
 
@@ -77,9 +77,11 @@ public:
 
     bool VisitCallExpr(clang::CallExpr *expr);
 
+    bool TraverseVarDecl(clang::VarDecl *VD);
+
     bool TraverseCallExpr(clang::CallExpr *expr);
 
-    bool TraverseVarDecl(clang::VarDecl *VD);
+    bool TraverseCUDAKernelCallExpr(clang::CUDAKernelCallExpr *expr);
 
     bool TraverseCXXMemberCallExpr(clang::CXXMemberCallExpr *expr);
 
@@ -395,6 +397,9 @@ private:
     bool process_operator_call_expression(model::message &m,
         const clang::CXXOperatorCallExpr *operator_call_expr);
 
+    bool process_cuda_kernel_call_expression(
+        model::message &m, const clang::CUDAKernelCallExpr *cuda_call_expr);
+
     /**
      * @brief Handle a class method call expression
      *