summaryrefslogtreecommitdiff
path: root/lib/Sema/SemaCUDA.cpp
diff options
context:
space:
mode:
authorJustin Lebar <jlebar@google.com>2016-10-13 18:45:08 +0000
committerJustin Lebar <jlebar@google.com>2016-10-13 18:45:08 +0000
commitfff8e6c7b6f6f8ebc8c9d8e8282e2f12d1fa565a (patch)
tree881c2029e0f0447633b38362353700ac723ee8bd /lib/Sema/SemaCUDA.cpp
parentd1494637bbb364bbf5bdb753e668c0323e4ec67b (diff)
downloadclang-fff8e6c7b6f6f8ebc8c9d8e8282e2f12d1fa565a.tar.gz
[CUDA] Add Sema::CUDADiagBuilder and Sema::CUDADiagIf{Device,Host}Code().
Summary: Together these let you easily create diagnostics that - are never emitted for host code - are always emitted for __device__ and __global__ functions, and - are emitted for __host__ __device__ functions iff these functions are codegen'ed. At the moment there are only three diagnostics that need this treatment, but I have more to add, and it's not sustainable to write code for emitting every such diagnostic twice, and from a special wrapper in SemaCUDA.cpp. While we're at it, don't emit the function name in err_cuda_device_exceptions: It's not necessary to print it, and making this work in the new framework in the face of a null value for dyn_cast<FunctionDecl>(CurContext) isn't worth the effort. Reviewers: rnk Subscribers: cfe-commits, tra Differential Revision: https://reviews.llvm.org/D25139 git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@284143 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Sema/SemaCUDA.cpp')
-rw-r--r-- lib/Sema/SemaCUDA.cpp | 155
1 file changed, 83 insertions(+), 72 deletions(-)
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
index 2a66124080..717fe4ad52 100644
--- a/lib/Sema/SemaCUDA.cpp
+++ b/lib/Sema/SemaCUDA.cpp
@@ -18,6 +18,7 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
@@ -55,6 +56,10 @@ ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
+ // Code that lives outside a function is run on the host.
+ if (D == nullptr)
+ return CFT_Host;
+
if (D->hasAttr<CUDAInvalidTargetAttr>())
return CFT_InvalidTarget;
@@ -108,9 +113,8 @@ Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
assert(Callee && "Callee must be valid.");
+ CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);
- CUDAFunctionTarget CallerTarget =
- (Caller != nullptr) ? IdentifyCUDATarget(Caller) : Sema::CFT_Host;
// If one of the targets is invalid, the check always fails, no matter what
// the other target is.
@@ -484,88 +488,95 @@ void Sema::maybeAddCUDAHostDeviceAttrs(Scope *S, FunctionDecl *NewD,
NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
-bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
- assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- assert(Callee && "Callee may not be null.");
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
- if (!Caller)
- return true;
-
- Sema::CUDAFunctionPreference Pref = IdentifyCUDAPreference(Caller, Callee);
- if (Pref == Sema::CFP_Never) {
- Diag(Loc, diag::err_ref_bad_target) << IdentifyCUDATarget(Callee) << Callee
- << IdentifyCUDATarget(Caller);
- Diag(Callee->getLocation(), diag::note_previous_decl) << Callee;
- return false;
+Sema::CUDADiagBuilder::CUDADiagBuilder(Kind K, SourceLocation Loc,
+ unsigned DiagID, FunctionDecl *Fn,
+ Sema &S) {
+ switch (K) {
+ case K_Nop:
+ break;
+ case K_Immediate:
+ ImmediateDiagBuilder.emplace(S.Diag(Loc, DiagID));
+ break;
+ case K_Deferred:
+ assert(Fn && "Must have a function to attach the deferred diag to.");
+ PartialDiagInfo.emplace(Loc, S.PDiag(DiagID), Fn);
+ break;
}
+}
- // Insert into LocsWithCUDADeferredDiags to avoid emitting duplicate deferred
- // diagnostics for the same location. Duplicate deferred diags are otherwise
- // tricky to avoid, because, unlike with regular errors, sema checking
- // proceeds unhindered when we omit a deferred diagnostic.
- if (Pref == Sema::CFP_WrongSide &&
- LocsWithCUDACallDeferredDiags.insert(Loc.getRawEncoding()).second) {
- // We have to do this odd dance to create our PartialDiagnostic because we
- // want its storage to be allocated with operator new, not in an arena.
- PartialDiagnostic ErrPD{PartialDiagnostic::NullDiagnostic()};
- ErrPD.Reset(diag::err_ref_bad_target);
- ErrPD << IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
- Caller->addDeferredDiag({Loc, std::move(ErrPD)});
-
- PartialDiagnostic NotePD{PartialDiagnostic::NullDiagnostic()};
- NotePD.Reset(diag::note_previous_decl);
- NotePD << Callee;
- Caller->addDeferredDiag({Callee->getLocation(), std::move(NotePD)});
-
- // This is not immediately an error, so return true. The deferred errors
- // will be emitted if and when Caller is codegen'ed.
- return true;
+Sema::CUDADiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ CUDADiagBuilder::Kind DiagKind;
+ switch (CurrentCUDATarget()) {
+ case CFT_Global:
+ case CFT_Device:
+ DiagKind = CUDADiagBuilder::K_Immediate;
+ break;
+ case CFT_HostDevice:
+ DiagKind = getLangOpts().CUDAIsDevice ? CUDADiagBuilder::K_Deferred
+ : CUDADiagBuilder::K_Nop;
+ break;
+ default:
+ DiagKind = CUDADiagBuilder::K_Nop;
}
- return true;
+ return CUDADiagBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
}
-bool Sema::CheckCUDAExceptionExpr(SourceLocation Loc, StringRef ExprTy) {
+Sema::CUDADiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
- if (!CurFn)
- return true;
- CUDAFunctionTarget Target = IdentifyCUDATarget(CurFn);
-
- // Raise an error immediately if this is a __global__ or __device__ function.
- // If it's a __host__ __device__ function, enqueue a deferred error which will
- // be emitted if the function is codegen'ed for device.
- if (Target == CFT_Global || Target == CFT_Device) {
- Diag(Loc, diag::err_cuda_device_exceptions) << ExprTy << Target << CurFn;
- return false;
- }
- if (Target == CFT_HostDevice && getLangOpts().CUDAIsDevice) {
- PartialDiagnostic ErrPD{PartialDiagnostic::NullDiagnostic()};
- ErrPD.Reset(diag::err_cuda_device_exceptions);
- ErrPD << ExprTy << Target << CurFn;
- CurFn->addDeferredDiag({Loc, std::move(ErrPD)});
- return false;
+ CUDADiagBuilder::Kind DiagKind;
+ switch (CurrentCUDATarget()) {
+ case CFT_Host:
+ DiagKind = CUDADiagBuilder::K_Immediate;
+ break;
+ case CFT_HostDevice:
+ DiagKind = getLangOpts().CUDAIsDevice ? CUDADiagBuilder::K_Nop
+ : CUDADiagBuilder::K_Deferred;
+ break;
+ default:
+ DiagKind = CUDADiagBuilder::K_Nop;
}
- return true;
+ return CUDADiagBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
}
-bool Sema::CheckCUDAVLA(SourceLocation Loc) {
+bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
- if (!CurFn)
+ assert(Callee && "Callee may not be null.");
+ FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ if (!Caller)
return true;
- CUDAFunctionTarget Target = IdentifyCUDATarget(CurFn);
- if (Target == CFT_Global || Target == CFT_Device) {
- Diag(Loc, diag::err_cuda_vla) << Target;
- return false;
- }
- if (Target == CFT_HostDevice && getLangOpts().CUDAIsDevice) {
- PartialDiagnostic ErrPD{PartialDiagnostic::NullDiagnostic()};
- ErrPD.Reset(diag::err_cuda_vla);
- ErrPD << Target;
- CurFn->addDeferredDiag({Loc, std::move(ErrPD)});
- return false;
+
+ CUDADiagBuilder::Kind DiagKind;
+ switch (IdentifyCUDAPreference(Caller, Callee)) {
+ case CFP_Never:
+ DiagKind = CUDADiagBuilder::K_Immediate;
+ break;
+ case CFP_WrongSide:
+ assert(Caller && "WrongSide calls require a non-null caller");
+ DiagKind = CUDADiagBuilder::K_Deferred;
+ break;
+ default:
+ DiagKind = CUDADiagBuilder::K_Nop;
}
- return true;
+
+ // Avoid emitting this error twice for the same location. Using a hashtable
+ // like this is unfortunate, but because we must continue parsing as normal
+ // after encountering a deferred error, it's otherwise very tricky for us to
+ // ensure that we only emit this deferred error once.
+ if (!LocsWithCUDACallDiags.insert(Loc.getRawEncoding()).second)
+ return true;
+
+ bool IsImmediateErr =
+ CUDADiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
+ << IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
+ CUDADiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
+ Caller, *this)
+ << Callee;
+ return !IsImmediateErr;
}
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {