| //===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This file implements extra semantic analysis beyond what is enforced |
| // by the C type system. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "clang/Sema/SemaInternal.h" |
| #include "clang/AST/ASTContext.h" |
| #include "clang/AST/CharUnits.h" |
| #include "clang/AST/DeclCXX.h" |
| #include "clang/AST/DeclObjC.h" |
| #include "clang/AST/EvaluatedExprVisitor.h" |
| #include "clang/AST/Expr.h" |
| #include "clang/AST/ExprCXX.h" |
| #include "clang/AST/ExprObjC.h" |
| #include "clang/AST/StmtCXX.h" |
| #include "clang/AST/StmtObjC.h" |
| #include "clang/Analysis/Analyses/FormatString.h" |
| #include "clang/Basic/CharInfo.h" |
| #include "clang/Basic/TargetBuiltins.h" |
| #include "clang/Basic/TargetInfo.h" |
| #include "clang/Lex/Preprocessor.h" |
| #include "clang/Sema/Initialization.h" |
| #include "clang/Sema/Lookup.h" |
| #include "clang/Sema/ScopeInfo.h" |
| #include "clang/Sema/Sema.h" |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/SmallBitVector.h" |
| #include "llvm/ADT/SmallString.h" |
| #include "llvm/Support/ConvertUTF.h" |
| #include "llvm/Support/raw_ostream.h" |
| #include <limits> |
| using namespace clang; |
| using namespace sema; |
| |
| SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, |
| unsigned ByteNo) const { |
| return SL->getLocationOfByte(ByteNo, PP.getSourceManager(), |
| PP.getLangOpts(), PP.getTargetInfo()); |
| } |
| |
| /// Checks that a call expression's argument count is the desired number. |
| /// This is useful when doing custom type-checking. Returns true on error. |
| static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { |
| unsigned argCount = call->getNumArgs(); |
| if (argCount == desiredArgCount) return false; |
| |
| if (argCount < desiredArgCount) |
| return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args) |
| << 0 /*function call*/ << desiredArgCount << argCount |
| << call->getSourceRange(); |
| |
| // Highlight all the excess arguments. |
| SourceRange range(call->getArg(desiredArgCount)->getLocStart(), |
| call->getArg(argCount - 1)->getLocEnd()); |
| |
| return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) |
| << 0 /*function call*/ << desiredArgCount << argCount |
| << call->getArg(1)->getSourceRange(); |
| } |
| |
| /// Check that the first argument to __builtin_annotation is an integer |
| /// and the second argument is a non-wide string literal. |
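| /// |
| /// A well-formed use looks like the following sketch (illustrative only, not |
| /// taken from this file; 'Value' is a placeholder integer variable): |
| /// \code |
| ///   int Tagged = __builtin_annotation(Value, "bounds-checked"); |
| /// \endcode |
| /// The call keeps the type of its first argument, so 'Tagged' is an int here. |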
| static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 2)) |
| return true; |
| |
| // First argument should be an integer. |
| Expr *ValArg = TheCall->getArg(0); |
| QualType Ty = ValArg->getType(); |
| if (!Ty->isIntegerType()) { |
| S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg) |
| << ValArg->getSourceRange(); |
| return true; |
| } |
| |
| // Second argument should be a constant string. |
| Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); |
| StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); |
| if (!Literal || !Literal->isAscii()) { |
| S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg) |
| << StrArg->getSourceRange(); |
| return true; |
| } |
| |
| TheCall->setType(Ty); |
| return false; |
| } |
| |
| /// Check that the argument to __builtin_addressof is a glvalue, and set the |
| /// result type to the corresponding pointer type. |
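| /// |
| /// For illustration only (not from this file): the builtin takes the address |
| /// of a glvalue even when operator& has been hidden, e.g. |
| /// \code |
| ///   struct S { S *operator&() = delete; }; |
| ///   S Obj; |
| ///   S *P = __builtin_addressof(Obj); // OK: 'Obj' is a glvalue |
| /// \endcode |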
| static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 1)) |
| return true; |
| |
| ExprResult Arg(S.Owned(TheCall->getArg(0))); |
| QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart()); |
| if (ResultType.isNull()) |
| return true; |
| |
| TheCall->setArg(0, Arg.take()); |
| TheCall->setType(ResultType); |
| return false; |
| } |
| |
| ExprResult |
| Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| ExprResult TheCallResult(Owned(TheCall)); |
| |
| // Find out if any arguments are required to be integer constant expressions. |
| unsigned ICEArguments = 0; |
| ASTContext::GetBuiltinTypeError Error; |
| Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); |
| if (Error != ASTContext::GE_None) |
| ICEArguments = 0; // Don't diagnose previously diagnosed errors. |
| |
| // If any arguments are required to be ICE's, check and diagnose. |
| for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { |
| // Skip arguments not required to be ICE's. |
| if ((ICEArguments & (1 << ArgNo)) == 0) continue; |
| |
| llvm::APSInt Result; |
| if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) |
| return true; |
| ICEArguments &= ~(1 << ArgNo); |
| } |
| |
| switch (BuiltinID) { |
| case Builtin::BI__builtin___CFStringMakeConstantString: |
| assert(TheCall->getNumArgs() == 1 && |
| "Wrong # arguments to builtin CFStringMakeConstantString"); |
| if (CheckObjCString(TheCall->getArg(0))) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_stdarg_start: |
| case Builtin::BI__builtin_va_start: |
| case Builtin::BI__va_start: |
| if (SemaBuiltinVAStart(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_isgreater: |
| case Builtin::BI__builtin_isgreaterequal: |
| case Builtin::BI__builtin_isless: |
| case Builtin::BI__builtin_islessequal: |
| case Builtin::BI__builtin_islessgreater: |
| case Builtin::BI__builtin_isunordered: |
| if (SemaBuiltinUnorderedCompare(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_fpclassify: |
| if (SemaBuiltinFPClassification(TheCall, 6)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_isfinite: |
| case Builtin::BI__builtin_isinf: |
| case Builtin::BI__builtin_isinf_sign: |
| case Builtin::BI__builtin_isnan: |
| case Builtin::BI__builtin_isnormal: |
| if (SemaBuiltinFPClassification(TheCall, 1)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_shufflevector: |
| return SemaBuiltinShuffleVector(TheCall); |
| // TheCall will be freed by the smart pointer here, but that's fine, since |
| // SemaBuiltinShuffleVector guts it, but then doesn't release it. |
| case Builtin::BI__builtin_prefetch: |
| if (SemaBuiltinPrefetch(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_object_size: |
| if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_longjmp: |
| if (SemaBuiltinLongjmp(TheCall)) |
| return ExprError(); |
| break; |
| |
| case Builtin::BI__builtin_classify_type: |
| if (checkArgCount(*this, TheCall, 1)) return true; |
| TheCall->setType(Context.IntTy); |
| break; |
| case Builtin::BI__builtin_constant_p: |
| if (checkArgCount(*this, TheCall, 1)) return true; |
| TheCall->setType(Context.IntTy); |
| break; |
| case Builtin::BI__sync_fetch_and_add: |
| case Builtin::BI__sync_fetch_and_add_1: |
| case Builtin::BI__sync_fetch_and_add_2: |
| case Builtin::BI__sync_fetch_and_add_4: |
| case Builtin::BI__sync_fetch_and_add_8: |
| case Builtin::BI__sync_fetch_and_add_16: |
| case Builtin::BI__sync_fetch_and_sub: |
| case Builtin::BI__sync_fetch_and_sub_1: |
| case Builtin::BI__sync_fetch_and_sub_2: |
| case Builtin::BI__sync_fetch_and_sub_4: |
| case Builtin::BI__sync_fetch_and_sub_8: |
| case Builtin::BI__sync_fetch_and_sub_16: |
| case Builtin::BI__sync_fetch_and_or: |
| case Builtin::BI__sync_fetch_and_or_1: |
| case Builtin::BI__sync_fetch_and_or_2: |
| case Builtin::BI__sync_fetch_and_or_4: |
| case Builtin::BI__sync_fetch_and_or_8: |
| case Builtin::BI__sync_fetch_and_or_16: |
| case Builtin::BI__sync_fetch_and_and: |
| case Builtin::BI__sync_fetch_and_and_1: |
| case Builtin::BI__sync_fetch_and_and_2: |
| case Builtin::BI__sync_fetch_and_and_4: |
| case Builtin::BI__sync_fetch_and_and_8: |
| case Builtin::BI__sync_fetch_and_and_16: |
| case Builtin::BI__sync_fetch_and_xor: |
| case Builtin::BI__sync_fetch_and_xor_1: |
| case Builtin::BI__sync_fetch_and_xor_2: |
| case Builtin::BI__sync_fetch_and_xor_4: |
| case Builtin::BI__sync_fetch_and_xor_8: |
| case Builtin::BI__sync_fetch_and_xor_16: |
| case Builtin::BI__sync_add_and_fetch: |
| case Builtin::BI__sync_add_and_fetch_1: |
| case Builtin::BI__sync_add_and_fetch_2: |
| case Builtin::BI__sync_add_and_fetch_4: |
| case Builtin::BI__sync_add_and_fetch_8: |
| case Builtin::BI__sync_add_and_fetch_16: |
| case Builtin::BI__sync_sub_and_fetch: |
| case Builtin::BI__sync_sub_and_fetch_1: |
| case Builtin::BI__sync_sub_and_fetch_2: |
| case Builtin::BI__sync_sub_and_fetch_4: |
| case Builtin::BI__sync_sub_and_fetch_8: |
| case Builtin::BI__sync_sub_and_fetch_16: |
| case Builtin::BI__sync_and_and_fetch: |
| case Builtin::BI__sync_and_and_fetch_1: |
| case Builtin::BI__sync_and_and_fetch_2: |
| case Builtin::BI__sync_and_and_fetch_4: |
| case Builtin::BI__sync_and_and_fetch_8: |
| case Builtin::BI__sync_and_and_fetch_16: |
| case Builtin::BI__sync_or_and_fetch: |
| case Builtin::BI__sync_or_and_fetch_1: |
| case Builtin::BI__sync_or_and_fetch_2: |
| case Builtin::BI__sync_or_and_fetch_4: |
| case Builtin::BI__sync_or_and_fetch_8: |
| case Builtin::BI__sync_or_and_fetch_16: |
| case Builtin::BI__sync_xor_and_fetch: |
| case Builtin::BI__sync_xor_and_fetch_1: |
| case Builtin::BI__sync_xor_and_fetch_2: |
| case Builtin::BI__sync_xor_and_fetch_4: |
| case Builtin::BI__sync_xor_and_fetch_8: |
| case Builtin::BI__sync_xor_and_fetch_16: |
| case Builtin::BI__sync_val_compare_and_swap: |
| case Builtin::BI__sync_val_compare_and_swap_1: |
| case Builtin::BI__sync_val_compare_and_swap_2: |
| case Builtin::BI__sync_val_compare_and_swap_4: |
| case Builtin::BI__sync_val_compare_and_swap_8: |
| case Builtin::BI__sync_val_compare_and_swap_16: |
| case Builtin::BI__sync_bool_compare_and_swap: |
| case Builtin::BI__sync_bool_compare_and_swap_1: |
| case Builtin::BI__sync_bool_compare_and_swap_2: |
| case Builtin::BI__sync_bool_compare_and_swap_4: |
| case Builtin::BI__sync_bool_compare_and_swap_8: |
| case Builtin::BI__sync_bool_compare_and_swap_16: |
| case Builtin::BI__sync_lock_test_and_set: |
| case Builtin::BI__sync_lock_test_and_set_1: |
| case Builtin::BI__sync_lock_test_and_set_2: |
| case Builtin::BI__sync_lock_test_and_set_4: |
| case Builtin::BI__sync_lock_test_and_set_8: |
| case Builtin::BI__sync_lock_test_and_set_16: |
| case Builtin::BI__sync_lock_release: |
| case Builtin::BI__sync_lock_release_1: |
| case Builtin::BI__sync_lock_release_2: |
| case Builtin::BI__sync_lock_release_4: |
| case Builtin::BI__sync_lock_release_8: |
| case Builtin::BI__sync_lock_release_16: |
| case Builtin::BI__sync_swap: |
| case Builtin::BI__sync_swap_1: |
| case Builtin::BI__sync_swap_2: |
| case Builtin::BI__sync_swap_4: |
| case Builtin::BI__sync_swap_8: |
| case Builtin::BI__sync_swap_16: |
| return SemaBuiltinAtomicOverloaded(TheCallResult); |
| #define BUILTIN(ID, TYPE, ATTRS) |
| #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ |
| case Builtin::BI##ID: \ |
| return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); |
| #include "clang/Basic/Builtins.def" |
| case Builtin::BI__builtin_annotation: |
| if (SemaBuiltinAnnotation(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_addressof: |
| if (SemaBuiltinAddressof(*this, TheCall)) |
| return ExprError(); |
| break; |
| } |
| |
| // Since the target-specific builtins for each arch overlap, only check those |
| // of the arch we are compiling for. |
| if (BuiltinID >= Builtin::FirstTSBuiltin) { |
| switch (Context.getTargetInfo().getTriple().getArch()) { |
| case llvm::Triple::arm: |
| case llvm::Triple::armeb: |
| case llvm::Triple::thumb: |
| case llvm::Triple::thumbeb: |
| if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::arm64: |
| if (CheckARM64BuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::aarch64: |
| case llvm::Triple::aarch64_be: |
| if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::mips: |
| case llvm::Triple::mipsel: |
| case llvm::Triple::mips64: |
| case llvm::Triple::mips64el: |
| if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::x86: |
| case llvm::Triple::x86_64: |
| if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| default: |
| break; |
| } |
| } |
| |
| return TheCallResult; |
| } |
| |
| // Get the valid immediate range for the specified NEON type code. |
| static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { |
| NeonTypeFlags Type(t); |
| int IsQuad = ForceQuad ? true : Type.isQuad(); |
| switch (Type.getEltType()) { |
| case NeonTypeFlags::Int8: |
| case NeonTypeFlags::Poly8: |
| return shift ? 7 : (8 << IsQuad) - 1; |
| case NeonTypeFlags::Int16: |
| case NeonTypeFlags::Poly16: |
| return shift ? 15 : (4 << IsQuad) - 1; |
| case NeonTypeFlags::Int32: |
| return shift ? 31 : (2 << IsQuad) - 1; |
| case NeonTypeFlags::Int64: |
| case NeonTypeFlags::Poly64: |
| return shift ? 63 : (1 << IsQuad) - 1; |
| case NeonTypeFlags::Poly128: |
| return shift ? 127 : (1 << IsQuad) - 1; |
| case NeonTypeFlags::Float16: |
| assert(!shift && "cannot shift float types!"); |
| return (4 << IsQuad) - 1; |
| case NeonTypeFlags::Float32: |
| assert(!shift && "cannot shift float types!"); |
| return (2 << IsQuad) - 1; |
| case NeonTypeFlags::Float64: |
| assert(!shift && "cannot shift float types!"); |
| return (1 << IsQuad) - 1; |
| } |
| llvm_unreachable("Invalid NeonTypeFlag!"); |
| } |
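| |
| // A worked example of the table above (illustrative, not from the original |
| // source): for an Int8 element in a quad (128-bit) vector there are 16 lanes, |
| // so RFT returns (8 << 1) - 1 == 15, the largest valid lane index; with |
| // 'shift' set it instead returns 7, the widest legal shift for 8-bit lanes. |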
| |
| /// getNeonEltType - Return the QualType corresponding to the elements of |
| /// the vector type specified by the NeonTypeFlags. This is used to check |
| /// the pointer arguments for Neon load/store intrinsics. |
| static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, |
| bool IsPolyUnsigned, bool IsInt64Long) { |
| switch (Flags.getEltType()) { |
| case NeonTypeFlags::Int8: |
| return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; |
| case NeonTypeFlags::Int16: |
| return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; |
| case NeonTypeFlags::Int32: |
| return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; |
| case NeonTypeFlags::Int64: |
| if (IsInt64Long) |
| return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; |
| else |
| return Flags.isUnsigned() ? Context.UnsignedLongLongTy |
| : Context.LongLongTy; |
| case NeonTypeFlags::Poly8: |
| return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; |
| case NeonTypeFlags::Poly16: |
| return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; |
| case NeonTypeFlags::Poly64: |
| return Context.UnsignedLongTy; |
| case NeonTypeFlags::Poly128: |
| break; |
| case NeonTypeFlags::Float16: |
| return Context.HalfTy; |
| case NeonTypeFlags::Float32: |
| return Context.FloatTy; |
| case NeonTypeFlags::Float64: |
| return Context.DoubleTy; |
| } |
| llvm_unreachable("Invalid NeonTypeFlag!"); |
| } |
| |
| bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| llvm::APSInt Result; |
| uint64_t mask = 0; |
| unsigned TV = 0; |
| int PtrArgNum = -1; |
| bool HasConstPtr = false; |
| switch (BuiltinID) { |
| #define GET_NEON_OVERLOAD_CHECK |
| #include "clang/Basic/arm_neon.inc" |
| #undef GET_NEON_OVERLOAD_CHECK |
| } |
| |
| // For NEON intrinsics which are overloaded on vector element type, validate |
| // the immediate which specifies which variant to emit. |
| unsigned ImmArg = TheCall->getNumArgs()-1; |
| if (mask) { |
| if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) |
| return true; |
| |
| TV = Result.getLimitedValue(64); |
| if ((TV > 63) || (mask & (1ULL << TV)) == 0) |
| return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code) |
| << TheCall->getArg(ImmArg)->getSourceRange(); |
| } |
| |
| if (PtrArgNum >= 0) { |
| // Check that pointer arguments have the specified type. |
| Expr *Arg = TheCall->getArg(PtrArgNum); |
| if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) |
| Arg = ICE->getSubExpr(); |
| ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); |
| QualType RHSTy = RHS.get()->getType(); |
| |
| llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); |
| bool IsPolyUnsigned = |
| Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::arm64; |
| bool IsInt64Long = |
| Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong; |
| QualType EltTy = |
| getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); |
| if (HasConstPtr) |
| EltTy = EltTy.withConst(); |
| QualType LHSTy = Context.getPointerType(EltTy); |
| AssignConvertType ConvTy; |
| ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); |
| if (RHS.isInvalid()) |
| return true; |
| if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy, |
| RHS.get(), AA_Assigning)) |
| return true; |
| } |
| |
| // For NEON intrinsics which take an immediate value as part of the |
| // instruction, range check them here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: |
| return false; |
| #define GET_NEON_IMMEDIATE_CHECK |
| #include "clang/Basic/arm_neon.inc" |
| #undef GET_NEON_IMMEDIATE_CHECK |
| } |
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); |
| } |
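| |
| // For illustration (not from the original source), the immediate check done |
| // by CheckNeonBuiltinFunctionCall is what rejects an out-of-range lane in |
| // user code such as |
| //   int32x4_t V; int32_t X = vgetq_lane_s32(V, 4); // lane must be in [0, 3] |
| // assuming the usual arm_neon.h mapping of the intrinsic onto a NEON builtin. |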
| |
| bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, |
| CallExpr *TheCall) { |
| if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) |
| return true; |
| |
| return false; |
| } |
| |
| bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, |
| unsigned MaxWidth) { |
| assert((BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_strex || |
| BuiltinID == ARM64::BI__builtin_arm_ldrex || |
| BuiltinID == ARM64::BI__builtin_arm_strex) && |
| "unexpected ARM builtin"); |
| bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM64::BI__builtin_arm_ldrex; |
| |
| DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| |
| // Ensure that we have the proper number of arguments. |
| if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) |
| return true; |
| |
| // Inspect the pointer argument of the atomic builtin. This should always be |
| // a pointer type, whose element is an integral scalar or pointer type. |
| // Because it is a pointer type, we don't have to worry about any implicit |
| // casts here. |
| Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); |
| ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); |
| if (PointerArgRes.isInvalid()) |
| return true; |
| PointerArg = PointerArgRes.take(); |
| |
| const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next |
| // task is to insert the appropriate casts into the AST. First work out just |
| // what the appropriate type is. |
| QualType ValType = pointerType->getPointeeType(); |
| QualType AddrType = ValType.getUnqualifiedType().withVolatile(); |
| if (IsLdrex) |
| AddrType.addConst(); |
| |
| // Issue a warning if the cast is dodgy. |
| CastKind CastNeeded = CK_NoOp; |
| if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { |
| CastNeeded = CK_BitCast; |
| Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers) |
| << PointerArg->getType() |
| << Context.getPointerType(AddrType) |
| << AA_Passing << PointerArg->getSourceRange(); |
| } |
| |
| // Finally, do the cast and replace the argument with the corrected version. |
| AddrType = Context.getPointerType(AddrType); |
| PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); |
| if (PointerArgRes.isInvalid()) |
| return true; |
| PointerArg = PointerArgRes.take(); |
| |
| TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); |
| |
| // In general, we allow ints, floats and pointers to be loaded and stored. |
| if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && |
| !ValType->isBlockPointerType() && !ValType->isFloatingType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| // But ARM doesn't have instructions to deal with 128-bit versions. |
| if (Context.getTypeSize(ValType) > MaxWidth) { |
| assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); |
| Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| switch (ValType.getObjCLifetime()) { |
| case Qualifiers::OCL_None: |
| case Qualifiers::OCL_ExplicitNone: |
| // okay |
| break; |
| |
| case Qualifiers::OCL_Weak: |
| case Qualifiers::OCL_Strong: |
| case Qualifiers::OCL_Autoreleasing: |
| Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) |
| << ValType << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| |
| if (IsLdrex) { |
| TheCall->setType(ValType); |
| return false; |
| } |
| |
| // Initialize the argument to be stored. |
| ExprResult ValArg = TheCall->getArg(0); |
| InitializedEntity Entity = InitializedEntity::InitializeParameter( |
| Context, ValType, /*consume*/ false); |
| ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); |
| if (ValArg.isInvalid()) |
| return true; |
| TheCall->setArg(0, ValArg.get()); |
| |
| // __builtin_arm_strex always returns an int. It's marked as such in the .def, |
| // but the custom checker bypasses all default analysis. |
| TheCall->setType(Context.IntTy); |
| return false; |
| } |
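| |
| // The exclusive-access builtins checked above are typically used in a retry |
| // loop like the following sketch (illustrative only, not part of the original |
| // source; 'Counter' is a placeholder int*): |
| //   int Old; |
| //   do { |
| //     Old = __builtin_arm_ldrex(Counter); |
| //   } while (__builtin_arm_strex(Old + 1, Counter)); // returns 0 on success |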
| |
| bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| llvm::APSInt Result; |
| |
| if (BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_strex) { |
| return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); |
| } |
| |
| if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) |
| return true; |
| |
| // For NEON intrinsics which take an immediate value as part of the |
| // instruction, range check them here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case ARM::BI__builtin_arm_ssat: i = 1; l = 1; u = 31; break; |
| case ARM::BI__builtin_arm_usat: i = 1; u = 31; break; |
| case ARM::BI__builtin_arm_vcvtr_f: |
| case ARM::BI__builtin_arm_vcvtr_d: i = 1; u = 1; break; |
| case ARM::BI__builtin_arm_dmb: |
| case ARM::BI__builtin_arm_dsb: l = 0; u = 15; break; |
| } |
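| |
| // For illustration (not from the original source): with the table above and |
| // the 'u + l' folding below, __builtin_arm_ssat accepts a second operand that |
| // is an integer constant in [1, 32], so |
| //   int Y = __builtin_arm_ssat(X, 8); // accepted |
| // while a value of 0, 33, or a non-constant expression is diagnosed. |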
| |
| // FIXME: VFP Intrinsics should error if VFP not present. |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); |
| } |
| |
| bool Sema::CheckARM64BuiltinFunctionCall(unsigned BuiltinID, |
| CallExpr *TheCall) { |
| llvm::APSInt Result; |
| |
| if (BuiltinID == ARM64::BI__builtin_arm_ldrex || |
| BuiltinID == ARM64::BI__builtin_arm_strex) { |
| return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); |
| } |
| |
| if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) |
| return true; |
| |
| return false; |
| } |
| |
| bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; |
| case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; |
| case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; |
| case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; |
| } |
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| switch (BuiltinID) { |
| case X86::BI_mm_prefetch: |
| // This is declared to take (const char*, int) |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); |
| } |
| return false; |
| } |
| |
| /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo |
| /// parameter with the FormatAttr's correct format_idx and firstDataArg. |
| /// Returns true when the format fits the function and the FormatStringInfo has |
| /// been populated. |
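| /// |
| /// For illustration only (not from this file): given a member function such as |
| /// \code |
| ///   struct Logger { |
| ///     void log(const char *Fmt, ...) __attribute__((format(printf, 2, 3))); |
| ///   }; |
| /// \endcode |
| /// GCC counts the implicit 'this' as argument 1, so format_idx is 2 and the |
| /// first data argument is 3; the adjustment below rebases both onto our |
| /// 'this'-free parameter list. |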
| bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, |
| FormatStringInfo *FSI) { |
| FSI->HasVAListArg = Format->getFirstArg() == 0; |
| FSI->FormatIdx = Format->getFormatIdx() - 1; |
| FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; |
| |
| // The way the format attribute works in GCC, the implicit this argument |
| // of member functions is counted. However, it doesn't appear in our own |
| // lists, so decrement format_idx in that case. |
| if (IsCXXMember) { |
| if (FSI->FormatIdx == 0) |
| return false; |
| --FSI->FormatIdx; |
| if (FSI->FirstDataArg != 0) |
| --FSI->FirstDataArg; |
| } |
| return true; |
| } |
| |
| /// Checks if the given expression evaluates to null. |
| /// |
| /// \brief Returns true if the value evaluates to null. |
| static bool CheckNonNullExpr(Sema &S, |
| const Expr *Expr) { |
| // As a special case, transparent unions initialized with zero are |
| // considered null for the purposes of the nonnull attribute. |
| if (const RecordType *UT = Expr->getType()->getAsUnionType()) { |
| if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) |
| if (const CompoundLiteralExpr *CLE = |
| dyn_cast<CompoundLiteralExpr>(Expr)) |
| if (const InitListExpr *ILE = |
| dyn_cast<InitListExpr>(CLE->getInitializer())) |
| Expr = ILE->getInit(0); |
| } |
| |
| bool Result; |
| return (!Expr->isValueDependent() && |
| Expr->EvaluateAsBooleanCondition(Result, S.Context) && |
| !Result); |
| } |
| |
| static void CheckNonNullArgument(Sema &S, |
| const Expr *ArgExpr, |
| SourceLocation CallSiteLoc) { |
| if (CheckNonNullExpr(S, ArgExpr)) |
| S.Diag(CallSiteLoc, diag::warn_null_arg) << ArgExpr->getSourceRange(); |
| } |
| |
| static void CheckNonNullArguments(Sema &S, |
| const NamedDecl *FDecl, |
| const Expr * const *ExprArgs, |
| SourceLocation CallSiteLoc) { |
| // Check the attributes attached to the method/function itself. |
| for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { |
| for (NonNullAttr::args_iterator i = NonNull->args_begin(), |
| e = NonNull->args_end(); |
| i != e; ++i) { |
| CheckNonNullArgument(S, ExprArgs[*i], CallSiteLoc); |
| } |
| } |
| |
| // Check the attributes on the parameters. |
| ArrayRef<ParmVarDecl*> parms; |
| if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) |
| parms = FD->parameters(); |
| else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(FDecl)) |
| parms = MD->parameters(); |
| |
| unsigned argIndex = 0; |
| for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); |
| I != E; ++I, ++argIndex) { |
| const ParmVarDecl *PVD = *I; |
| if (PVD->hasAttr<NonNullAttr>()) |
| CheckNonNullArgument(S, ExprArgs[argIndex], CallSiteLoc); |
| } |
| } |
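| |
| // The nonnull checks above fire, for illustration (not from the original |
| // source), on code such as |
| //   void Use(void *P) __attribute__((nonnull(1))); |
| //   Use(0); // warn_null_arg: null passed to a nonnull parameter |
| // and likewise when the attribute is written on the parameter itself. |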
| |
| /// Handles the checks for format strings, non-POD arguments to vararg |
| /// functions, and NULL arguments passed to non-NULL parameters. |
| void Sema::checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args, |
| unsigned NumParams, bool IsMemberFunction, |
| SourceLocation Loc, SourceRange Range, |
| VariadicCallType CallType) { |
| // FIXME: We should check as much as we can in the template definition. |
| if (CurContext->isDependentContext()) |
| return; |
| |
| // Printf and scanf checking. |
| llvm::SmallBitVector CheckedVarArgs; |
| if (FDecl) { |
| for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { |
| // Only create vector if there are format attributes. |
| CheckedVarArgs.resize(Args.size()); |
| |
| CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, |
| CheckedVarArgs); |
| } |
| } |
| |
| // Refuse non-POD arguments that weren't caught by the format string |
| // checks above. |
| if (CallType != VariadicDoesNotApply) { |
| for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { |
| // Args[ArgIdx] can be null in malformed code. |
| if (const Expr *Arg = Args[ArgIdx]) { |
| if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) |
| checkVariadicArgument(Arg, CallType); |
| } |
| } |
| } |
| |
| if (FDecl) { |
| CheckNonNullArguments(*this, FDecl, Args.data(), Loc); |
| |
| // Type safety checking. |
| for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) |
| CheckArgumentWithTypeTag(I, Args.data()); |
| } |
| } |
| |
| /// CheckConstructorCall - Check a constructor call for correctness and safety |
| /// properties not enforced by the C type system. |
| void Sema::CheckConstructorCall(FunctionDecl *FDecl, |
| ArrayRef<const Expr *> Args, |
| const FunctionProtoType *Proto, |
| SourceLocation Loc) { |
| VariadicCallType CallType = |
| Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; |
| checkCall(FDecl, Args, Proto->getNumParams(), |
| /*IsMemberFunction=*/true, Loc, SourceRange(), CallType); |
| } |
| |
| /// CheckFunctionCall - Check a direct function call for various correctness |
| /// and safety properties not strictly enforced by the C type system. |
| bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, |
| const FunctionProtoType *Proto) { |
| bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && |
| isa<CXXMethodDecl>(FDecl); |
| bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || |
| IsMemberOperatorCall; |
| VariadicCallType CallType = getVariadicCallType(FDecl, Proto, |
| TheCall->getCallee()); |
| unsigned NumParams = Proto ? Proto->getNumParams() : 0; |
| Expr** Args = TheCall->getArgs(); |
| unsigned NumArgs = TheCall->getNumArgs(); |
| if (IsMemberOperatorCall) { |
| // If this is a call to a member operator, hide the first argument |
| // from checkCall. |
| // FIXME: Our choice of AST representation here is less than ideal. |
| ++Args; |
| --NumArgs; |
| } |
| checkCall(FDecl, llvm::makeArrayRef<const Expr *>(Args, NumArgs), NumParams, |
| IsMemberFunction, TheCall->getRParenLoc(), |
| TheCall->getCallee()->getSourceRange(), CallType); |
| |
| IdentifierInfo *FnInfo = FDecl->getIdentifier(); |
| // None of the checks below are needed for functions that don't have |
| // simple names (e.g., C++ conversion functions). |
| if (!FnInfo) |
| return false; |
| |
| CheckAbsoluteValueFunction(TheCall, FDecl, FnInfo); |
| |
| unsigned CMId = FDecl->getMemoryFunctionKind(); |
| if (CMId == 0) |
| return false; |
| |
| // Handle memory setting and copying functions. |
| if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) |
| CheckStrlcpycatArguments(TheCall, FnInfo); |
| else if (CMId == Builtin::BIstrncat) |
| CheckStrncatArguments(TheCall, FnInfo); |
| else |
| CheckMemaccessArguments(TheCall, CMId, FnInfo); |
| |
| return false; |
| } |
| |
| bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, |
| ArrayRef<const Expr *> Args) { |
| VariadicCallType CallType = |
| Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; |
| |
| checkCall(Method, Args, Method->param_size(), |
| /*IsMemberFunction=*/false, |
| lbrac, Method->getSourceRange(), CallType); |
| |
| return false; |
| } |
| |
| bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, |
| const FunctionProtoType *Proto) { |
| const VarDecl *V = dyn_cast<VarDecl>(NDecl); |
| if (!V) |
| return false; |
| |
| QualType Ty = V->getType(); |
| if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType()) |
| return false; |
| |
| VariadicCallType CallType; |
| if (!Proto || !Proto->isVariadic()) { |
| CallType = VariadicDoesNotApply; |
| } else if (Ty->isBlockPointerType()) { |
| CallType = VariadicBlock; |
| } else { // Ty->isFunctionPointerType() |
| CallType = VariadicFunction; |
| } |
| unsigned NumParams = Proto ? Proto->getNumParams() : 0; |
| |
| checkCall(NDecl, llvm::makeArrayRef<const Expr *>(TheCall->getArgs(), |
| TheCall->getNumArgs()), |
| NumParams, /*IsMemberFunction=*/false, TheCall->getRParenLoc(), |
| TheCall->getCallee()->getSourceRange(), CallType); |
| |
| return false; |
| } |
| |
| /// Checks function calls when a FunctionDecl or a NamedDecl is not available, |
| /// such as function pointers returned from functions. |
| bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { |
| VariadicCallType CallType = getVariadicCallType(/*FDecl=*/0, Proto, |
| TheCall->getCallee()); |
| unsigned NumParams = Proto ? Proto->getNumParams() : 0; |
| |
| checkCall(/*FDecl=*/0, llvm::makeArrayRef<const Expr *>( |
| TheCall->getArgs(), TheCall->getNumArgs()), |
| NumParams, /*IsMemberFunction=*/false, TheCall->getRParenLoc(), |
| TheCall->getCallee()->getSourceRange(), CallType); |
| |
| return false; |
| } |
| |
| static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { |
| if (Ordering < AtomicExpr::AO_ABI_memory_order_relaxed || |
| Ordering > AtomicExpr::AO_ABI_memory_order_seq_cst) |
| return false; |
| |
| switch (Op) { |
| case AtomicExpr::AO__c11_atomic_init: |
| llvm_unreachable("There is no ordering argument for an init"); |
| |
| case AtomicExpr::AO__c11_atomic_load: |
| case AtomicExpr::AO__atomic_load_n: |
| case AtomicExpr::AO__atomic_load: |
| return Ordering != AtomicExpr::AO_ABI_memory_order_release && |
| Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel; |
| |
| case AtomicExpr::AO__c11_atomic_store: |
| case AtomicExpr::AO__atomic_store: |
| case AtomicExpr::AO__atomic_store_n: |
| return Ordering != AtomicExpr::AO_ABI_memory_order_consume && |
| Ordering != AtomicExpr::AO_ABI_memory_order_acquire && |
| Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel; |
| |
| default: |
| return true; |
| } |
| } |
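| |
| // For illustration (not from the original source): the check above returns |
| // false for |
| //   _Atomic(int) A; |
| //   int V = __c11_atomic_load(&A, __ATOMIC_RELEASE); |
| // because release and acquire-release orderings are meaningless for a load; |
| // the caller below then warns about the invalid memory order. |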
| |
| ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, |
| AtomicExpr::AtomicOp Op) { |
| CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); |
| DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| |
| // All these operations take one of the following forms: |
| enum { |
| // C __c11_atomic_init(A *, C) |
| Init, |
| // C __c11_atomic_load(A *, int) |
| Load, |
| // void __atomic_load(A *, CP, int) |
| Copy, |
| // C __c11_atomic_add(A *, M, int) |
| Arithmetic, |
| // C __atomic_exchange_n(A *, CP, int) |
| Xchg, |
| // void __atomic_exchange(A *, C *, CP, int) |
| GNUXchg, |
| // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) |
| C11CmpXchg, |
| // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) |
| GNUCmpXchg |
| } Form = Init; |
| const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 }; |
| const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 }; |
| // where: |
| // C is an appropriate type, |
| // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, |
| // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, |
| // M is C if C is an integer, and ptrdiff_t if C is a pointer, and |
| // the int parameters are for orderings. |
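| // |
| // For illustration only (not part of the original source), a GNUCmpXchg call |
| // in user code looks like |
| //   int Obj, Expected = 0, Desired = 1; |
| //   __atomic_compare_exchange(&Obj, &Expected, &Desired, /*weak=*/false, |
| //                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); |
| // i.e. one pointer, three value arguments and two orderings, matching |
| // NumArgs[GNUCmpXchg] == 6 and NumVals[GNUCmpXchg] == 3 above. |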
| |
| assert(AtomicExpr::AO__c11_atomic_init == 0 && |
| AtomicExpr::AO__c11_atomic_fetch_xor + 1 == AtomicExpr::AO__atomic_load |
| && "need to update code for modified C11 atomics"); |
| bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init && |
| Op <= AtomicExpr::AO__c11_atomic_fetch_xor; |
| bool IsN = Op == AtomicExpr::AO__atomic_load_n || |
| Op == AtomicExpr::AO__atomic_store_n || |
| Op == AtomicExpr::AO__atomic_exchange_n || |
| Op == AtomicExpr::AO__atomic_compare_exchange_n; |
| bool IsAddSub = false; |
| |
| switch (Op) { |
| case AtomicExpr::AO__c11_atomic_init: |
| Form = Init; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_load: |
| case AtomicExpr::AO__atomic_load_n: |
| Form = Load; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_store: |
| case AtomicExpr::AO__atomic_load: |
| case AtomicExpr::AO__atomic_store: |
| case AtomicExpr::AO__atomic_store_n: |
| Form = Copy; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_fetch_add: |
| case AtomicExpr::AO__c11_atomic_fetch_sub: |
| case AtomicExpr::AO__atomic_fetch_add: |
| case AtomicExpr::AO__atomic_fetch_sub: |
| case AtomicExpr::AO__atomic_add_fetch: |
| case AtomicExpr::AO__atomic_sub_fetch: |
| IsAddSub = true; |
| // Fall through. |
| case AtomicExpr::AO__c11_atomic_fetch_and: |
| case AtomicExpr::AO__c11_atomic_fetch_or: |
| case AtomicExpr::AO__c11_atomic_fetch_xor: |
| case AtomicExpr::AO__atomic_fetch_and: |
| case AtomicExpr::AO__atomic_fetch_or: |
| case AtomicExpr::AO__atomic_fetch_xor: |
| case AtomicExpr::AO__atomic_fetch_nand: |
| case AtomicExpr::AO__atomic_and_fetch: |
| case AtomicExpr::AO__atomic_or_fetch: |
| case AtomicExpr::AO__atomic_xor_fetch: |
| case AtomicExpr::AO__atomic_nand_fetch: |
| Form = Arithmetic; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_exchange: |
| case AtomicExpr::AO__atomic_exchange_n: |
| Form = Xchg; |
| break; |
| |
| case AtomicExpr::AO__atomic_exchange: |
| Form = GNUXchg; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_compare_exchange_strong: |
| case AtomicExpr::AO__c11_atomic_compare_exchange_weak: |
| Form = C11CmpXchg; |
| break; |
| |
| case AtomicExpr::AO__atomic_compare_exchange: |
| case AtomicExpr::AO__atomic_compare_exchange_n: |
| Form = GNUCmpXchg; |
| break; |
| } |
| |
| // Check we have the right number of arguments. |
| if (TheCall->getNumArgs() < NumArgs[Form]) { |
| Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) |
| << 0 << NumArgs[Form] << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } else if (TheCall->getNumArgs() > NumArgs[Form]) { |
| Diag(TheCall->getArg(NumArgs[Form])->getLocStart(), |
| diag::err_typecheck_call_too_many_args) |
| << 0 << NumArgs[Form] << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Inspect the first argument of the atomic operation. |
| Expr *Ptr = TheCall->getArg(0); |
| Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get(); |
| const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // For a __c11 builtin, this should be a pointer to an _Atomic type. |
| QualType AtomTy = pointerType->getPointeeType(); // 'A' |
| QualType ValType = AtomTy; // 'C' |
| if (IsC11) { |
| if (!AtomTy->isAtomicType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| if (AtomTy.isConstQualified()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| ValType = AtomTy->getAs<AtomicType>()->getValueType(); |
| } |
| |
| // For an arithmetic operation, the implied arithmetic must be well-formed. |
| if (Form == Arithmetic) { |
| // gcc does not enforce these rules for GNU atomics, but we do so for sanity. |
| if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr) |
| << IsC11 << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| if (!IsAddSub && !ValType->isIntegerType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int) |
| << IsC11 << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { |
| // For __atomic_*_n operations, the value type must be a scalar integral or |
| // pointer type which is 1, 2, 4, 8 or 16 bytes in length. |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr) |
| << IsC11 << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && |
| !AtomTy->isScalarType()) { |
| // For GNU atomics, require a trivially-copyable type. This is not part of |
| // the GNU atomics specification, but we enforce it for sanity. |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // FIXME: For any builtin other than a load, the ValType must not be |
| // const-qualified. |
| |
| switch (ValType.getObjCLifetime()) { |
| case Qualifiers::OCL_None: |
| case Qualifiers::OCL_ExplicitNone: |
| // okay |
| break; |
| |
| case Qualifiers::OCL_Weak: |
| case Qualifiers::OCL_Strong: |
| case Qualifiers::OCL_Autoreleasing: |
| // FIXME: Can this happen? By this point, ValType should be known |
| // to be trivially copyable. |
| Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) |
| << ValType << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| QualType ResultType = ValType; |
| if (Form == Copy || Form == GNUXchg || Form == Init) |
| ResultType = Context.VoidTy; |
| else if (Form == C11CmpXchg || Form == GNUCmpXchg) |
| ResultType = Context.BoolTy; |
| |
| // The type of a parameter passed 'by value'. In the GNU atomics, such |
| // arguments are actually passed as pointers. |
| QualType ByValType = ValType; // 'CP' |
| if (!IsC11 && !IsN) |
| ByValType = Ptr->getType(); |
| |
| // The first argument --- the pointer --- has a fixed type; we |
| // deduce the types of the rest of the arguments accordingly. Walk |
| // the remaining arguments, converting them to the deduced value type. |
| for (unsigned i = 1; i != NumArgs[Form]; ++i) { |
| QualType Ty; |
| if (i < NumVals[Form] + 1) { |
| switch (i) { |
| case 1: |
| // The second argument is the non-atomic operand. For arithmetic, this |
| // is always passed by value, and for a compare_exchange it is always |
| // passed by address. For the rest, GNU uses by-address and C11 uses |
| // by-value. |
| assert(Form != Load); |
| if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) |
| Ty = ValType; |
| else if (Form == Copy || Form == Xchg) |
| Ty = ByValType; |
| else if (Form == Arithmetic) |
| Ty = Context.getPointerDiffType(); |
| else |
| Ty = Context.getPointerType(ValType.getUnqualifiedType()); |
| break; |
| case 2: |
| // The third argument to compare_exchange / GNU exchange is a |
| // (pointer to a) desired value. |
| Ty = ByValType; |
| break; |
| case 3: |
| // The fourth argument to GNU compare_exchange is a 'weak' flag. |
| Ty = Context.BoolTy; |
| break; |
| } |
| } else { |
| // The order(s) are always converted to int. |
| Ty = Context.IntTy; |
| } |
| |
| InitializedEntity Entity = |
| InitializedEntity::InitializeParameter(Context, Ty, false); |
| ExprResult Arg = TheCall->getArg(i); |
| Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); |
| if (Arg.isInvalid()) |
| return true; |
| TheCall->setArg(i, Arg.get()); |
| } |
| |
| // Permute the arguments into a 'consistent' order. |
| SmallVector<Expr*, 5> SubExprs; |
| SubExprs.push_back(Ptr); |
| switch (Form) { |
| case Init: |
| // Note, AtomicExpr::getVal1() has a special case for this atomic. |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| break; |
| case Load: |
| SubExprs.push_back(TheCall->getArg(1)); // Order |
| break; |
| case Copy: |
| case Arithmetic: |
| case Xchg: |
| SubExprs.push_back(TheCall->getArg(2)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| break; |
| case GNUXchg: |
| // Note, AtomicExpr::getVal2() has a special case for this atomic. |
| SubExprs.push_back(TheCall->getArg(3)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| SubExprs.push_back(TheCall->getArg(2)); // Val2 |
| break; |
| case C11CmpXchg: |
| SubExprs.push_back(TheCall->getArg(3)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| SubExprs.push_back(TheCall->getArg(4)); // OrderFail |
| SubExprs.push_back(TheCall->getArg(2)); // Val2 |
| break; |
| case GNUCmpXchg: |
| SubExprs.push_back(TheCall->getArg(4)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| SubExprs.push_back(TheCall->getArg(5)); // OrderFail |
| SubExprs.push_back(TheCall->getArg(2)); // Val2 |
| SubExprs.push_back(TheCall->getArg(3)); // Weak |
| break; |
| } |
| |
| if (SubExprs.size() >= 2 && Form != Init) { |
| llvm::APSInt Result(32); |
| if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && |
| !isValidOrderingForOp(Result.getSExtValue(), Op)) |
| Diag(SubExprs[1]->getLocStart(), |
| diag::warn_atomic_op_has_invalid_memory_order) |
| << SubExprs[1]->getSourceRange(); |
| } |
| |
| AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(), |
| SubExprs, ResultType, Op, |
| TheCall->getRParenLoc()); |
| |
| if ((Op == AtomicExpr::AO__c11_atomic_load || |
| (Op == AtomicExpr::AO__c11_atomic_store)) && |
| Context.AtomicUsesUnsupportedLibcall(AE)) |
| Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib) << |
| ((Op == AtomicExpr::AO__c11_atomic_load) ? 0 : 1); |
| |
| return Owned(AE); |
| } |
| |
| |
| /// checkBuiltinArgument - Given a call to a builtin function, perform |
| /// normal type-checking on the given argument, updating the call in |
| /// place. This is useful when a builtin function requires custom |
| /// type-checking for some of its arguments but not necessarily all of |
| /// them. |
| /// |
| /// Returns true on error. |
| static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { |
| FunctionDecl *Fn = E->getDirectCallee(); |
| assert(Fn && "builtin call without direct callee!"); |
| |
| ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); |
| InitializedEntity Entity = |
| InitializedEntity::InitializeParameter(S.Context, Param); |
| |
| ExprResult Arg = E->getArg(ArgIndex); |
| Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); |
| if (Arg.isInvalid()) |
| return true; |
| |
| E->setArg(ArgIndex, Arg.take()); |
| return false; |
| } |
| |
| /// SemaBuiltinAtomicOverloaded - We have a call to a function like |
| /// __sync_fetch_and_add, which is an overloaded function based on the pointer |
| /// type of its first argument. The main ActOnCallExpr routines have already |
| /// promoted the types of arguments because all of these calls are prototyped as |
| /// void(...). |
| /// |
| /// This function performs the final semantic checking for these builtins. |
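| /// |
| /// For illustration only (not from this file): given |
| /// \code |
| ///   short Counter; |
| ///   short Old = __sync_fetch_and_add(&Counter, 1); |
| /// \endcode |
| /// the pointee is 2 bytes wide, so the code below retargets the call at the |
| /// concrete __sync_fetch_and_add_2 builtin and sets the result type to |
| /// 'short'. |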
| ExprResult |
| Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { |
| CallExpr *TheCall = (CallExpr *)TheCallResult.get(); |
| DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); |
| |
| // Ensure that we have at least one argument to do type inference from. |
| if (TheCall->getNumArgs() < 1) { |
| Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) |
| << 0 << 1 << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Inspect the first argument of the atomic builtin. This should always be |
| // a pointer type, whose element is an integral scalar or pointer type. |
| // Because it is a pointer type, we don't have to worry about any implicit |
| // casts here. |
| // FIXME: We don't allow floating point scalars as input. |
| Expr *FirstArg = TheCall->getArg(0); |
| ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); |
| if (FirstArgResult.isInvalid()) |
| return ExprError(); |
| FirstArg = FirstArgResult.take(); |
| TheCall->setArg(0, FirstArg); |
| |
| const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) |
| << FirstArg->getType() << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| QualType ValType = pointerType->getPointeeType(); |
| if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && |
| !ValType->isBlockPointerType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr) |
| << FirstArg->getType() << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| switch (ValType.getObjCLifetime()) { |
| case Qualifiers::OCL_None: |
| case Qualifiers::OCL_ExplicitNone: |
| // okay |
| break; |
| |
| case Qualifiers::OCL_Weak: |
| case Qualifiers::OCL_Strong: |
| case Qualifiers::OCL_Autoreleasing: |
| Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) |
| << ValType << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Strip any qualifiers off ValType. |
| ValType = ValType.getUnqualifiedType(); |
| |
| // The majority of builtins return a value, but a few have special return |
| // types, so allow them to override appropriately below. |
| QualType ResultType = ValType; |
| |
| // We need to figure out which concrete builtin this maps onto. For example, |
| // __sync_fetch_and_add with a 2 byte object turns into |
| // __sync_fetch_and_add_2. |
| #define BUILTIN_ROW(x) \ |
| { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ |
| Builtin::BI##x##_8, Builtin::BI##x##_16 } |
| |
| static const unsigned BuiltinIndices[][5] = { |
| BUILTIN_ROW(__sync_fetch_and_add), |
| BUILTIN_ROW(__sync_fetch_and_sub), |
| BUILTIN_ROW(__sync_fetch_and_or), |
| BUILTIN_ROW(__sync_fetch_and_and), |
| BUILTIN_ROW(__sync_fetch_and_xor), |
| |
| BUILTIN_ROW(__sync_add_and_fetch), |
| BUILTIN_ROW(__sync_sub_and_fetch), |
| BUILTIN_ROW(__sync_and_and_fetch), |
| BUILTIN_ROW(__sync_or_and_fetch), |
| BUILTIN_ROW(__sync_xor_and_fetch), |
| |
| BUILTIN_ROW(__sync_val_compare_and_swap), |
| BUILTIN_ROW(__sync_bool_compare_and_swap), |
| BUILTIN_ROW(__sync_lock_test_and_set), |
| BUILTIN_ROW(__sync_lock_release), |
| BUILTIN_ROW(__sync_swap) |
| }; |
| #undef BUILTIN_ROW |
| |
| // Determine the index of the size. |
| unsigned SizeIndex; |
| switch (Context.getTypeSizeInChars(ValType).getQuantity()) { |
| case 1: SizeIndex = 0; break; |
| case 2: SizeIndex = 1; break; |
| case 4: SizeIndex = 2; break; |
| case 8: SizeIndex = 3; break; |
| case 16: SizeIndex = 4; break; |
| default: |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size) |
| << FirstArg->getType() << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Each of these builtins has one pointer argument, followed by some number of |
| // values (0, 1 or 2) followed by a potentially empty varargs list of stuff |
| // that we ignore. Find out which row of BuiltinIndices to read from as well |
| // as the number of fixed args. |
| unsigned BuiltinID = FDecl->getBuiltinID(); |
| unsigned BuiltinIndex, NumFixed = 1; |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown overloaded atomic builtin!"); |
| case Builtin::BI__sync_fetch_and_add: |
| case Builtin::BI__sync_fetch_and_add_1: |
| case Builtin::BI__sync_fetch_and_add_2: |
| case Builtin::BI__sync_fetch_and_add_4: |
| case Builtin::BI__sync_fetch_and_add_8: |
| case Builtin::BI__sync_fetch_and_add_16: |
| BuiltinIndex = 0; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_sub: |
| case Builtin::BI__sync_fetch_and_sub_1: |
| case Builtin::BI__sync_fetch_and_sub_2: |
| case Builtin::BI__sync_fetch_and_sub_4: |
| case Builtin::BI__sync_fetch_and_sub_8: |
| case Builtin::BI__sync_fetch_and_sub_16: |
| BuiltinIndex = 1; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_or: |
| case Builtin::BI__sync_fetch_and_or_1: |
| case Builtin::BI__sync_fetch_and_or_2: |
| case Builtin::BI__sync_fetch_and_or_4: |
| case Builtin::BI__sync_fetch_and_or_8: |
| case Builtin::BI__sync_fetch_and_or_16: |
| BuiltinIndex = 2; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_and: |
| case Builtin::BI__sync_fetch_and_and_1: |
| case Builtin::BI__sync_fetch_and_and_2: |
| case Builtin::BI__sync_fetch_and_and_4: |
| case Builtin::BI__sync_fetch_and_and_8: |
| case Builtin::BI__sync_fetch_and_and_16: |
| BuiltinIndex = 3; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_xor: |
| case Builtin::BI__sync_fetch_and_xor_1: |
| case Builtin::BI__sync_fetch_and_xor_2: |
| case Builtin::BI__sync_fetch_and_xor_4: |
| case Builtin::BI__sync_fetch_and_xor_8: |
| case Builtin::BI__sync_fetch_and_xor_16: |
| BuiltinIndex = 4; |
| break; |
| |
| case Builtin::BI__sync_add_and_fetch: |
| case Builtin::BI__sync_add_and_fetch_1: |
| case Builtin::BI__sync_add_and_fetch_2: |
| case Builtin::BI__sync_add_and_fetch_4: |
| case Builtin::BI__sync_add_and_fetch_8: |
| case Builtin::BI__sync_add_and_fetch_16: |
| BuiltinIndex = 5; |
| break; |
| |
| case Builtin::BI__sync_sub_and_fetch: |
| case Builtin::BI__sync_sub_and_fetch_1: |
| case Builtin::BI__sync_sub_and_fetch_2: |
| case Builtin::BI__sync_sub_and_fetch_4: |
| case Builtin::BI__sync_sub_and_fetch_8: |
| case Builtin::BI__sync_sub_and_fetch_16: |
| BuiltinIndex = 6; |
| break; |
| |
| case Builtin::BI__sync_and_and_fetch: |
| case Builtin::BI__sync_and_and_fetch_1: |
| case Builtin::BI__sync_and_and_fetch_2: |
| case Builtin::BI__sync_and_and_fetch_4: |
| case Builtin::BI__sync_and_and_fetch_8: |
| case Builtin::BI__sync_and_and_fetch_16: |
| BuiltinIndex = 7; |
| break; |
| |
| case Builtin::BI__sync_or_and_fetch: |
| case Builtin::BI__sync_or_and_fetch_1: |
| case Builtin::BI__sync_or_and_fetch_2: |
| case Builtin::BI__sync_or_and_fetch_4: |
| case Builtin::BI__sync_or_and_fetch_8: |
| case Builtin::BI__sync_or_and_fetch_16: |
| BuiltinIndex = 8; |
| break; |
| |
| case Builtin::BI__sync_xor_and_fetch: |
| case Builtin::BI__sync_xor_and_fetch_1: |
| case Builtin::BI__sync_xor_and_fetch_2: |
| case Builtin::BI__sync_xor_and_fetch_4: |
| case Builtin::BI__sync_xor_and_fetch_8: |
| case Builtin::BI__sync_xor_and_fetch_16: |
| BuiltinIndex = 9; |
| break; |
| |
| case Builtin::BI__sync_val_compare_and_swap: |
| case Builtin::BI__sync_val_compare_and_swap_1: |
| case Builtin::BI__sync_val_compare_and_swap_2: |
| case Builtin::BI__sync_val_compare_and_swap_4: |
| case Builtin::BI__sync_val_compare_and_swap_8: |
| case Builtin::BI__sync_val_compare_and_swap_16: |
| BuiltinIndex = 10; |
| NumFixed = 2; |
| break; |
| |
| case Builtin::BI__sync_bool_compare_and_swap: |
| case Builtin::BI__sync_bool_compare_and_swap_1: |
| case Builtin::BI__sync_bool_compare_and_swap_2: |
| case Builtin::BI__sync_bool_compare_and_swap_4: |
| case Builtin::BI__sync_bool_compare_and_swap_8: |
| case Builtin::BI__sync_bool_compare_and_swap_16: |
| BuiltinIndex = 11; |
| NumFixed = 2; |
| ResultType = Context.BoolTy; |
| break; |
| |
| case Builtin::BI__sync_lock_test_and_set: |
| case Builtin::BI__sync_lock_test_and_set_1: |
| case Builtin::BI__sync_lock_test_and_set_2: |
| case Builtin::BI__sync_lock_test_and_set_4: |
| case Builtin::BI__sync_lock_test_and_set_8: |
| case Builtin::BI__sync_lock_test_and_set_16: |
| BuiltinIndex = 12; |
| break; |
| |
| case Builtin::BI__sync_lock_release: |
| case Builtin::BI__sync_lock_release_1: |
| case Builtin::BI__sync_lock_release_2: |
| case Builtin::BI__sync_lock_release_4: |
| case Builtin::BI__sync_lock_release_8: |
| case Builtin::BI__sync_lock_release_16: |
| BuiltinIndex = 13; |
| NumFixed = 0; |
| ResultType = Context.VoidTy; |
| break; |
| |
| case Builtin::BI__sync_swap: |
| case Builtin::BI__sync_swap_1: |
| case Builtin::BI__sync_swap_2: |
| case Builtin::BI__sync_swap_4: |
| case Builtin::BI__sync_swap_8: |
| case Builtin::BI__sync_swap_16: |
| BuiltinIndex = 14; |
| break; |
| } |
| |
| // Now that we know how many fixed arguments we expect, first check that we |
| // have at least that many. |
| if (TheCall->getNumArgs() < 1+NumFixed) { |
| Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) |
| << 0 << 1+NumFixed << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // From the decl for the concrete builtin we can tell what concrete |
| // integer type we should convert to. |
| unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; |
| const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID); |
| FunctionDecl *NewBuiltinDecl; |
| if (NewBuiltinID == BuiltinID) |
| NewBuiltinDecl = FDecl; |
| else { |
| // Perform builtin lookup to avoid redeclaring it. |
| DeclarationName DN(&Context.Idents.get(NewBuiltinName)); |
| LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName); |
| LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); |
| assert(Res.getFoundDecl()); |
| NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); |
| if (NewBuiltinDecl == 0) |
| return ExprError(); |
| } |
| |
| // The first argument --- the pointer --- has a fixed type; we |
| // deduce the types of the rest of the arguments accordingly. Walk |
| // the remaining arguments, converting them to the deduced value type. |
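| // For example (illustrative caller code, not part of this file): given |
| //   long Val; |
| //   __sync_val_compare_and_swap(&Val, 1, 42); |
| // the deduced ValType is 'long', so the constants 1 and 42 are converted |
| // to 'long' by the loop below. |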
| for (unsigned i = 0; i != NumFixed; ++i) { |
| ExprResult Arg = TheCall->getArg(i+1); |
| |
| // GCC does an implicit conversion to the pointer or integer ValType. This |
| // can fail in some cases (e.g. 1i -> int**); check for that error case now |
| // by initializing the argument. |
| InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, |
| ValType, /*consume*/ false); |
| Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); |
| if (Arg.isInvalid()) |
| return ExprError(); |
| |
| // Okay, we have something that *can* be converted to the right type. Check |
| // to see if there is a potentially weird extension going on here. This can |
| // happen when you do an atomic operation on something like a char* and |
| // pass in 42. The 42 gets converted to char. This is even stranger for |
| // things like 45.123 -> char, etc. |
| // FIXME: Do this check. |
| TheCall->setArg(i+1, Arg.take()); |
| } |
| |
| ASTContext& Context = this->getASTContext(); |
| |
| // Create a new DeclRefExpr to refer to the new decl. |
| DeclRefExpr* NewDRE = DeclRefExpr::Create( |
| Context, |
| DRE->getQualifierLoc(), |
| SourceLocation(), |
| NewBuiltinDecl, |
| /*enclosing*/ false, |
| DRE->getLocation(), |
| Context.BuiltinFnTy, |
| DRE->getValueKind()); |
| |
| // Set the callee in the CallExpr. |
| // FIXME: This loses syntactic information. |
| QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); |
| ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, |
| CK_BuiltinFnToFnPtr); |
| TheCall->setCallee(PromotedCall.take()); |
| |
| // Change the result type of the call to match the original value type. This |
| // is arbitrary, but the codegen for these builtins is designed to handle it |
| // gracefully. |
| TheCall->setType(ResultType); |
| |
| return TheCallResult; |
| } |
| |
| /// CheckObjCString - Checks that the argument to the builtin |
| /// CFString constructor is correct. |
| /// Note: It might also make sense to do the UTF-16 conversion here (would |
| /// simplify the backend). |
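| /// |
| /// For example (illustrative; CFSTR is assumed to be the usual CoreFoundation |
| /// macro): CFSTR("hello") typically expands to |
| ///   __builtin___CFStringMakeConstantString("hello") |
| /// whose argument must be an ordinary (non-wide) string literal. |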
| bool Sema::CheckObjCString(Expr *Arg) { |
| Arg = Arg->IgnoreParenCasts(); |
| StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); |
| |
| if (!Literal || !Literal->isAscii()) { |
| Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant) |
| << Arg->getSourceRange(); |
| return true; |
| } |
| |
| if (Literal->containsNonAsciiOrNull()) { |
| StringRef String = Literal->getString(); |
| unsigned NumBytes = String.size(); |
| SmallVector<UTF16, 128> ToBuf(NumBytes); |
| const UTF8 *FromPtr = (const UTF8 *)String.data(); |
| UTF16 *ToPtr = &ToBuf[0]; |
| |
| ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, |
| &ToPtr, ToPtr + NumBytes, |
| strictConversion); |
| // Check for conversion failure. |
| if (Result != conversionOK) |
| Diag(Arg->getLocStart(), |
| diag::warn_cfstring_truncated) << Arg->getSourceRange(); |
| } |
| return false; |
| } |
| |
| /// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity. |
| /// Emit an error and return true on failure, return false on success. |
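| /// |
| /// For example (illustrative user code): |
| ///   void f(int last, ...) { |
| ///     va_list ap; |
| ///     __builtin_va_start(ap, last); // ok: 'last' is the last named param |
| ///   } |
| /// Passing anything other than the last named parameter only warns, while |
| /// using va_start in a non-variadic function is an error. |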
| bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) { |
| Expr *Fn = TheCall->getCallee(); |
| if (TheCall->getNumArgs() > 2) { |
| Diag(TheCall->getArg(2)->getLocStart(), |
| diag::err_typecheck_call_too_many_args) |
| << 0 /*function call*/ << 2 << TheCall->getNumArgs() |
| << Fn->getSourceRange() |
| << SourceRange(TheCall->getArg(2)->getLocStart(), |
| (*(TheCall->arg_end()-1))->getLocEnd()); |
| return true; |
| } |
| |
| if (TheCall->getNumArgs() < 2) { |
| return Diag(TheCall->getLocEnd(), |
| diag::err_typecheck_call_too_few_args_at_least) |
| << 0 /*function call*/ << 2 << TheCall->getNumArgs(); |
| } |
| |
| // Type-check the first argument normally. |
| if (checkBuiltinArgument(*this, TheCall, 0)) |
| return true; |
| |
| // Determine whether the current function is variadic or not. |
| BlockScopeInfo *CurBlock = getCurBlock(); |
| bool isVariadic; |
| if (CurBlock) |
| isVariadic = CurBlock->TheDecl->isVariadic(); |
| else if (FunctionDecl *FD = getCurFunctionDecl()) |
| isVariadic = FD->isVariadic(); |
| else |
| isVariadic = getCurMethodDecl()->isVariadic(); |
| |
| if (!isVariadic) { |
| Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function); |
| return true; |
| } |
| |
| // Verify that the second argument to the builtin is the last argument of the |
| // current function or method. |
| bool SecondArgIsLastNamedArgument = false; |
| const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); |
| |
| // These are only valid (and used) if SecondArgIsLastNamedArgument is true |
| // after the next block. |
| QualType Type; |
| SourceLocation ParamLoc; |
| |
| if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { |
| if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { |
| // FIXME: This isn't correct for methods (results in bogus warning). |
| // Get the last formal in the current function. |
| const ParmVarDecl *LastArg; |
| if (CurBlock) |
| LastArg = *(CurBlock->TheDecl->param_end()-1); |
| else if (FunctionDecl *FD = getCurFunctionDecl()) |
| LastArg = *(FD->param_end()-1); |
| else |
| LastArg = *(getCurMethodDecl()->param_end()-1); |
| SecondArgIsLastNamedArgument = PV == LastArg; |
| |
| Type = PV->getType(); |
| ParamLoc = PV->getLocation(); |
| } |
| } |
| |
| if (!SecondArgIsLastNamedArgument) |
| Diag(TheCall->getArg(1)->getLocStart(), |
| diag::warn_second_parameter_of_va_start_not_last_named_argument); |
| else if (Type->isReferenceType()) { |
| Diag(Arg->getLocStart(), |
| diag::warn_va_start_of_reference_type_is_undefined); |
| Diag(ParamLoc, diag::note_parameter_type) << Type; |
| } |
| |
| TheCall->setType(Context.VoidTy); |
| return false; |
| } |
| |
| /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and |
| /// friends. This is declared to take (...), so we have to check everything. |
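| /// |
| /// For example (illustrative): |
| ///   __builtin_isgreater(1.0f, 2.0); // ok: common type is double |
| ///   __builtin_isgreater(1, 2);      // error: no real floating-point type |
| /// Both arguments go through the usual arithmetic conversions, and the |
| /// resulting common type must be a real (non-complex) floating-point type. |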
| bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { |
| if (TheCall->getNumArgs() < 2) |
| return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) |
| << 0 /*function call*/ << 2 << TheCall->getNumArgs(); |
| if (TheCall->getNumArgs() > 2) |
| return Diag(TheCall->getArg(2)->getLocStart(), |
| diag::err_typecheck_call_too_many_args) |
| << 0 /*function call*/ << 2 << TheCall->getNumArgs() |
| << SourceRange(TheCall->getArg(2)->getLocStart(), |
| (*(TheCall->arg_end()-1))->getLocEnd()); |
| |
| ExprResult OrigArg0 = TheCall->getArg(0); |
| ExprResult OrigArg1 = TheCall->getArg(1); |
| |
| // Do standard promotions between the two arguments, returning their common |
| // type. |
| QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false); |
| if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) |
| return true; |
| |
| // Make sure any conversions are pushed back into the call; this is |
| // type safe since unordered compare builtins are declared as "_Bool |
| // foo(...)". |
| TheCall->setArg(0, OrigArg0.get()); |
| TheCall->setArg(1, OrigArg1.get()); |
| |
| if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) |
| return false; |
| |
| // If the common type isn't a real floating type, then the arguments were |
| // invalid for this operation. |
| if (Res.isNull() || !Res->isRealFloatingType()) |
| return Diag(OrigArg0.get()->getLocStart(), |
| diag::err_typecheck_call_invalid_ordered_compare) |
| << OrigArg0.get()->getType() << OrigArg1.get()->getType() |
| << SourceRange(OrigArg0.get()->getLocStart(), OrigArg1.get()->getLocEnd()); |
| |
| return false; |
| } |
| |
| /// SemaBuiltinFPClassification - Handle functions like __builtin_isnan and |
| /// friends. This is declared to take (...), so we have to check everything. |
| /// We expect the last argument to be a floating point value. |
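| /// |
| /// For example (illustrative): |
| ///   __builtin_isnan(x);  // NumArgs == 1; x must be floating-point |
| ///   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, |
| ///                        FP_ZERO, x);  // NumArgs == 6 |
| /// An implicit float-to-double promotion on the value argument is stripped |
| /// below so codegen sees the original float. |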
| bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { |
| if (TheCall->getNumArgs() < NumArgs) |
| return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) |
| << 0 /*function call*/ << NumArgs << TheCall->getNumArgs(); |
| if (TheCall->getNumArgs() > NumArgs) |
| return Diag(TheCall->getArg(NumArgs)->getLocStart(), |
| diag::err_typecheck_call_too_many_args) |
| << 0 /*function call*/ << NumArgs << TheCall->getNumArgs() |
| << SourceRange(TheCall->getArg(NumArgs)->getLocStart(), |
| (*(TheCall->arg_end()-1))->getLocEnd()); |
| |
| Expr *OrigArg = TheCall->getArg(NumArgs-1); |
| |
| if (OrigArg->isTypeDependent()) |
| return false; |
| |
| // This operation requires a non-_Complex floating-point number. |
| if (!OrigArg->getType()->isRealFloatingType()) |
| return Diag(OrigArg->getLocStart(), |
| diag::err_typecheck_call_invalid_unary_fp) |
| << OrigArg->getType() << OrigArg->getSourceRange(); |
| |
| // If this is an implicit conversion from float -> double, remove it. |
| if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) { |
| Expr *CastArg = Cast->getSubExpr(); |
| if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) { |
| assert(Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) && |
| "promotion from float to double is the only expected cast here"); |
| Cast->setSubExpr(0); |
| TheCall->setArg(NumArgs-1, CastArg); |
| } |
| } |
| |
| return false; |
| } |
| |
| /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. |
| // This is declared to take (...), so we have to check everything. |
| ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { |
| if (TheCall->getNumArgs() < 2) |
| return ExprError(Diag(TheCall->getLocEnd(), |
| diag::err_typecheck_call_too_few_args_at_least) |
| << 0 /*function call*/ << 2 << TheCall->getNumArgs() |
| << TheCall->getSourceRange()); |
| |
| // Determine which of the following types of shufflevector we're checking: |
| // 1) unary, vector mask: (lhs, mask) |
| // 2) binary, vector mask: (lhs, rhs, mask) |
| // 3) binary, scalar mask: (lhs, rhs, index, ..., index) |
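| // For example (illustrative): |
| //   __builtin_shufflevector(v, mask);          // form 1 |
| //   __builtin_shufflevector(a, b, 0, 4, 1, 5); // form 3: constant indices |
| // In form 3 each index must be a constant in [0, 2*N), where N is the |
| // number of elements in the input vectors, or -1 for an undefined element. |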
| QualType resType = TheCall->getArg(0)->getType(); |
| unsigned numElements = 0; |
| |
| if (!TheCall->getArg(0)->isTypeDependent() && |
| !TheCall->getArg(1)->isTypeDependent()) { |
| QualType LHSType = TheCall->getArg(0)->getType(); |
| QualType RHSType = TheCall->getArg(1)->getType(); |
| |
| if (!LHSType->isVectorType() || !RHSType->isVectorType()) |
| return ExprError(Diag(TheCall->getLocStart(), |
| diag::err_shufflevector_non_vector) |
| << SourceRange(TheCall->getArg(0)->getLocStart(), |
| TheCall->getArg(1)->getLocEnd())); |
| |
| numElements = LHSType->getAs<VectorType>()->getNumElements(); |
| unsigned numResElements = TheCall->getNumArgs() - 2; |
| |
| // Check to see if we have a call with 2 vector arguments, the unary shuffle |
| // with mask. If so, verify that the RHS is an integer vector type with the |
| // same number of elements as the LHS. |
| if (TheCall->getNumArgs() == 2) { |
| if (!RHSType->hasIntegerRepresentation() || |
| RHSType->getAs<VectorType>()->getNumElements() != numElements) |
| return ExprError(Diag(TheCall->getLocStart(), |
| diag::err_shufflevector_incompatible_vector) |
| << SourceRange(TheCall->getArg(1)->getLocStart(), |
| TheCall->getArg(1)->getLocEnd())); |
| } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { |
| return ExprError(Diag(TheCall->getLocStart(), |
| diag::err_shufflevector_incompatible_vector) |
| << SourceRange(TheCall->getArg(0)->getLocStart(), |
| TheCall->getArg(1)->getLocEnd())); |
| } else if (numElements != numResElements) { |
| QualType eltType = LHSType->getAs<VectorType>()->getElementType(); |
| resType = Context.getVectorType(eltType, numResElements, |
| VectorType::GenericVector); |
| } |
| } |
| |
| for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { |
| if (TheCall->getArg(i)->isTypeDependent() || |
| TheCall->getArg(i)->isValueDependent()) |
| continue; |
| |
| llvm::APSInt Result(32); |
| if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) |
| return ExprError(Diag(TheCall->getLocStart(), |
| diag::err_shufflevector_nonconstant_argument) |
| << TheCall->getArg(i)->getSourceRange()); |
| |
| // Allow -1 which will be translated to undef in the IR. |
| if (Result.isSigned() && Result.isAllOnesValue()) |
| continue; |
| |
| if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) |
| return ExprError(Diag(TheCall->getLocStart(), |
| diag::err_shufflevector_argument_too_large) |
| << TheCall->getArg(i)->getSourceRange()); |
| } |
| |
| SmallVector<Expr*, 32> exprs; |
| |
| for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { |
| exprs.push_back(TheCall->getArg(i)); |
| TheCall->setArg(i, 0); |
| } |
| |
| return Owned(new (Context) ShuffleVectorExpr(Context, exprs, resType, |
| TheCall->getCallee()->getLocStart(), |
| TheCall->getRParenLoc())); |
| } |
| |
| /// SemaConvertVectorExpr - Handle __builtin_convertvector |
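| /// |
| /// For example (illustrative, using ext_vector_type typedefs): |
| ///   typedef float float4 __attribute__((ext_vector_type(4))); |
| ///   typedef int   int4   __attribute__((ext_vector_type(4))); |
| ///   int4 i = __builtin_convertvector(f, int4); // f has type float4 |
| /// Source and destination must both be vector types with the same number of |
| /// elements; only the element type changes. |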
| ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, |
| SourceLocation BuiltinLoc, |
| SourceLocation RParenLoc) { |
| ExprValueKind VK = VK_RValue; |
| ExprObjectKind OK = OK_Ordinary; |
| QualType DstTy = TInfo->getType(); |
| QualType SrcTy = E->getType(); |
| |
| if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) |
| return ExprError(Diag(BuiltinLoc, |
| diag::err_convertvector_non_vector) |
| << E->getSourceRange()); |
| if (!DstTy->isVectorType() && !DstTy->isDependentType()) |
| return ExprError(Diag(BuiltinLoc, |
| diag::err_convertvector_non_vector_type)); |
| |
| if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { |
| unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements(); |
| unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements(); |
| if (SrcElts != DstElts) |
| return ExprError(Diag(BuiltinLoc, |
| diag::err_convertvector_incompatible_vector) |
| << E->getSourceRange()); |
| } |
| |
| return Owned(new (Context) ConvertVectorExpr(E, TInfo, DstTy, VK, OK, |
| BuiltinLoc, RParenLoc)); |
| |
| } |
| |
| /// SemaBuiltinPrefetch - Handle __builtin_prefetch. |
| // This is declared to take (const void*, ...) and can take two |
| // optional constant int args. |
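| // For example (illustrative): |
| //   __builtin_prefetch(p);       // address only |
| //   __builtin_prefetch(p, 1, 3); // rw in [0,1], locality in [0,3] |
| //   __builtin_prefetch(p, 2);    // error: rw argument out of range |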
| bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { |
| unsigned NumArgs = TheCall->getNumArgs(); |
| |
| if (NumArgs > 3) |
| return Diag(TheCall->getLocEnd(), |
| diag::err_typecheck_call_too_many_args_at_most) |
| << 0 /*function call*/ << 3 << NumArgs |
| << TheCall->getSourceRange(); |
| |
| // Argument 0 is checked for us and the remaining arguments must be |
| // constant integers. |
| for (unsigned i = 1; i != NumArgs; ++i) |
| if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) |
| return true; |
| |
| return false; |
| } |
| |
| /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr |
| /// TheCall is a constant expression. |
| bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, |
| llvm::APSInt &Result) { |
| Expr *Arg = TheCall->getArg(ArgNum); |
| DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); |
| |
| if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; |
| |
| if (!Arg->isIntegerConstantExpr(Result, Context)) |
| return Diag(TheCall->getLocStart(), diag::err_constant_integer_arg_type) |
| << FDecl->getDeclName() << Arg->getSourceRange(); |
| |
| return false; |
| } |
| |
| /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr |
| /// TheCall is a constant expression in the range [Low, High]. |
| bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, |
| int Low, int High) { |
| llvm::APSInt Result; |
| |
| // We can't check the value of a dependent argument. |
| Expr *Arg = TheCall->getArg(ArgNum); |
| if (Arg->isTypeDependent() || Arg->isValueDependent()) |
| return false; |
| |
| // Check constant-ness first. |
| if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) |
| return true; |
| |
| if (Result.getSExtValue() < Low || Result.getSExtValue() > High) |
| return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) |
| << Low << High << Arg->getSourceRange(); |
| |
| return false; |
| } |
| |
| /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). |
| /// This checks that val is a constant 1. |
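| /// |
| /// For example (illustrative): |
| ///   __builtin_longjmp(env, 1); // ok |
| ///   __builtin_longjmp(env, 2); // error: val must be the constant 1 |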
| bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { |
| Expr *Arg = TheCall->getArg(1); |
| llvm::APSInt Result; |
| |
| // TODO: This is less than ideal. Overload this to take a value. |
| if (SemaBuiltinConstantArg(TheCall, 1, Result)) |
| return true; |
| |
| if (Result != 1) |
| return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val) |
| << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); |
| |
| return false; |
| } |
| |
| namespace { |
| enum StringLiteralCheckType { |
| SLCT_NotALiteral, |
| SLCT_UncheckedLiteral, |
| SLCT_CheckedLiteral |
| }; |
| } |
| |
| // Determine if an expression is a string literal or constant string. |
| // If this function returns SLCT_NotALiteral for the format argument of a |
| // function expecting a format string, we will usually need to emit a warning. |
| // True string literals are then checked by CheckFormatString. |
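| // |
| // For example (illustrative), with a conditional format string: |
| //   printf(WantHex ? "%x\n" : "%d\n", Value); |
| // both branches are string literals, so both are checked and the weaker of |
| // the two results is returned. |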
| static StringLiteralCheckType |
| checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, |
| bool HasVAListArg, unsigned format_idx, |
| unsigned firstDataArg, Sema::FormatStringType Type, |
| Sema::VariadicCallType CallType, bool InFunctionCall, |
| llvm::SmallBitVector &CheckedVarArgs) { |
| tryAgain: |
| if (E->isTypeDependent() || E->isValueDependent()) |
| return SLCT_NotALiteral; |
| |
| E = E->IgnoreParenCasts(); |
| |
| if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) |
| // Technically -Wformat-nonliteral does not warn about this case. |
| // The behavior of printf and friends in this case is implementation |
| // dependent. Ideally if the format string cannot be null then |
| // it should have a 'nonnull' attribute in the function prototype. |
| return SLCT_UncheckedLiteral; |
| |
| switch (E->getStmtClass()) { |
| case Stmt::BinaryConditionalOperatorClass: |
| case Stmt::ConditionalOperatorClass: { |
| // The expression is a literal if both sub-expressions were, and it was |
| // completely checked only if both sub-expressions were checked. |
| const AbstractConditionalOperator *C = |
| cast<AbstractConditionalOperator>(E); |
| StringLiteralCheckType Left = |
| checkFormatStringExpr(S, C->getTrueExpr(), Args, |
| HasVAListArg, format_idx, firstDataArg, |
| Type, CallType, InFunctionCall, CheckedVarArgs); |
| if (Left == SLCT_NotALiteral) |
| return SLCT_NotALiteral; |
| StringLiteralCheckType Right = |
| checkFormatStringExpr(S, C->getFalseExpr(), Args, |
| HasVAListArg, format_idx, firstDataArg, |
| Type, CallType, InFunctionCall, CheckedVarArgs); |
| return Left < Right ? Left : Right; |
| } |
| |
| case Stmt::ImplicitCastExprClass: { |
| E = cast<ImplicitCastExpr>(E)->getSubExpr(); |
| goto tryAgain; |
| } |
| |
| case Stmt::OpaqueValueExprClass: |
| if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { |
| E = src; |
| goto tryAgain; |
| } |
| return SLCT_NotALiteral; |
| |
| case Stmt::PredefinedExprClass: |
| // While __func__, etc., are technically not string literals, they |
| // cannot contain format specifiers and thus are not a security |
| // liability. |
| return SLCT_UncheckedLiteral; |
| |
| case Stmt::DeclRefExprClass: { |
| const DeclRefExpr *DR = cast<DeclRefExpr>(E); |
| |
| // As an exception, do not flag errors for variables binding to |
| // const string literals. |
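| // For example (illustrative): |
| //   static const char Fmt[] = "%d %s\n"; |
| //   printf(Fmt, I, S); // checked through Fmt's initializer; no warning |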
| if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { |
| bool isConstant = false; |
| QualType T = DR->getType(); |
| |
| if (const ArrayType *AT = S.Context.getAsArrayType(T)) { |
| isConstant = AT->getElementType().isConstant(S.Context); |
| } else if (const PointerType *PT = T->getAs<PointerType>()) { |
| isConstant = T.isConstant(S.Context) && |
| PT->getPointeeType().isConstant(S.Context); |
| } else if (T->isObjCObjectPointerType()) { |
| // In ObjC, there is usually no "const ObjectPointer" type, |
| // so don't check if the pointee type is constant. |
| isConstant = T.isConstant(S.Context); |
| } |
| |
| if (isConstant) { |
| if (const Expr *Init = VD->getAnyInitializer()) { |
| // Look through initializers like const char c[] = { "foo" } |
| if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { |
| if (InitList->isStringLiteralInit()) |
| Init = InitList->getInit(0)->IgnoreParenImpCasts(); |
| } |
| return checkFormatStringExpr(S, Init, Args, |
| HasVAListArg, format_idx, |
| firstDataArg, Type, CallType, |
| /*InFunctionCall*/false, CheckedVarArgs); |
| } |
| } |
| |
| // For vprintf* functions (i.e., HasVAListArg==true), we add a |
| // special check to see if the format string is a function parameter |
| // of the function calling the printf function. If the function |
| // has an attribute indicating it is a printf-like function, then we |
| // should suppress warnings concerning non-literals being used in a call |
| // to a vprintf function. For example: |
| // |
| // void |
| //  logmessage(char const *fmt __attribute__((format(printf, 1, 2))), ...){ |
| // va_list ap; |
| // va_start(ap, fmt); |
| // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". |
| // ... |
| // } |
| if (HasVAListArg) { |
| if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { |
| if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { |
| int PVIndex = PV->getFunctionScopeIndex() + 1; |
| for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { |
| // Adjust for the implicit 'this' parameter of C++ instance methods. |
| if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) |
| if (MD->isInstance()) |
| ++PVIndex; |
| // We also check if the formats are compatible. |
| // We can't pass a 'scanf' string to a 'printf' function. |
| if (PVIndex == PVFormat->getFormatIdx() && |
| Type == S.GetFormatStringType(PVFormat)) |
| return SLCT_UncheckedLiteral; |
| } |
| } |
| } |
| } |
| } |
| |
| return SLCT_NotALiteral; |
| } |
| |
| case Stmt::CallExprClass: |
| case Stmt::CXXMemberCallExprClass: { |
| const CallExpr *CE = cast<CallExpr>(E); |
| if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { |
| if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) { |
| unsigned ArgIndex = FA->getFormatIdx(); |
| if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) |
| if (MD->isInstance()) |
| --ArgIndex; |
| const Expr *Arg = CE->getArg(ArgIndex - 1); |
| |
| return checkFormatStringExpr(S, Arg, Args, |
| HasVAListArg, format_idx, firstDataArg, |
| Type, CallType, InFunctionCall, |
| CheckedVarArgs); |
| } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { |
| unsigned BuiltinID = FD->getBuiltinID(); |
| if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || |
| BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { |
| const Expr *Arg = CE->getArg(0); |
| return checkFormatStringExpr(S, Arg, Args, |
| HasVAListArg, format_idx, |
| firstDataArg, Type, CallType, |
| InFunctionCall, CheckedVarArgs); |
| } |
| } |
| } |
| |
| return SLCT_NotALiteral; |
| } |
| case Stmt::ObjCStringLiteralClass: |
| case Stmt::StringLiteralClass: { |
| const StringLiteral *StrE = NULL; |
| |
| if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) |
| StrE = ObjCFExpr->getString(); |
| else |
| StrE = cast<StringLiteral>(E); |
| |
| if (StrE) { |
| S.CheckFormatString(StrE, E, Args, HasVAListArg, format_idx, firstDataArg, |
| Type, InFunctionCall, CallType, CheckedVarArgs); |
| return SLCT_CheckedLiteral; |
| } |
| |
| return SLCT_NotALiteral; |
| } |
| |
| default: |
| return SLCT_NotALiteral; |
| } |
| } |
| |
| Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { |
| return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) |
| .Case("scanf", FST_Scanf) |
| .Cases("printf", "printf0", FST_Printf) |
| .Cases("NSString", "CFString", FST_NSString) |
| .Case("strftime", FST_Strftime) |
| .Case("strfmon", FST_Strfmon) |
| .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) |
| .Default(FST_Unknown); |
| } |
| |
| /// CheckFormatArguments - Check calls to printf and scanf (and similar |
| /// functions) for correct use of format strings. |
| /// Returns true if a format string has been fully checked. |
| bool Sema::CheckFormatArguments(const FormatAttr *Format, |
| ArrayRef<const Expr *> Args, |
| bool IsCXXMember, |
| VariadicCallType CallType, |
| SourceLocation Loc, SourceRange Range, |
| llvm::SmallBitVector &CheckedVarArgs) { |
| FormatStringInfo FSI; |
| if (getFormatStringInfo(Format, IsCXXMember, &FSI)) |
| return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, |
| FSI.FirstDataArg, GetFormatStringType(Format), |
| CallType, Loc, Range, CheckedVarArgs); |
| return false; |
| } |
| |
| bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, |
| bool HasVAListArg, unsigned format_idx, |
| unsigned firstDataArg, FormatStringType Type, |
| VariadicCallType CallType, |
| SourceLocation Loc, SourceRange Range, |
| llvm::SmallBitVector &CheckedVarArgs) { |
| // CHECK: printf/scanf-like function is called with no format string. |
| if (format_idx >= Args.size()) { |
| Diag(Loc, diag::warn_missing_format_string) << Range; |
| return false; |
| } |
| |
| const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); |
| |
| // CHECK: format string is not a string literal. |
| // |
| // Dynamically generated format strings are difficult to |
| // automatically vet at compile time. Requiring that format strings |
| // are string literals: (1) permits the checking of format strings by |
| // the compiler and thereby (2) can practically remove the source of |
| // many format string exploits. |
| |
| // The format string can be either an ObjC string (e.g. @"%d") or a |
| // C string (e.g. "%d"). ObjC strings use the same format specifiers as |
| // C strings, so we can use the same format string checking logic for both. |
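| // |
| // For example (illustrative): |
| //   printf("%d\n", X);  // literal: fully checked against the arguments |
| //   printf(UserFmt, X); // non-literal: diagnosed below |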
| StringLiteralCheckType CT = |
| checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, |
| format_idx, firstDataArg, Type, CallType, |
| /*InFunctionCall*/true, CheckedVarArgs); |
| if (CT != SLCT_NotALiteral) |
| // Literal format string found, check done! |
| return CT == SLCT_CheckedLiteral; |
| |
| // Strftime is a special case, as it always uses a single 'time' argument, |
| // so it is safe to pass a non-literal string. |
| if (Type == FST_Strftime) |
| return false; |
| |
| // Do not emit diag when the string param is a macro expansion and the |
| // format is either NSString or CFString. This is a hack to prevent |
| // diag when using the NSLocalizedString and CFCopyLocalizedString macros |
| // which are usually used in place of NS and CF string literals. |
| if (Type == FST_NSString && |
| SourceMgr.isInSystemMacro(Args[format_idx]->getLocStart())) |
| return false; |
| |
| // If there are no data arguments, warn with -Wformat-security; otherwise |
| // warn only with -Wformat-nonliteral. |
| if (Args.size() == firstDataArg) |
| Diag(Args[format_idx]->getLocStart(), |
| diag::warn_format_nonliteral_noargs) |
| << OrigFormatExpr->getSourceRange(); |
| else |
| Diag(Args[format_idx]->getLocStart(), |
| diag::warn_format_nonliteral) |
| << OrigFormatExpr->getSourceRange(); |
| return false; |
| } |
| |
| namespace { |
| class CheckFormatHandler : public analyze_format_string::FormatStringHandler { |
| protected: |
| Sema &S; |
| const StringLiteral *FExpr; |
| const Expr *OrigFormatExpr; |
| const unsigned FirstDataArg; |
| const unsigned NumDataArgs; |
| const char *Beg; // Start of format string. |
| const bool HasVAListArg; |
| ArrayRef<const Expr *> Args; |
| unsigned FormatIdx; |
| llvm::SmallBitVector CoveredArgs; |
| bool usesPositionalArgs; |
| bool atFirstArg; |
| bool inFunctionCall; |
| Sema::VariadicCallType CallType; |
| llvm::SmallBitVector &CheckedVarArgs; |
| public: |
| CheckFormatHandler(Sema &s, const StringLiteral *fexpr, |
| const Expr *origFormatExpr, unsigned firstDataArg, |
| unsigned numDataArgs, const char *beg, bool hasVAListArg, |
| ArrayRef<const Expr *> Args, |
| unsigned formatIdx, bool inFunctionCall, |
| Sema::VariadicCallType callType, |
| llvm::SmallBitVector &CheckedVarArgs) |
| : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), |
| FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), |
| Beg(beg), HasVAListArg(hasVAListArg), |
| Args(Args), FormatIdx(formatIdx), |
| usesPositionalArgs(false), atFirstArg(true), |
| inFunctionCall(inFunctionCall), CallType(callType), |
| CheckedVarArgs(CheckedVarArgs) { |
| CoveredArgs.resize(numDataArgs); |
| CoveredArgs.reset(); |
| } |
| |
| void DoneProcessing(); |
| |
| void HandleIncompleteSpecifier(const char *startSpecifier, |
| unsigned specifierLen) override; |
| |
| void HandleInvalidLengthModifier( |
| const analyze_format_string::FormatSpecifier &FS, |
| const analyze_format_string::ConversionSpecifier &CS, |
| const char *startSpecifier, unsigned specifierLen, |
| unsigned DiagID); |
| |
| void HandleNonStandardLengthModifier( |
| const analyze_format_string::FormatSpecifier &FS, |
| const char *startSpecifier, unsigned specifierLen); |
| |
| void HandleNonStandardConversionSpecifier( |
| const analyze_format_string::ConversionSpecifier &CS, |
| const char *startSpecifier, unsigned specifierLen); |
| |
| void HandlePosition(const char *startPos, unsigned posLen) override; |
| |
| void HandleInvalidPosition(const char *startSpecifier, |
| unsigned specifierLen, |
| analyze_format_string::PositionContext p) override; |
| |
| void HandleZeroPosition(const char *startPos, unsigned posLen) override; |
| |
| void HandleNullChar(const char *nullCharacter) override; |
| |
| template <typename Range> |
| static void EmitFormatDiagnostic(Sema &S, bool inFunctionCall, |
| const Expr *ArgumentExpr, |
| PartialDiagnostic PDiag, |
| SourceLocation StringLoc, |
| bool IsStringLocation, Range StringRange, |
| ArrayRef<FixItHint> Fixit = None); |
| |
| protected: |
| bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, |
| const char *startSpec, |
| unsigned specifierLen, |
| const char *csStart, unsigned csLen); |
| |
| void HandlePositionalNonpositionalArgs(SourceLocation Loc, |
| const char *startSpec, |
| unsigned specifierLen); |
| |
| SourceRange getFormatStringRange(); |
| CharSourceRange getSpecifierRange(const char *startSpecifier, |
| unsigned specifierLen); |
| SourceLocation getLocationOfByte(const char *x); |
| |
| const Expr *getDataArg(unsigned i) const; |
| |
| bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, |
| const analyze_format_string::ConversionSpecifier &CS, |
| const char *startSpecifier, unsigned specifierLen, |
| unsigned argIndex); |
| |
| template <typename Range> |
| void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, |
| bool IsStringLocation, Range StringRange, |
| ArrayRef<FixItHint> Fixit = None); |
| }; |
| } |
| |
| SourceRange CheckFormatHandler::getFormatStringRange() { |
| return OrigFormatExpr->getSourceRange(); |
| } |
| |
| CharSourceRange CheckFormatHandler:: |
| getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { |
| SourceLocation Start = getLocationOfByte(startSpecifier); |
| SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); |
| |
| // Advance the end SourceLocation by one due to half-open ranges. |
| End = End.getLocWithOffset(1); |
| |
| return CharSourceRange::getCharRange(Start, End); |
| } |
| |
| SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { |
| return S.getLocationOfStringLiteralByte(FExpr, x - Beg); |
| } |
| |
| void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, |
| unsigned specifierLen){ |
| EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), |
| getLocationOfByte(startSpecifier), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen)); |
| } |
| |
| void CheckFormatHandler::HandleInvalidLengthModifier( |
| const analyze_format_string::FormatSpecifier &FS, |
| const analyze_format_string::ConversionSpecifier &CS, |
| const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { |
| using namespace analyze_format_string; |
| |
| const LengthModifier &LM = FS.getLengthModifier(); |
| CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); |
| |
| // See if we know how to fix this length modifier. |
| Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); |
| if (FixedLM) { |
| EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), |
| getLocationOfByte(LM.getStart()), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen)); |
| |
| S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) |
| << FixedLM->toString() |
| << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); |
| |
| } else { |
| FixItHint Hint; |
| if (DiagID == diag::warn_format_nonsensical_length) |
| Hint = FixItHint::CreateRemoval(LMRange); |
| |
| EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), |
| getLocationOfByte(LM.getStart()), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen), |
| Hint); |
| } |
| } |
| |
| void CheckFormatHandler::HandleNonStandardLengthModifier( |
| const analyze_format_string::FormatSpecifier &FS, |
| const char *startSpecifier, unsigned specifierLen) { |
| using namespace analyze_format_string; |
| |
| const LengthModifier &LM = FS.getLengthModifier(); |
| CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); |
| |
| // See if we know how to fix this length modifier. |
| Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); |
| if (FixedLM) { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) |
| << LM.toString() << 0, |
| getLocationOfByte(LM.getStart()), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen)); |
| |
| S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) |
| << FixedLM->toString() |
| << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); |
| |
| } else { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) |
| << LM.toString() << 0, |
| getLocationOfByte(LM.getStart()), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen)); |
| } |
| } |
| |
| void CheckFormatHandler::HandleNonStandardConversionSpecifier( |
| const analyze_format_string::ConversionSpecifier &CS, |
| const char *startSpecifier, unsigned specifierLen) { |
| using namespace analyze_format_string; |
| |
| // See if we know how to fix this conversion specifier. |
| Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); |
| if (FixedCS) { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) |
| << CS.toString() << /*conversion specifier*/1, |
| getLocationOfByte(CS.getStart()), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen)); |
| |
| CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); |
| S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) |
| << FixedCS->toString() |
| << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); |
| } else { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) |
| << CS.toString() << /*conversion specifier*/1, |
| getLocationOfByte(CS.getStart()), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen)); |
| } |
| } |
| |
| void CheckFormatHandler::HandlePosition(const char *startPos, |
| unsigned posLen) { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), |
| getLocationOfByte(startPos), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startPos, posLen)); |
| } |
| |
| void |
| CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, |
| analyze_format_string::PositionContext p) { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) |
| << (unsigned) p, |
| getLocationOfByte(startPos), /*IsStringLocation*/true, |
| getSpecifierRange(startPos, posLen)); |
| } |
| |
| void CheckFormatHandler::HandleZeroPosition(const char *startPos, |
| unsigned posLen) { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), |
| getLocationOfByte(startPos), |
| /*IsStringLocation*/true, |
| getSpecifierRange(startPos, posLen)); |
| } |
| |
| void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { |
| if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { |
| // The presence of a null character is likely an error. |
| EmitFormatDiagnostic( |
| S.PDiag(diag::warn_printf_format_string_contains_null_char), |
| getLocationOfByte(nullCharacter), /*IsStringLocation*/true, |
| getFormatStringRange()); |
| } |
| } |
| |
| // Note that this may return NULL if there was an error parsing or building |
| // one of the argument expressions. |
| const Expr *CheckFormatHandler::getDataArg(unsigned i) const { |
| return Args[FirstDataArg + i]; |
| } |
| |
| void CheckFormatHandler::DoneProcessing() { |
| // Does the number of data arguments exceed the number of |
| // format conversions in the format string? |
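| // For example (illustrative), printf("%d\n", X, Y) leaves Y uncovered and |
| // triggers the 'data argument not used' warning below. |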
| if (!HasVAListArg) { |
| // Find any arguments that weren't covered. |
| CoveredArgs.flip(); |
| signed notCoveredArg = CoveredArgs.find_first(); |
| if (notCoveredArg >= 0) { |
| assert((unsigned)notCoveredArg < NumDataArgs); |
| if (const Expr *E = getDataArg((unsigned) notCoveredArg)) { |
| SourceLocation Loc = E->getLocStart(); |
| if (!S.getSourceManager().isInSystemMacro(Loc)) { |
| EmitFormatDiagnostic(S.PDiag(diag::warn_printf_data_arg_not_used), |
| Loc, /*IsStringLocation*/false, |
| getFormatStringRange()); |
| } |
| } |
| } |
| } |
| } |
| |
| bool |
| CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, |
| SourceLocation Loc, |
| const char *startSpec, |
| unsigned specifierLen, |
| const char *csStart, |
| unsigned csLen) { |
| |
| bool keepGoing = true; |
| if (argIndex < NumDataArgs) { |
| // Consider the argument covered, even though the specifier doesn't |
| // make sense. |
| CoveredArgs.set(argIndex); |
| } |
| else { |
| // If argIndex exceeds the number of data arguments we |
| // don't issue a warning because that is just a cascade of warnings (and |
| // they may have intended '%%' anyway). We don't want to continue processing |
| // the format string after this point, however, as we will likely just get |
| // gibberish when trying to match arguments. |
| keepGoing = false; |
| } |
| |
| EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_conversion) |
| << StringRef(csStart, csLen), |
| Loc, /*IsStringLocation*/true, |
| getSpecifierRange(startSpec, specifierLen)); |
| |
| return keepGoing; |
| } |
| |
| void |
| CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, |
| const char *startSpec, |
| unsigned specifierLen) { |
| EmitFormatDiagnostic( |
| S.PDiag(diag::warn_format_mix_positional_nonpositional_args), |
| Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); |
| } |
| |
| bool |
| CheckFormatHandler::CheckNumArgs( |
| const analyze_format_string::FormatSpecifier &FS, |
| const analyze_format_string::ConversionSpecifier &CS, |
| const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { |
| |
| if (argIndex >= NumDataArgs) { |
| PartialDiagnostic PDiag = FS.usesPositionalArg() |
| ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) |
| << (argIndex+1) << NumDataArgs) |
| : S.PDiag(diag::warn_printf_insufficient_data_args); |
| EmitFormatDiagnostic( |
| PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, |
| getSpecifierRange(startSpecifier, specifierLen)); |
| return false; |
| } |
| return true; |
| } |
| |
| template<typename Range> |
| void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, |
| SourceLocation Loc, |
| bool IsStringLocation, |
| Range StringRange, |
| ArrayRef<FixItHint> FixIt) { |
| EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, |
| Loc, IsStringLocation, StringRange, FixIt); |
| } |
| |
| /// \brief If the format string is not within the function call, emit a note |
| /// so that both the function call and the format string appear in the |
| /// diagnostic messages. |
| /// |
| /// \param InFunctionCall if true, the format string is within the function |
| /// call and only one diagnostic message will be produced. Otherwise, an |
| /// extra note will be emitted pointing to location of the format string. |
| /// |
| /// \param ArgumentExpr the expression that is passed as the format string |
| /// argument in the function call. Used for getting locations when two |
| /// diagnostics are emitted. |
| /// |
| /// \param PDiag the callee should already have provided any strings for the |
| /// diagnostic message. This function only adds locations and fixits |
| /// to diagnostics. |
| /// |
| /// \param Loc primary location for diagnostic. If two diagnostics are |
| /// required, one will be at Loc and a new SourceLocation will be created for |
| /// the other one. |
| /// |
| /// \param IsStringLocation if true, Loc points to the format string and is |
| /// used for the note. Otherwise, Loc points to the argument list and will |
| /// be used with PDiag. |
| /// |
| /// \param StringRange some or all of the string to highlight. This is |
| /// templated so it can accept either a CharSourceRange or a SourceRange. |
| /// |
| /// \param FixIt optional fix it hint for the format string. |
| template<typename Range> |
| void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall, |
| const Expr *ArgumentExpr, |
|