// SemaChecking.cpp — snapshot at revision f067d8eecfd56a56b4192882bf86d0857c92dcc5
1//===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements extra semantic analysis beyond what is enforced 11// by the C type system. 12// 13//===----------------------------------------------------------------------===// 14 15#include "Sema.h" 16#include "clang/Analysis/CFG.h" 17#include "clang/Analysis/AnalysisContext.h" 18#include "clang/Analysis/Analyses/PrintfFormatString.h" 19#include "clang/AST/ASTContext.h" 20#include "clang/AST/CharUnits.h" 21#include "clang/AST/DeclObjC.h" 22#include "clang/AST/ExprCXX.h" 23#include "clang/AST/ExprObjC.h" 24#include "clang/AST/DeclObjC.h" 25#include "clang/AST/StmtCXX.h" 26#include "clang/AST/StmtObjC.h" 27#include "clang/Lex/LiteralSupport.h" 28#include "clang/Lex/Preprocessor.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/STLExtras.h" 31#include <limits> 32#include <queue> 33using namespace clang; 34 35/// getLocationOfStringLiteralByte - Return a source location that points to the 36/// specified byte of the specified string literal. 37/// 38/// Strings are amazingly complex. They can be formed from multiple tokens and 39/// can have escape sequences in them in addition to the usual trigraph and 40/// escaped newline business. This routine handles this complexity. 41/// 42SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 43 unsigned ByteNo) const { 44 assert(!SL->isWide() && "This doesn't work for wide strings yet"); 45 46 // Loop over all of the tokens in this string until we find the one that 47 // contains the byte we're looking for. 
48 unsigned TokNo = 0; 49 while (1) { 50 assert(TokNo < SL->getNumConcatenated() && "Invalid byte number!"); 51 SourceLocation StrTokLoc = SL->getStrTokenLoc(TokNo); 52 53 // Get the spelling of the string so that we can get the data that makes up 54 // the string literal, not the identifier for the macro it is potentially 55 // expanded through. 56 SourceLocation StrTokSpellingLoc = SourceMgr.getSpellingLoc(StrTokLoc); 57 58 // Re-lex the token to get its length and original spelling. 59 std::pair<FileID, unsigned> LocInfo = 60 SourceMgr.getDecomposedLoc(StrTokSpellingLoc); 61 std::pair<const char *,const char *> Buffer = 62 SourceMgr.getBufferData(LocInfo.first); 63 const char *StrData = Buffer.first+LocInfo.second; 64 65 // Create a langops struct and enable trigraphs. This is sufficient for 66 // relexing tokens. 67 LangOptions LangOpts; 68 LangOpts.Trigraphs = true; 69 70 // Create a lexer starting at the beginning of this token. 71 Lexer TheLexer(StrTokSpellingLoc, LangOpts, Buffer.first, StrData, 72 Buffer.second); 73 Token TheTok; 74 TheLexer.LexFromRawLexer(TheTok); 75 76 // Use the StringLiteralParser to compute the length of the string in bytes. 77 StringLiteralParser SLP(&TheTok, 1, PP); 78 unsigned TokNumBytes = SLP.GetStringLength(); 79 80 // If the byte is in this token, return the location of the byte. 81 if (ByteNo < TokNumBytes || 82 (ByteNo == TokNumBytes && TokNo == SL->getNumConcatenated())) { 83 unsigned Offset = 84 StringLiteralParser::getOffsetOfStringByte(TheTok, ByteNo, PP); 85 86 // Now that we know the offset of the token in the spelling, use the 87 // preprocessor to get the offset in the original source. 88 return PP.AdvanceToTokenCharacter(StrTokLoc, Offset); 89 } 90 91 // Move to the next string token. 92 ++TokNo; 93 ByteNo -= TokNumBytes; 94 } 95} 96 97/// CheckablePrintfAttr - does a function call have a "printf" attribute 98/// and arguments that merit checking? 
99bool Sema::CheckablePrintfAttr(const FormatAttr *Format, CallExpr *TheCall) { 100 if (Format->getType() == "printf") return true; 101 if (Format->getType() == "printf0") { 102 // printf0 allows null "format" string; if so don't check format/args 103 unsigned format_idx = Format->getFormatIdx() - 1; 104 // Does the index refer to the implicit object argument? 105 if (isa<CXXMemberCallExpr>(TheCall)) { 106 if (format_idx == 0) 107 return false; 108 --format_idx; 109 } 110 if (format_idx < TheCall->getNumArgs()) { 111 Expr *Format = TheCall->getArg(format_idx)->IgnoreParenCasts(); 112 if (!Format->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) 113 return true; 114 } 115 } 116 return false; 117} 118 119Action::OwningExprResult 120Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 121 OwningExprResult TheCallResult(Owned(TheCall)); 122 123 switch (BuiltinID) { 124 case Builtin::BI__builtin___CFStringMakeConstantString: 125 assert(TheCall->getNumArgs() == 1 && 126 "Wrong # arguments to builtin CFStringMakeConstantString"); 127 if (CheckObjCString(TheCall->getArg(0))) 128 return ExprError(); 129 break; 130 case Builtin::BI__builtin_stdarg_start: 131 case Builtin::BI__builtin_va_start: 132 if (SemaBuiltinVAStart(TheCall)) 133 return ExprError(); 134 break; 135 case Builtin::BI__builtin_isgreater: 136 case Builtin::BI__builtin_isgreaterequal: 137 case Builtin::BI__builtin_isless: 138 case Builtin::BI__builtin_islessequal: 139 case Builtin::BI__builtin_islessgreater: 140 case Builtin::BI__builtin_isunordered: 141 if (SemaBuiltinUnorderedCompare(TheCall)) 142 return ExprError(); 143 break; 144 case Builtin::BI__builtin_fpclassify: 145 if (SemaBuiltinFPClassification(TheCall, 6)) 146 return ExprError(); 147 break; 148 case Builtin::BI__builtin_isfinite: 149 case Builtin::BI__builtin_isinf: 150 case Builtin::BI__builtin_isinf_sign: 151 case Builtin::BI__builtin_isnan: 152 case Builtin::BI__builtin_isnormal: 153 if 
(SemaBuiltinFPClassification(TheCall, 1)) 154 return ExprError(); 155 break; 156 case Builtin::BI__builtin_return_address: 157 case Builtin::BI__builtin_frame_address: 158 if (SemaBuiltinStackAddress(TheCall)) 159 return ExprError(); 160 break; 161 case Builtin::BI__builtin_eh_return_data_regno: 162 if (SemaBuiltinEHReturnDataRegNo(TheCall)) 163 return ExprError(); 164 break; 165 case Builtin::BI__builtin_shufflevector: 166 return SemaBuiltinShuffleVector(TheCall); 167 // TheCall will be freed by the smart pointer here, but that's fine, since 168 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 169 case Builtin::BI__builtin_prefetch: 170 if (SemaBuiltinPrefetch(TheCall)) 171 return ExprError(); 172 break; 173 case Builtin::BI__builtin_object_size: 174 if (SemaBuiltinObjectSize(TheCall)) 175 return ExprError(); 176 break; 177 case Builtin::BI__builtin_longjmp: 178 if (SemaBuiltinLongjmp(TheCall)) 179 return ExprError(); 180 break; 181 case Builtin::BI__sync_fetch_and_add: 182 case Builtin::BI__sync_fetch_and_sub: 183 case Builtin::BI__sync_fetch_and_or: 184 case Builtin::BI__sync_fetch_and_and: 185 case Builtin::BI__sync_fetch_and_xor: 186 case Builtin::BI__sync_fetch_and_nand: 187 case Builtin::BI__sync_add_and_fetch: 188 case Builtin::BI__sync_sub_and_fetch: 189 case Builtin::BI__sync_and_and_fetch: 190 case Builtin::BI__sync_or_and_fetch: 191 case Builtin::BI__sync_xor_and_fetch: 192 case Builtin::BI__sync_nand_and_fetch: 193 case Builtin::BI__sync_val_compare_and_swap: 194 case Builtin::BI__sync_bool_compare_and_swap: 195 case Builtin::BI__sync_lock_test_and_set: 196 case Builtin::BI__sync_lock_release: 197 if (SemaBuiltinAtomicOverloaded(TheCall)) 198 return ExprError(); 199 break; 200 } 201 202 return move(TheCallResult); 203} 204 205/// CheckFunctionCall - Check a direct function call for various correctness 206/// and safety properties not strictly enforced by the C type system. 
207bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) { 208 // Get the IdentifierInfo* for the called function. 209 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 210 211 // None of the checks below are needed for functions that don't have 212 // simple names (e.g., C++ conversion functions). 213 if (!FnInfo) 214 return false; 215 216 // FIXME: This mechanism should be abstracted to be less fragile and 217 // more efficient. For example, just map function ids to custom 218 // handlers. 219 220 // Printf checking. 221 if (const FormatAttr *Format = FDecl->getAttr<FormatAttr>()) { 222 if (CheckablePrintfAttr(Format, TheCall)) { 223 bool HasVAListArg = Format->getFirstArg() == 0; 224 if (!HasVAListArg) { 225 if (const FunctionProtoType *Proto 226 = FDecl->getType()->getAs<FunctionProtoType>()) 227 HasVAListArg = !Proto->isVariadic(); 228 } 229 CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1, 230 HasVAListArg ? 0 : Format->getFirstArg() - 1); 231 } 232 } 233 234 for (const NonNullAttr *NonNull = FDecl->getAttr<NonNullAttr>(); NonNull; 235 NonNull = NonNull->getNext<NonNullAttr>()) 236 CheckNonNullArguments(NonNull, TheCall); 237 238 return false; 239} 240 241bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) { 242 // Printf checking. 
243 const FormatAttr *Format = NDecl->getAttr<FormatAttr>(); 244 if (!Format) 245 return false; 246 247 const VarDecl *V = dyn_cast<VarDecl>(NDecl); 248 if (!V) 249 return false; 250 251 QualType Ty = V->getType(); 252 if (!Ty->isBlockPointerType()) 253 return false; 254 255 if (!CheckablePrintfAttr(Format, TheCall)) 256 return false; 257 258 bool HasVAListArg = Format->getFirstArg() == 0; 259 if (!HasVAListArg) { 260 const FunctionType *FT = 261 Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>(); 262 if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT)) 263 HasVAListArg = !Proto->isVariadic(); 264 } 265 CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1, 266 HasVAListArg ? 0 : Format->getFirstArg() - 1); 267 268 return false; 269} 270 271/// SemaBuiltinAtomicOverloaded - We have a call to a function like 272/// __sync_fetch_and_add, which is an overloaded function based on the pointer 273/// type of its first argument. The main ActOnCallExpr routines have already 274/// promoted the types of arguments because all of these calls are prototyped as 275/// void(...). 276/// 277/// This function goes through and does final semantic checking for these 278/// builtins, 279bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { 280 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 281 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 282 283 // Ensure that we have at least one argument to do type inference from. 284 if (TheCall->getNumArgs() < 1) 285 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 286 << 0 << TheCall->getCallee()->getSourceRange(); 287 288 // Inspect the first argument of the atomic builtin. This should always be 289 // a pointer type, whose element is an integral scalar or pointer type. 290 // Because it is a pointer type, we don't have to worry about any implicit 291 // casts here. 
292 Expr *FirstArg = TheCall->getArg(0); 293 if (!FirstArg->getType()->isPointerType()) 294 return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) 295 << FirstArg->getType() << FirstArg->getSourceRange(); 296 297 QualType ValType = FirstArg->getType()->getAs<PointerType>()->getPointeeType(); 298 if (!ValType->isIntegerType() && !ValType->isPointerType() && 299 !ValType->isBlockPointerType()) 300 return Diag(DRE->getLocStart(), 301 diag::err_atomic_builtin_must_be_pointer_intptr) 302 << FirstArg->getType() << FirstArg->getSourceRange(); 303 304 // We need to figure out which concrete builtin this maps onto. For example, 305 // __sync_fetch_and_add with a 2 byte object turns into 306 // __sync_fetch_and_add_2. 307#define BUILTIN_ROW(x) \ 308 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 309 Builtin::BI##x##_8, Builtin::BI##x##_16 } 310 311 static const unsigned BuiltinIndices[][5] = { 312 BUILTIN_ROW(__sync_fetch_and_add), 313 BUILTIN_ROW(__sync_fetch_and_sub), 314 BUILTIN_ROW(__sync_fetch_and_or), 315 BUILTIN_ROW(__sync_fetch_and_and), 316 BUILTIN_ROW(__sync_fetch_and_xor), 317 BUILTIN_ROW(__sync_fetch_and_nand), 318 319 BUILTIN_ROW(__sync_add_and_fetch), 320 BUILTIN_ROW(__sync_sub_and_fetch), 321 BUILTIN_ROW(__sync_and_and_fetch), 322 BUILTIN_ROW(__sync_or_and_fetch), 323 BUILTIN_ROW(__sync_xor_and_fetch), 324 BUILTIN_ROW(__sync_nand_and_fetch), 325 326 BUILTIN_ROW(__sync_val_compare_and_swap), 327 BUILTIN_ROW(__sync_bool_compare_and_swap), 328 BUILTIN_ROW(__sync_lock_test_and_set), 329 BUILTIN_ROW(__sync_lock_release) 330 }; 331#undef BUILTIN_ROW 332 333 // Determine the index of the size. 
334 unsigned SizeIndex; 335 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 336 case 1: SizeIndex = 0; break; 337 case 2: SizeIndex = 1; break; 338 case 4: SizeIndex = 2; break; 339 case 8: SizeIndex = 3; break; 340 case 16: SizeIndex = 4; break; 341 default: 342 return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size) 343 << FirstArg->getType() << FirstArg->getSourceRange(); 344 } 345 346 // Each of these builtins has one pointer argument, followed by some number of 347 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 348 // that we ignore. Find out which row of BuiltinIndices to read from as well 349 // as the number of fixed args. 350 unsigned BuiltinID = FDecl->getBuiltinID(); 351 unsigned BuiltinIndex, NumFixed = 1; 352 switch (BuiltinID) { 353 default: assert(0 && "Unknown overloaded atomic builtin!"); 354 case Builtin::BI__sync_fetch_and_add: BuiltinIndex = 0; break; 355 case Builtin::BI__sync_fetch_and_sub: BuiltinIndex = 1; break; 356 case Builtin::BI__sync_fetch_and_or: BuiltinIndex = 2; break; 357 case Builtin::BI__sync_fetch_and_and: BuiltinIndex = 3; break; 358 case Builtin::BI__sync_fetch_and_xor: BuiltinIndex = 4; break; 359 case Builtin::BI__sync_fetch_and_nand:BuiltinIndex = 5; break; 360 361 case Builtin::BI__sync_add_and_fetch: BuiltinIndex = 6; break; 362 case Builtin::BI__sync_sub_and_fetch: BuiltinIndex = 7; break; 363 case Builtin::BI__sync_and_and_fetch: BuiltinIndex = 8; break; 364 case Builtin::BI__sync_or_and_fetch: BuiltinIndex = 9; break; 365 case Builtin::BI__sync_xor_and_fetch: BuiltinIndex =10; break; 366 case Builtin::BI__sync_nand_and_fetch:BuiltinIndex =11; break; 367 368 case Builtin::BI__sync_val_compare_and_swap: 369 BuiltinIndex = 12; 370 NumFixed = 2; 371 break; 372 case Builtin::BI__sync_bool_compare_and_swap: 373 BuiltinIndex = 13; 374 NumFixed = 2; 375 break; 376 case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 14; break; 377 case Builtin::BI__sync_lock_release: 
378 BuiltinIndex = 15; 379 NumFixed = 0; 380 break; 381 } 382 383 // Now that we know how many fixed arguments we expect, first check that we 384 // have at least that many. 385 if (TheCall->getNumArgs() < 1+NumFixed) 386 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 387 << 0 << TheCall->getCallee()->getSourceRange(); 388 389 390 // Get the decl for the concrete builtin from this, we can tell what the 391 // concrete integer type we should convert to is. 392 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 393 const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID); 394 IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName); 395 FunctionDecl *NewBuiltinDecl = 396 cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID, 397 TUScope, false, DRE->getLocStart())); 398 const FunctionProtoType *BuiltinFT = 399 NewBuiltinDecl->getType()->getAs<FunctionProtoType>(); 400 ValType = BuiltinFT->getArgType(0)->getAs<PointerType>()->getPointeeType(); 401 402 // If the first type needs to be converted (e.g. void** -> int*), do it now. 403 if (BuiltinFT->getArgType(0) != FirstArg->getType()) { 404 ImpCastExprToType(FirstArg, BuiltinFT->getArgType(0), CastExpr::CK_BitCast); 405 TheCall->setArg(0, FirstArg); 406 } 407 408 // Next, walk the valid ones promoting to the right type. 409 for (unsigned i = 0; i != NumFixed; ++i) { 410 Expr *Arg = TheCall->getArg(i+1); 411 412 // If the argument is an implicit cast, then there was a promotion due to 413 // "...", just remove it now. 414 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) { 415 Arg = ICE->getSubExpr(); 416 ICE->setSubExpr(0); 417 ICE->Destroy(Context); 418 TheCall->setArg(i+1, Arg); 419 } 420 421 // GCC does an implicit conversion to the pointer or integer ValType. This 422 // can fail in some cases (1i -> int**), check for this error case now. 
423 CastExpr::CastKind Kind = CastExpr::CK_Unknown; 424 CXXMethodDecl *ConversionDecl = 0; 425 if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg, Kind, 426 ConversionDecl)) 427 return true; 428 429 // Okay, we have something that *can* be converted to the right type. Check 430 // to see if there is a potentially weird extension going on here. This can 431 // happen when you do an atomic operation on something like an char* and 432 // pass in 42. The 42 gets converted to char. This is even more strange 433 // for things like 45.123 -> char, etc. 434 // FIXME: Do this check. 435 ImpCastExprToType(Arg, ValType, Kind, /*isLvalue=*/false); 436 TheCall->setArg(i+1, Arg); 437 } 438 439 // Switch the DeclRefExpr to refer to the new decl. 440 DRE->setDecl(NewBuiltinDecl); 441 DRE->setType(NewBuiltinDecl->getType()); 442 443 // Set the callee in the CallExpr. 444 // FIXME: This leaks the original parens and implicit casts. 445 Expr *PromotedCall = DRE; 446 UsualUnaryConversions(PromotedCall); 447 TheCall->setCallee(PromotedCall); 448 449 450 // Change the result type of the call to match the result type of the decl. 451 TheCall->setType(NewBuiltinDecl->getResultType()); 452 return false; 453} 454 455 456/// CheckObjCString - Checks that the argument to the builtin 457/// CFString constructor is correct 458/// FIXME: GCC currently emits the following warning: 459/// "warning: input conversion stopped due to an input byte that does not 460/// belong to the input codeset UTF-8" 461/// Note: It might also make sense to do the UTF-16 conversion here (would 462/// simplify the backend). 
463bool Sema::CheckObjCString(Expr *Arg) { 464 Arg = Arg->IgnoreParenCasts(); 465 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 466 467 if (!Literal || Literal->isWide()) { 468 Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant) 469 << Arg->getSourceRange(); 470 return true; 471 } 472 473 const char *Data = Literal->getStrData(); 474 unsigned Length = Literal->getByteLength(); 475 476 for (unsigned i = 0; i < Length; ++i) { 477 if (!Data[i]) { 478 Diag(getLocationOfStringLiteralByte(Literal, i), 479 diag::warn_cfstring_literal_contains_nul_character) 480 << Arg->getSourceRange(); 481 break; 482 } 483 } 484 485 return false; 486} 487 488/// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity. 489/// Emit an error and return true on failure, return false on success. 490bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) { 491 Expr *Fn = TheCall->getCallee(); 492 if (TheCall->getNumArgs() > 2) { 493 Diag(TheCall->getArg(2)->getLocStart(), 494 diag::err_typecheck_call_too_many_args) 495 << 0 /*function call*/ << Fn->getSourceRange() 496 << SourceRange(TheCall->getArg(2)->getLocStart(), 497 (*(TheCall->arg_end()-1))->getLocEnd()); 498 return true; 499 } 500 501 if (TheCall->getNumArgs() < 2) { 502 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 503 << 0 /*function call*/; 504 } 505 506 // Determine whether the current function is variadic or not. 
507 bool isVariadic; 508 if (CurBlock) 509 isVariadic = CurBlock->isVariadic; 510 else if (getCurFunctionDecl()) { 511 if (FunctionProtoType* FTP = 512 dyn_cast<FunctionProtoType>(getCurFunctionDecl()->getType())) 513 isVariadic = FTP->isVariadic(); 514 else 515 isVariadic = false; 516 } else { 517 isVariadic = getCurMethodDecl()->isVariadic(); 518 } 519 520 if (!isVariadic) { 521 Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function); 522 return true; 523 } 524 525 // Verify that the second argument to the builtin is the last argument of the 526 // current function or method. 527 bool SecondArgIsLastNamedArgument = false; 528 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 529 530 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 531 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 532 // FIXME: This isn't correct for methods (results in bogus warning). 533 // Get the last formal in the current function. 534 const ParmVarDecl *LastArg; 535 if (CurBlock) 536 LastArg = *(CurBlock->TheDecl->param_end()-1); 537 else if (FunctionDecl *FD = getCurFunctionDecl()) 538 LastArg = *(FD->param_end()-1); 539 else 540 LastArg = *(getCurMethodDecl()->param_end()-1); 541 SecondArgIsLastNamedArgument = PV == LastArg; 542 } 543 } 544 545 if (!SecondArgIsLastNamedArgument) 546 Diag(TheCall->getArg(1)->getLocStart(), 547 diag::warn_second_parameter_of_va_start_not_last_named_argument); 548 return false; 549} 550 551/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 552/// friends. This is declared to take (...), so we have to check everything. 
553bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 554 if (TheCall->getNumArgs() < 2) 555 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 556 << 0 /*function call*/; 557 if (TheCall->getNumArgs() > 2) 558 return Diag(TheCall->getArg(2)->getLocStart(), 559 diag::err_typecheck_call_too_many_args) 560 << 0 /*function call*/ 561 << SourceRange(TheCall->getArg(2)->getLocStart(), 562 (*(TheCall->arg_end()-1))->getLocEnd()); 563 564 Expr *OrigArg0 = TheCall->getArg(0); 565 Expr *OrigArg1 = TheCall->getArg(1); 566 567 // Do standard promotions between the two arguments, returning their common 568 // type. 569 QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false); 570 571 // Make sure any conversions are pushed back into the call; this is 572 // type safe since unordered compare builtins are declared as "_Bool 573 // foo(...)". 574 TheCall->setArg(0, OrigArg0); 575 TheCall->setArg(1, OrigArg1); 576 577 if (OrigArg0->isTypeDependent() || OrigArg1->isTypeDependent()) 578 return false; 579 580 // If the common type isn't a real floating type, then the arguments were 581 // invalid for this operation. 582 if (!Res->isRealFloatingType()) 583 return Diag(OrigArg0->getLocStart(), 584 diag::err_typecheck_call_invalid_ordered_compare) 585 << OrigArg0->getType() << OrigArg1->getType() 586 << SourceRange(OrigArg0->getLocStart(), OrigArg1->getLocEnd()); 587 588 return false; 589} 590 591/// SemaBuiltinSemaBuiltinFPClassification - Handle functions like 592/// __builtin_isnan and friends. This is declared to take (...), so we have 593/// to check everything. We expect the last argument to be a floating point 594/// value. 
595bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 596 if (TheCall->getNumArgs() < NumArgs) 597 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 598 << 0 /*function call*/; 599 if (TheCall->getNumArgs() > NumArgs) 600 return Diag(TheCall->getArg(NumArgs)->getLocStart(), 601 diag::err_typecheck_call_too_many_args) 602 << 0 /*function call*/ 603 << SourceRange(TheCall->getArg(NumArgs)->getLocStart(), 604 (*(TheCall->arg_end()-1))->getLocEnd()); 605 606 Expr *OrigArg = TheCall->getArg(NumArgs-1); 607 608 if (OrigArg->isTypeDependent()) 609 return false; 610 611 // This operation requires a floating-point number 612 if (!OrigArg->getType()->isRealFloatingType()) 613 return Diag(OrigArg->getLocStart(), 614 diag::err_typecheck_call_invalid_unary_fp) 615 << OrigArg->getType() << OrigArg->getSourceRange(); 616 617 return false; 618} 619 620bool Sema::SemaBuiltinStackAddress(CallExpr *TheCall) { 621 // The signature for these builtins is exact; the only thing we need 622 // to check is that the argument is a constant. 623 SourceLocation Loc; 624 if (!TheCall->getArg(0)->isTypeDependent() && 625 !TheCall->getArg(0)->isValueDependent() && 626 !TheCall->getArg(0)->isIntegerConstantExpr(Context, &Loc)) 627 return Diag(Loc, diag::err_stack_const_level) << TheCall->getSourceRange(); 628 629 return false; 630} 631 632/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 633// This is declared to take (...), so we have to check everything. 
634Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 635 if (TheCall->getNumArgs() < 3) 636 return ExprError(Diag(TheCall->getLocEnd(), 637 diag::err_typecheck_call_too_few_args) 638 << 0 /*function call*/ << TheCall->getSourceRange()); 639 640 unsigned numElements = std::numeric_limits<unsigned>::max(); 641 if (!TheCall->getArg(0)->isTypeDependent() && 642 !TheCall->getArg(1)->isTypeDependent()) { 643 QualType FAType = TheCall->getArg(0)->getType(); 644 QualType SAType = TheCall->getArg(1)->getType(); 645 646 if (!FAType->isVectorType() || !SAType->isVectorType()) { 647 Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector) 648 << SourceRange(TheCall->getArg(0)->getLocStart(), 649 TheCall->getArg(1)->getLocEnd()); 650 return ExprError(); 651 } 652 653 if (!Context.hasSameUnqualifiedType(FAType, SAType)) { 654 Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector) 655 << SourceRange(TheCall->getArg(0)->getLocStart(), 656 TheCall->getArg(1)->getLocEnd()); 657 return ExprError(); 658 } 659 660 numElements = FAType->getAs<VectorType>()->getNumElements(); 661 if (TheCall->getNumArgs() != numElements+2) { 662 if (TheCall->getNumArgs() < numElements+2) 663 return ExprError(Diag(TheCall->getLocEnd(), 664 diag::err_typecheck_call_too_few_args) 665 << 0 /*function call*/ << TheCall->getSourceRange()); 666 return ExprError(Diag(TheCall->getLocEnd(), 667 diag::err_typecheck_call_too_many_args) 668 << 0 /*function call*/ << TheCall->getSourceRange()); 669 } 670 } 671 672 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 673 if (TheCall->getArg(i)->isTypeDependent() || 674 TheCall->getArg(i)->isValueDependent()) 675 continue; 676 677 llvm::APSInt Result(32); 678 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 679 return ExprError(Diag(TheCall->getLocStart(), 680 diag::err_shufflevector_nonconstant_argument) 681 << TheCall->getArg(i)->getSourceRange()); 682 683 if (Result.getActiveBits() > 64 || 
Result.getZExtValue() >= numElements*2) 684 return ExprError(Diag(TheCall->getLocStart(), 685 diag::err_shufflevector_argument_too_large) 686 << TheCall->getArg(i)->getSourceRange()); 687 } 688 689 llvm::SmallVector<Expr*, 32> exprs; 690 691 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 692 exprs.push_back(TheCall->getArg(i)); 693 TheCall->setArg(i, 0); 694 } 695 696 return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(), 697 exprs.size(), exprs[0]->getType(), 698 TheCall->getCallee()->getLocStart(), 699 TheCall->getRParenLoc())); 700} 701 702/// SemaBuiltinPrefetch - Handle __builtin_prefetch. 703// This is declared to take (const void*, ...) and can take two 704// optional constant int args. 705bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 706 unsigned NumArgs = TheCall->getNumArgs(); 707 708 if (NumArgs > 3) 709 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_many_args) 710 << 0 /*function call*/ << TheCall->getSourceRange(); 711 712 // Argument 0 is checked for us and the remaining arguments must be 713 // constant integers. 714 for (unsigned i = 1; i != NumArgs; ++i) { 715 Expr *Arg = TheCall->getArg(i); 716 if (Arg->isTypeDependent()) 717 continue; 718 719 if (!Arg->getType()->isIntegralType()) 720 return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_arg_type) 721 << Arg->getSourceRange(); 722 723 ImpCastExprToType(Arg, Context.IntTy, CastExpr::CK_IntegralCast); 724 TheCall->setArg(i, Arg); 725 726 if (Arg->isValueDependent()) 727 continue; 728 729 llvm::APSInt Result; 730 if (!Arg->isIntegerConstantExpr(Result, Context)) 731 return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_arg_ice) 732 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 733 734 // FIXME: gcc issues a warning and rewrites these to 0. These 735 // seems especially odd for the third argument since the default 736 // is 3. 
737 if (i == 1) { 738 if (Result.getLimitedValue() > 1) 739 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) 740 << "0" << "1" << Arg->getSourceRange(); 741 } else { 742 if (Result.getLimitedValue() > 3) 743 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) 744 << "0" << "3" << Arg->getSourceRange(); 745 } 746 } 747 748 return false; 749} 750 751/// SemaBuiltinEHReturnDataRegNo - Handle __builtin_eh_return_data_regno, the 752/// operand must be an integer constant. 753bool Sema::SemaBuiltinEHReturnDataRegNo(CallExpr *TheCall) { 754 llvm::APSInt Result; 755 if (!TheCall->getArg(0)->isIntegerConstantExpr(Result, Context)) 756 return Diag(TheCall->getLocStart(), diag::err_expr_not_ice) 757 << TheCall->getArg(0)->getSourceRange(); 758 759 return false; 760} 761 762 763/// SemaBuiltinObjectSize - Handle __builtin_object_size(void *ptr, 764/// int type). This simply type checks that type is one of the defined 765/// constants (0-3). 766// For compatability check 0-3, llvm only handles 0 and 2. 
767bool Sema::SemaBuiltinObjectSize(CallExpr *TheCall) { 768 Expr *Arg = TheCall->getArg(1); 769 if (Arg->isTypeDependent()) 770 return false; 771 772 QualType ArgType = Arg->getType(); 773 const BuiltinType *BT = ArgType->getAs<BuiltinType>(); 774 llvm::APSInt Result(32); 775 if (!BT || BT->getKind() != BuiltinType::Int) 776 return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument) 777 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 778 779 if (Arg->isValueDependent()) 780 return false; 781 782 if (!Arg->isIntegerConstantExpr(Result, Context)) { 783 return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument) 784 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 785 } 786 787 if (Result.getSExtValue() < 0 || Result.getSExtValue() > 3) { 788 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) 789 << "0" << "3" << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 790 } 791 792 return false; 793} 794 795/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 796/// This checks that val is a constant 1. 797bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 798 Expr *Arg = TheCall->getArg(1); 799 if (Arg->isTypeDependent() || Arg->isValueDependent()) 800 return false; 801 802 llvm::APSInt Result(32); 803 if (!Arg->isIntegerConstantExpr(Result, Context) || Result != 1) 804 return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val) 805 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 806 807 return false; 808} 809 810// Handle i > 1 ? 
"x" : "y", recursivelly 811bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall, 812 bool HasVAListArg, 813 unsigned format_idx, unsigned firstDataArg) { 814 if (E->isTypeDependent() || E->isValueDependent()) 815 return false; 816 817 switch (E->getStmtClass()) { 818 case Stmt::ConditionalOperatorClass: { 819 const ConditionalOperator *C = cast<ConditionalOperator>(E); 820 return SemaCheckStringLiteral(C->getTrueExpr(), TheCall, 821 HasVAListArg, format_idx, firstDataArg) 822 && SemaCheckStringLiteral(C->getRHS(), TheCall, 823 HasVAListArg, format_idx, firstDataArg); 824 } 825 826 case Stmt::ImplicitCastExprClass: { 827 const ImplicitCastExpr *Expr = cast<ImplicitCastExpr>(E); 828 return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg, 829 format_idx, firstDataArg); 830 } 831 832 case Stmt::ParenExprClass: { 833 const ParenExpr *Expr = cast<ParenExpr>(E); 834 return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg, 835 format_idx, firstDataArg); 836 } 837 838 case Stmt::DeclRefExprClass: { 839 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 840 841 // As an exception, do not flag errors for variables binding to 842 // const string literals. 843 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 844 bool isConstant = false; 845 QualType T = DR->getType(); 846 847 if (const ArrayType *AT = Context.getAsArrayType(T)) { 848 isConstant = AT->getElementType().isConstant(Context); 849 } else if (const PointerType *PT = T->getAs<PointerType>()) { 850 isConstant = T.isConstant(Context) && 851 PT->getPointeeType().isConstant(Context); 852 } 853 854 if (isConstant) { 855 if (const Expr *Init = VD->getAnyInitializer()) 856 return SemaCheckStringLiteral(Init, TheCall, 857 HasVAListArg, format_idx, firstDataArg); 858 } 859 860 // For vprintf* functions (i.e., HasVAListArg==true), we add a 861 // special check to see if the format string is a function parameter 862 // of the function calling the printf function. 
If the function 863 // has an attribute indicating it is a printf-like function, then we 864 // should suppress warnings concerning non-literals being used in a call 865 // to a vprintf function. For example: 866 // 867 // void 868 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 869 // va_list ap; 870 // va_start(ap, fmt); 871 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 872 // ... 873 // 874 // 875 // FIXME: We don't have full attribute support yet, so just check to see 876 // if the argument is a DeclRefExpr that references a parameter. We'll 877 // add proper support for checking the attribute later. 878 if (HasVAListArg) 879 if (isa<ParmVarDecl>(VD)) 880 return true; 881 } 882 883 return false; 884 } 885 886 case Stmt::CallExprClass: { 887 const CallExpr *CE = cast<CallExpr>(E); 888 if (const ImplicitCastExpr *ICE 889 = dyn_cast<ImplicitCastExpr>(CE->getCallee())) { 890 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) { 891 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DRE->getDecl())) { 892 if (const FormatArgAttr *FA = FD->getAttr<FormatArgAttr>()) { 893 unsigned ArgIndex = FA->getFormatIdx(); 894 const Expr *Arg = CE->getArg(ArgIndex - 1); 895 896 return SemaCheckStringLiteral(Arg, TheCall, HasVAListArg, 897 format_idx, firstDataArg); 898 } 899 } 900 } 901 } 902 903 return false; 904 } 905 case Stmt::ObjCStringLiteralClass: 906 case Stmt::StringLiteralClass: { 907 const StringLiteral *StrE = NULL; 908 909 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 910 StrE = ObjCFExpr->getString(); 911 else 912 StrE = cast<StringLiteral>(E); 913 914 if (StrE) { 915 CheckPrintfString(StrE, E, TheCall, HasVAListArg, format_idx, 916 firstDataArg); 917 return true; 918 } 919 920 return false; 921 } 922 923 default: 924 return false; 925 } 926} 927 928void 929Sema::CheckNonNullArguments(const NonNullAttr *NonNull, 930 const CallExpr *TheCall) { 931 for (NonNullAttr::iterator i = 
NonNull->begin(), e = NonNull->end(); 932 i != e; ++i) { 933 const Expr *ArgExpr = TheCall->getArg(*i); 934 if (ArgExpr->isNullPointerConstant(Context, 935 Expr::NPC_ValueDependentIsNotNull)) 936 Diag(TheCall->getCallee()->getLocStart(), diag::warn_null_arg) 937 << ArgExpr->getSourceRange(); 938 } 939} 940 941/// CheckPrintfArguments - Check calls to printf (and similar functions) for 942/// correct use of format strings. 943/// 944/// HasVAListArg - A predicate indicating whether the printf-like 945/// function is passed an explicit va_arg argument (e.g., vprintf) 946/// 947/// format_idx - The index into Args for the format string. 948/// 949/// Improper format strings to functions in the printf family can be 950/// the source of bizarre bugs and very serious security holes. A 951/// good source of information is available in the following paper 952/// (which includes additional references): 953/// 954/// FormatGuard: Automatic Protection From printf Format String 955/// Vulnerabilities, Proceedings of the 10th USENIX Security Symposium, 2001. 956/// 957/// Functionality implemented: 958/// 959/// We can statically check the following properties for string 960/// literal format strings for non v.*printf functions (where the 961/// arguments are passed directly): 962// 963/// (1) Are the number of format conversions equal to the number of 964/// data arguments? 965/// 966/// (2) Does each format conversion correctly match the type of the 967/// corresponding data argument? (TODO) 968/// 969/// Moreover, for all printf functions we can: 970/// 971/// (3) Check for a missing format string (when not caught by type checking). 972/// 973/// (4) Check for no-operation flags; e.g. using "#" with format 974/// conversion 'c' (TODO) 975/// 976/// (5) Check the use of '%n', a major source of security holes. 977/// 978/// (6) Check for malformed format conversions that don't specify anything. 979/// 980/// (7) Check for empty format strings. 
///      e.g.: printf("");
///
///  (8) Check that the format string is a wide literal.
///
/// All of these checks can be done by parsing the format string.
///
/// For now, we ONLY do (1), (3), (5), (6), (7), and (8).
void
Sema::CheckPrintfArguments(const CallExpr *TheCall, bool HasVAListArg,
                           unsigned format_idx, unsigned firstDataArg) {
  const Expr *Fn = TheCall->getCallee();

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (isa<CXXMemberCallExpr>(TheCall)) {
    // Catch a format attribute mistakenly referring to the object argument.
    if (format_idx == 0)
      return;
    --format_idx;
    if(firstDataArg != 0)
      --firstDataArg;
  }

  // CHECK: printf-like function is called with no format string.
  if (format_idx >= TheCall->getNumArgs()) {
    Diag(TheCall->getRParenLoc(), diag::warn_printf_missing_format_string)
      << Fn->getSourceRange();
    return;
  }

  const Expr *OrigFormatExpr = TheCall->getArg(format_idx)->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time.  Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  if (SemaCheckStringLiteral(OrigFormatExpr, TheCall, HasVAListArg, format_idx,
                             firstDataArg))
    return;  // Literal format string found, check done!

  // If there are no arguments specified, warn with -Wformat-security, otherwise
  // warn only with -Wformat-nonliteral.
  if (TheCall->getNumArgs() == format_idx+1)
    Diag(TheCall->getArg(format_idx)->getLocStart(),
         diag::warn_printf_nonliteral_noargs)
      << OrigFormatExpr->getSourceRange();
  else
    Diag(TheCall->getArg(format_idx)->getLocStart(),
         diag::warn_printf_nonliteral)
      << OrigFormatExpr->getSourceRange();
}

namespace {
/// CheckPrintfHandler - Callback object driven by
/// analyze_printf::ParseFormatString; validates each format specifier of a
/// printf format string literal against the data arguments of the call.
class CheckPrintfHandler : public analyze_printf::FormatStringHandler {
  Sema &S;
  const StringLiteral *FExpr;       // The format string literal itself.
  const Expr *OrigFormatExpr;       // Format argument before paren/cast strip.
  unsigned NumConversions;          // Number of conversions seen so far.
  const unsigned NumDataArgs;       // Number of data args supplied at the call.
  const bool IsObjCLiteral;         // True for @"..." format strings.
  const char *Beg; // Start of format string.
  const bool HasVAListArg;          // True for vprintf-style callees.
  const CallExpr *TheCall;
  unsigned FormatIdx;               // Index of the format argument.
public:
  CheckPrintfHandler(Sema &s, const StringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     unsigned numDataArgs, bool isObjCLiteral,
                     const char *beg, bool hasVAListArg,
                     const CallExpr *theCall, unsigned formatIdx)
    : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
      NumConversions(0), NumDataArgs(numDataArgs),
      IsObjCLiteral(isObjCLiteral), Beg(beg),
      HasVAListArg(hasVAListArg),
      TheCall(theCall), FormatIdx(formatIdx) {}

  void DoneProcessing();

  void HandleIncompleteFormatSpecifier(const char *startSpecifier,
                                       unsigned specifierLen);

  void
  HandleInvalidConversionSpecifier(const analyze_printf::FormatSpecifier &FS,
                                   const char *startSpecifier,
                                   unsigned specifierLen);

  void HandleNullChar(const char *nullCharacter);

  bool HandleFormatSpecifier(const analyze_printf::FormatSpecifier &FS,
                             const char *startSpecifier,
                             unsigned specifierLen);
private:
  SourceRange getFormatStringRange();
  SourceRange getFormatSpecifierRange(const char *startSpecifier,
                                      unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  bool HandleAmount(const analyze_printf::OptionalAmount &Amt,
                    unsigned MissingArgDiag, unsigned BadTypeDiag,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleFlags(const analyze_printf::FormatSpecifier &FS,
                   llvm::StringRef flag, llvm::StringRef cspec,
                   const char *startSpecifier, unsigned specifierLen);

  const Expr *getDataArg(unsigned i) const;
};
}

// Source range of the entire (original) format expression.
SourceRange CheckPrintfHandler::getFormatStringRange() {
  return OrigFormatExpr->getSourceRange();
}

// Source range covering one format specifier within the string literal.
SourceRange CheckPrintfHandler::
getFormatSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
  return SourceRange(getLocationOfByte(startSpecifier),
                     getLocationOfByte(startSpecifier+specifierLen-1));
}

// Map a pointer into the format string's character data back to a source
// location, accounting for concatenation and escapes.
SourceLocation CheckPrintfHandler::getLocationOfByte(const char *x) {
  return S.getLocationOfStringLiteralByte(FExpr, x - Beg);
}

// Warn about a trailing, incomplete '%...' specifier.
void CheckPrintfHandler::
HandleIncompleteFormatSpecifier(const char *startSpecifier,
                                unsigned specifierLen) {
  SourceLocation Loc = getLocationOfByte(startSpecifier);
  S.Diag(Loc, diag::warn_printf_incomplete_specifier)
    << getFormatSpecifierRange(startSpecifier, specifierLen);
}

// Warn about an unrecognized conversion character.  Still counts as a
// conversion so that data-argument accounting stays in sync.
void CheckPrintfHandler::
HandleInvalidConversionSpecifier(const analyze_printf::FormatSpecifier &FS,
                                 const char *startSpecifier,
                                 unsigned specifierLen) {

  ++NumConversions;
  const analyze_printf::ConversionSpecifier &CS =
    FS.getConversionSpecifier();
  SourceLocation Loc = getLocationOfByte(CS.getStart());
  S.Diag(Loc, diag::warn_printf_invalid_conversion)
      << llvm::StringRef(CS.getStart(), CS.getLength())
      << getFormatSpecifierRange(startSpecifier, specifierLen);
}

void CheckPrintfHandler::HandleNullChar(const char *nullCharacter) {
  // The presence of a null character is likely an error.
  S.Diag(getLocationOfByte(nullCharacter),
         diag::warn_printf_format_string_contains_null_char)
    << getFormatStringRange();
}

// Data argument i (1-based relative to the format string argument).
const Expr *CheckPrintfHandler::getDataArg(unsigned i) const {
  return TheCall->getArg(FormatIdx + i);
}

// Warn about a flag that has no effect for the given conversion (e.g. '0'
// with %s).
void CheckPrintfHandler::HandleFlags(const analyze_printf::FormatSpecifier &FS,
                                     llvm::StringRef flag,
                                     llvm::StringRef cspec,
                                     const char *startSpecifier,
                                     unsigned specifierLen) {
  const analyze_printf::ConversionSpecifier &CS = FS.getConversionSpecifier();
  S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_nonsensical_flag)
    << flag << cspec << getFormatSpecifierRange(startSpecifier, specifierLen);
}

// Check a '*' field width or precision: it consumes an extra data argument,
// which must be present and of 'int' type.  Returns false to stop further
// checking once an error has been reported.
bool
CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt,
                                 unsigned MissingArgDiag,
                                 unsigned BadTypeDiag,
                                 const char *startSpecifier,
                                 unsigned specifierLen) {

  if (Amt.hasDataArgument()) {
    ++NumConversions;
    if (!HasVAListArg) {
      if (NumConversions > NumDataArgs) {
        S.Diag(getLocationOfByte(Amt.getStart()), MissingArgDiag)
          << getFormatSpecifierRange(startSpecifier, specifierLen);
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument.  It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case.  GCC also
      // doesn't emit a warning for that case.
      const Expr *Arg = getDataArg(NumConversions);
      QualType T = Arg->getType();

      const analyze_printf::ArgTypeResult &ATR = Amt.getArgType(S.Context);
      assert(ATR.isValid());

      if (!ATR.matchesType(S.Context, T)) {
        S.Diag(getLocationOfByte(Amt.getStart()), BadTypeDiag)
          << ATR.getRepresentativeType(S.Context) << T
          << getFormatSpecifierRange(startSpecifier, specifierLen)
          << Arg->getSourceRange();
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}

// Main per-specifier callback: checks the field width/precision arguments,
// ObjC-only conversions, '%n', nonsensical flags, and the type of the
// matching data argument.  Returns false to abort further parsing.
bool
CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
                                          &FS,
                                          const char *startSpecifier,
                                          unsigned specifierLen) {

  using namespace analyze_printf;
  const ConversionSpecifier &CS = FS.getConversionSpecifier();

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(),
                    diag::warn_printf_asterisk_width_missing_arg,
                    diag::warn_printf_asterisk_width_wrong_type,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(),
                    diag::warn_printf_asterisk_precision_missing_arg,
                    diag::warn_printf_asterisk_precision_wrong_type,
                    startSpecifier, specifierLen)) {
    return false;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!IsObjCLiteral && CS.isObjCArg()) {
    HandleInvalidConversionSpecifier(FS, startSpecifier, specifierLen);

    // Continue checking the other format specifiers.
    return true;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  ++NumConversions;

  // Are we using '%n'?  Issue a warning about this being
  // a possible security issue.
  if (CS.getKind() == ConversionSpecifier::OutIntPtrArg) {
    S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_write_back)
      << getFormatSpecifierRange(startSpecifier, specifierLen);
    // Continue checking the other format specifiers.
    return true;
  }

  // A precision on %p is meaningless.
  if (CS.getKind() == ConversionSpecifier::VoidPtrArg) {
    if (FS.getPrecision().getHowSpecified() != OptionalAmount::NotSpecified)
      S.Diag(getLocationOfByte(CS.getStart()),
             diag::warn_printf_nonsensical_precision)
        << CS.getCharacters()
        << getFormatSpecifierRange(startSpecifier, specifierLen);
  }
  // '0', '+', and ' ' flags make no sense for %p and %s.
  if (CS.getKind() == ConversionSpecifier::VoidPtrArg ||
      CS.getKind() == ConversionSpecifier::CStrArg) {
    // FIXME: Instead of using "0", "+", etc., eventually get them from
    // the FormatSpecifier.
    if (FS.hasLeadingZeros())
      HandleFlags(FS, "0", CS.getCharacters(), startSpecifier, specifierLen);
    if (FS.hasPlusPrefix())
      HandleFlags(FS, "+", CS.getCharacters(), startSpecifier, specifierLen);
    if (FS.hasSpacePrefix())
      HandleFlags(FS, " ", CS.getCharacters(), startSpecifier, specifierLen);
  }

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (NumConversions > NumDataArgs) {
    S.Diag(getLocationOfByte(CS.getStart()),
           diag::warn_printf_insufficient_data_args)
      << getFormatSpecifierRange(startSpecifier, specifierLen);
    // Don't do any more checking.
    return false;
  }

  // Now type check the data expression that matches the
  // format specifier.
  const Expr *Ex = getDataArg(NumConversions);
  const analyze_printf::ArgTypeResult &ATR = FS.getArgType(S.Context);
  if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
    // Check if we didn't match because of an implicit cast from a 'char'
    // or 'short' to an 'int'.  This is done because printf is a varargs
    // function.
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Ex))
      if (ICE->getType() == S.Context.IntTy)
        if (ATR.matchesType(S.Context, ICE->getSubExpr()->getType()))
          return true;

    S.Diag(getLocationOfByte(CS.getStart()),
           diag::warn_printf_conversion_argument_type_mismatch)
      << ATR.getRepresentativeType(S.Context) << Ex->getType()
      << getFormatSpecifierRange(startSpecifier, specifierLen)
      << Ex->getSourceRange();
  }

  return true;
}

void CheckPrintfHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (!HasVAListArg && NumConversions < NumDataArgs)
    S.Diag(getDataArg(NumConversions+1)->getLocStart(),
           diag::warn_printf_too_many_data_args)
      << getFormatStringRange();
}

/// CheckPrintfString - Check an individual (non-wide, non-empty) format
/// string literal by parsing it and driving CheckPrintfHandler.
void Sema::CheckPrintfString(const StringLiteral *FExpr,
                             const Expr *OrigFormatExpr,
                             const CallExpr *TheCall, bool HasVAListArg,
                             unsigned format_idx, unsigned firstDataArg) {

  // CHECK: is the format string a wide literal?
  if (FExpr->isWide()) {
    Diag(FExpr->getLocStart(),
         diag::warn_printf_format_string_is_wide_literal)
      << OrigFormatExpr->getSourceRange();
    return;
  }

  // Str - The format string.  NOTE: this is NOT null-terminated!
  const char *Str = FExpr->getStrData();

  // CHECK: empty format string?
  unsigned StrLen = FExpr->getByteLength();

  if (StrLen == 0) {
    Diag(FExpr->getLocStart(), diag::warn_printf_empty_format_string)
      << OrigFormatExpr->getSourceRange();
    return;
  }

  CheckPrintfHandler H(*this, FExpr, OrigFormatExpr,
                       TheCall->getNumArgs() - firstDataArg,
                       isa<ObjCStringLiteral>(OrigFormatExpr), Str,
                       HasVAListArg, TheCall, format_idx);

  // ParseFormatString returns true when parsing was aborted; only run the
  // surplus-argument check after a complete parse.
  if (!analyze_printf::ParseFormatString(H, Str, Str + StrLen))
    H.DoneProcessing();
}

//===--- CHECK: Return Address of Stack Variable --------------------------===//

static DeclRefExpr* EvalVal(Expr *E);
static DeclRefExpr* EvalAddr(Expr* E);

/// CheckReturnStackAddr - Check if a return statement returns the address
/// of a stack variable.
void
Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
                           SourceLocation ReturnLoc) {

  // Perform checking for returned stack addresses.
  if (lhsType->isPointerType() || lhsType->isBlockPointerType()) {
    if (DeclRefExpr *DR = EvalAddr(RetValExp))
      Diag(DR->getLocStart(), diag::warn_ret_stack_addr)
        << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();

    // Skip over implicit cast expressions when checking for block expressions.
    RetValExp = RetValExp->IgnoreParenCasts();

    // Returning a block that captures local variables is an error.
    if (BlockExpr *C = dyn_cast<BlockExpr>(RetValExp))
      if (C->hasBlockDeclRefExprs())
        Diag(C->getLocStart(), diag::err_ret_local_block)
          << C->getSourceRange();

    // Returning the address of a label (GNU &&label) is also suspicious.
    if (AddrLabelExpr *ALE = dyn_cast<AddrLabelExpr>(RetValExp))
      Diag(ALE->getLocStart(), diag::warn_ret_addr_label)
        << ALE->getSourceRange();

  } else if (lhsType->isReferenceType()) {
    // Perform checking for stack values returned by reference.
    // Check for a reference to the stack
    if (DeclRefExpr *DR = EvalVal(RetValExp))
      Diag(DR->getLocStart(), diag::warn_ret_stack_ref)
        << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
  }
}

/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
/// check if the expression in a return statement evaluates to an address
/// to a location on the stack.  The recursion is used to traverse the
/// AST of the return expression, with recursion backtracking when we
/// encounter a subexpression that (1) clearly does not lead to the address
/// of a stack variable or (2) is something we cannot determine leads to
/// the address of a stack variable based on such local checking.
///
/// EvalAddr processes expressions that are pointers that are used as
/// references (and not L-values).  EvalVal handles all other values.
/// At the base case of the recursion is a check for a DeclRefExpr* in
/// the refers to a stack variable.
///
/// This implementation handles:
///
///   * pointer-to-pointer casts
///   * implicit conversions from array references to pointers
///   * taking the address of fields
///   * arbitrary interplay between "&" and "*" operators
///   * pointer arithmetic from an address of a stack variable
///   * taking the address of an array element where the array is on the stack
static DeclRefExpr* EvalAddr(Expr *E) {
  // We should only be called for evaluating pointer expressions.
  assert((E->getType()->isAnyPointerType() ||
          E->getType()->isBlockPointerType() ||
          E->getType()->isObjCQualifiedIdType()) &&
         "EvalAddr only works on pointers");

  // Our "symbolic interpreter" is just a dispatch off the currently
  // viewed AST node.  We then recursively traverse the AST by calling
  // EvalAddr and EvalVal appropriately.
  switch (E->getStmtClass()) {
  case Stmt::ParenExprClass:
    // Ignore parentheses.
    return EvalAddr(cast<ParenExpr>(E)->getSubExpr());

  case Stmt::UnaryOperatorClass: {
    // The only unary operator that make sense to handle here
    // is AddrOf.  All others don't make sense as pointers.
    UnaryOperator *U = cast<UnaryOperator>(E);

    if (U->getOpcode() == UnaryOperator::AddrOf)
      return EvalVal(U->getSubExpr());
    else
      return NULL;
  }

  case Stmt::BinaryOperatorClass: {
    // Handle pointer arithmetic.  All other binary operators are not valid
    // in this context.
    BinaryOperator *B = cast<BinaryOperator>(E);
    BinaryOperator::Opcode op = B->getOpcode();

    if (op != BinaryOperator::Add && op != BinaryOperator::Sub)
      return NULL;

    Expr *Base = B->getLHS();

    // Determine which argument is the real pointer base.  It could be
    // the RHS argument instead of the LHS.
    if (!Base->getType()->isPointerType()) Base = B->getRHS();

    assert (Base->getType()->isPointerType());
    return EvalAddr(Base);
  }

  // For conditional operators we need to see if either the LHS or RHS are
  // valid DeclRefExpr*s.  If one of them is valid, we return it.
  case Stmt::ConditionalOperatorClass: {
    ConditionalOperator *C = cast<ConditionalOperator>(E);

    // Handle the GNU extension for missing LHS.
    if (Expr *lhsExpr = C->getLHS())
      if (DeclRefExpr* LHS = EvalAddr(lhsExpr))
        return LHS;

    return EvalAddr(C->getRHS());
  }

  // For casts, we need to handle conversions from arrays to
  // pointer values, and pointer-to-pointer conversions.
  case Stmt::ImplicitCastExprClass:
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXFunctionalCastExprClass: {
    Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
    QualType T = SubExpr->getType();

    if (SubExpr->getType()->isPointerType() ||
        SubExpr->getType()->isBlockPointerType() ||
        SubExpr->getType()->isObjCQualifiedIdType())
      return EvalAddr(SubExpr);
    else if (T->isArrayType())
      return EvalVal(SubExpr);
    else
      return 0;
  }

  // C++ casts.  For dynamic casts, static casts, and const casts, we
  // are always converting from a pointer-to-pointer, so we just blow
  // through the cast.  In the case the dynamic cast doesn't fail (and
  // return NULL), we take the conservative route and report cases
  // where we return the address of a stack variable.
  // FIXME: The comment above is wrong (and was cut short); we're not always
  //  converting from pointer to pointer.  I'm guessing that this code should
  //  also handle references to objects.
  case Stmt::CXXStaticCastExprClass:
  case Stmt::CXXDynamicCastExprClass:
  case Stmt::CXXConstCastExprClass:
  case Stmt::CXXReinterpretCastExprClass: {
    Expr *S = cast<CXXNamedCastExpr>(E)->getSubExpr();
    if (S->getType()->isPointerType() || S->getType()->isBlockPointerType())
      return EvalAddr(S);
    else
      return NULL;
  }

  // Everything else: we simply don't reason about them.
  default:
    return NULL;
  }
}


/// EvalVal - This function complements EvalAddr in the mutual recursion.
/// See the comments for EvalAddr for more details.
static DeclRefExpr* EvalVal(Expr *E) {

  // We should only be called for evaluating non-pointer expressions, or
  // expressions with a pointer type that are not used as references but instead
  // are l-values (e.g., DeclRefExpr with a pointer type).

  // Our "symbolic interpreter" is just a dispatch off the currently
  // viewed AST node.  We then recursively traverse the AST by calling
  // EvalAddr and EvalVal appropriately.
  switch (E->getStmtClass()) {
  case Stmt::DeclRefExprClass: {
    // DeclRefExpr: the base case.  When we hit a DeclRefExpr we are looking
    // at code that refers to a variable's name.  We check if it has local
    // storage within the function, and if so, return the expression.
    DeclRefExpr *DR = cast<DeclRefExpr>(E);

    if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
      if (V->hasLocalStorage() && !V->getType()->isReferenceType()) return DR;

    return NULL;
  }

  case Stmt::ParenExprClass:
    // Ignore parentheses.
    return EvalVal(cast<ParenExpr>(E)->getSubExpr());

  case Stmt::UnaryOperatorClass: {
    // The only unary operator that make sense to handle here
    // is Deref.  All others don't resolve to a "name."  This includes
    // handling all sorts of rvalues passed to a unary operator.
    UnaryOperator *U = cast<UnaryOperator>(E);

    if (U->getOpcode() == UnaryOperator::Deref)
      return EvalAddr(U->getSubExpr());

    return NULL;
  }

  case Stmt::ArraySubscriptExprClass: {
    // Array subscripts are potential references to data on the stack.  We
    // retrieve the DeclRefExpr* for the array variable if it indeed
    // has local storage.
    return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase());
  }

  case Stmt::ConditionalOperatorClass: {
    // For conditional operators we need to see if either the LHS or RHS are
    // non-NULL DeclRefExpr's.  If one is non-NULL, we return it.
    ConditionalOperator *C = cast<ConditionalOperator>(E);

    // Handle the GNU extension for missing LHS.
    if (Expr *lhsExpr = C->getLHS())
      if (DeclRefExpr *LHS = EvalVal(lhsExpr))
        return LHS;

    return EvalVal(C->getRHS());
  }

  // Accesses to members are potential references to data on the stack.
  case Stmt::MemberExprClass: {
    MemberExpr *M = cast<MemberExpr>(E);

    // Check for indirect access.  We only want direct field accesses.
    if (!M->isArrow())
      return EvalVal(M->getBase());
    else
      return NULL;
  }

  // Everything else: we simply don't reason about them.
  default:
    return NULL;
  }
}

//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//

/// Check for comparisons of floating point operands using != and ==.
/// Issue a warning if these are not self-comparisons, as they are not likely
/// to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation loc, Expr* lex, Expr *rex) {
  bool EmitWarning = true;

  Expr* LeftExprSansParen = lex->IgnoreParens();
  Expr* RightExprSansParen = rex->IgnoreParens();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        EmitWarning = false;


  // Special case: check for comparisons against literals that can be exactly
  //  represented by APFloat.  In such cases, do not emit a warning.  This
  //  is a heuristic: often comparison against such literals are used to
  //  detect if a value in a variable has not changed.  This clearly can
  //  lead to false negatives.
  if (EmitWarning) {
    if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
      if (FLL->isExact())
        EmitWarning = false;
    } else
      if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)){
        if (FLR->isExact())
          EmitWarning = false;
    }
  }

  // Suppress the warning when either operand is a call to a builtin function.
  if (EmitWarning)
    if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
      if (CL->isBuiltinCall(Context))
        EmitWarning = false;

  if (EmitWarning)
    if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
      if (CR->isBuiltinCall(Context))
        EmitWarning = false;

  // Emit the diagnostic.
  if (EmitWarning)
    Diag(loc, diag::warn_floatingpoint_eq)
      << lex->getSourceRange() << rex->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int.
  unsigned Width;

  /// True if the int is known not to have negative values.
  bool NonNegative;

  // NOTE(review): the default constructor leaves both members
  // uninitialized; callers must assign before reading them.
  IntRange() {}
  IntRange(unsigned Width, bool NonNegative)
    : Width(Width), NonNegative(NonNegative)
  {}

  // Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  // Returns the range of an integral type.
  static IntRange forType(ASTContext &C, QualType T) {
    return forCanonicalType(C, T->getCanonicalTypeInternal().getTypePtr());
  }

  // Returns the range of an integral type based on its canonical
  // representation.
  static IntRange forCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Strip down to the underlying integer type of vectors, complex
    // integers, and enums.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = ET->getDecl()->getIntegerType().getTypePtr();

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  // Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(const IntRange &L, const IntRange &R) {
    return IntRange(std::max(L.Width, R.Width),
                    L.NonNegative && R.NonNegative);
  }

  // Returns the infimum of two ranges: i.e. their aggressive merge.
  static IntRange meet(const IntRange &L, const IntRange &R) {
    return IntRange(std::min(L.Width, R.Width),
                    L.NonNegative || R.NonNegative);
  }
};

// Range of a known constant value, truncated to MaxWidth.
IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

// Range of a fully-evaluated APValue (integer, vector, complex integer, or
// lvalue); Ty is only needed to get the sign right in the lvalue case.
IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                       unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    // Conservatively join the ranges of all vector elements.
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case.  It would be nice if APValue
  // preserved this.
  assert(result.isLValue());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerType());
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth - the width to which the value will be truncated
IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->Evaluate(result, C))
    return GetValueRange(C, result.Val, E->getType(), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
    if (CE->getCastKind() == CastExpr::CK_NoOp)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth);

    IntRange OutputTypeRange = IntRange::forType(C, CE->getType());

    bool isIntegerCast = (CE->getCastKind() == CastExpr::CK_IntegralCast);
    if (!isIntegerCast && CE->getCastKind() == CastExpr::CK_Unknown)
      isIntegerCast = CE->getSubExpr()->getType()->isIntegerType();

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange
      = GetExprRange(C, CE->getSubExpr(),
                     std::min(MaxWidth, OutputTypeRange.Width));

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C, CondResult ? CO->getTrueExpr()
                                        : CO->getFalseExpr(),
                          MaxWidth);

    // Otherwise, conservatively merge.
    IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
    IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
    return IntRange::join(L, R);
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {

    // Boolean-valued operations are single-bit and positive.
    case BinaryOperator::LAnd:
    case BinaryOperator::LOr:
    case BinaryOperator::LT:
    case BinaryOperator::GT:
    case BinaryOperator::LE:
    case BinaryOperator::GE:
    case BinaryOperator::EQ:
    case BinaryOperator::NE:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case BinaryOperator::PtrMemD:
    case BinaryOperator::PtrMemI:
      return IntRange::forType(C, E->getType());

    // Bitwise-and uses the *infimum* of the two source ranges.
    case BinaryOperator::And:
      return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
                            GetExprRange(C, BO->getRHS(), MaxWidth));

    // Left shift gets black-listed based on a judgement call.
    case BinaryOperator::Shl:
      return IntRange::forType(C, E->getType());

    // Right shift by a constant can narrow its left argument.
    case BinaryOperator::Shr: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      llvm::APSInt shift;
      if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
          shift.isNonNegative()) {
        unsigned zext = shift.getZExtValue();
        if (zext >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width -= zext;
      }

      return L;
    }

    // Comma acts as its right operand.
    case BinaryOperator::Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth);

    // Black-list pointer subtractions.
    case BinaryOperator::Sub:
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forType(C, E->getType());
      // fallthrough

    default:
      break;
    }

    // Treat every other operator as if it were closed on the
    // narrowest type that encompasses both operands.
1857 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth); 1858 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth); 1859 return IntRange::join(L, R); 1860 } 1861 1862 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 1863 switch (UO->getOpcode()) { 1864 // Boolean-valued operations are white-listed. 1865 case UnaryOperator::LNot: 1866 return IntRange::forBoolType(); 1867 1868 // Operations with opaque sources are black-listed. 1869 case UnaryOperator::Deref: 1870 case UnaryOperator::AddrOf: // should be impossible 1871 case UnaryOperator::OffsetOf: 1872 return IntRange::forType(C, E->getType()); 1873 1874 default: 1875 return GetExprRange(C, UO->getSubExpr(), MaxWidth); 1876 } 1877 } 1878 1879 FieldDecl *BitField = E->getBitField(); 1880 if (BitField) { 1881 llvm::APSInt BitWidthAP = BitField->getBitWidth()->EvaluateAsInt(C); 1882 unsigned BitWidth = BitWidthAP.getZExtValue(); 1883 1884 return IntRange(BitWidth, BitField->getType()->isUnsignedIntegerType()); 1885 } 1886 1887 return IntRange::forType(C, E->getType()); 1888} 1889 1890/// Checks whether the given value, which currently has the given 1891/// source semantics, has the same value when coerced through the 1892/// target semantics. 1893bool IsSameFloatAfterCast(const llvm::APFloat &value, 1894 const llvm::fltSemantics &Src, 1895 const llvm::fltSemantics &Tgt) { 1896 llvm::APFloat truncated = value; 1897 1898 bool ignored; 1899 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 1900 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 1901 1902 return truncated.bitwiseIsEqual(value); 1903} 1904 1905/// Checks whether the given value, which currently has the given 1906/// source semantics, has the same value when coerced through the 1907/// target semantics. 1908/// 1909/// The value might be a vector of floats (or a complex number). 
bool IsSameFloatAfterCast(const APValue &value,
                          const llvm::fltSemantics &Src,
                          const llvm::fltSemantics &Tgt) {
  // Scalar float: delegate directly.
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  // Vector of floats: every element must survive the round trip.
  if (value.isVector()) {
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  // Complex float: both the real and imaginary parts must survive.
  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

} // end anonymous namespace

/// \brief Implements -Wsign-compare.
///
/// \param lex the left-hand expression
/// \param rex the right-hand expression
/// \param OpLoc the location of the joining operator
/// \param PD the partial diagnostic to emit if the comparison is unsafe
/// \param Equality whether this is an "equality-like" join, which
///   suppresses the warning in some cases
void Sema::CheckSignCompare(Expr *lex, Expr *rex, SourceLocation OpLoc,
                            const PartialDiagnostic &PD, bool Equality) {
  // Don't warn if we're in an unevaluated context.
  if (ExprEvalContexts.back().Context == Unevaluated)
    return;

  // If either expression is value-dependent, don't warn. We'll get another
  // chance at instantiation time.
  if (lex->isValueDependent() || rex->isValueDependent())
    return;

  QualType lt = lex->getType(), rt = rex->getType();

  // Only warn if both operands are integral.
  if (!lt->isIntegerType() || !rt->isIntegerType())
    return;

  // In C, the width of a bitfield determines its type, and the
  // declared type only contributes the signedness.  This duplicates
  // the work that will later be done by UsualUnaryConversions.
  // Eventually, this check will be reorganized in a way that avoids
  // this duplication.
  if (!getLangOptions().CPlusPlus) {
    QualType tmp;
    tmp = Context.isPromotableBitField(lex);
    if (!tmp.isNull()) lt = tmp;
    tmp = Context.isPromotableBitField(rex);
    if (!tmp.isNull()) rt = tmp;
  }

  // The rule is that the signed operand becomes unsigned, so isolate the
  // signed operand.  If both or neither operand is signed there is
  // nothing to warn about.
  Expr *signedOperand = lex, *unsignedOperand = rex;
  QualType signedType = lt, unsignedType = rt;
  if (lt->isSignedIntegerType()) {
    if (rt->isSignedIntegerType()) return;
  } else {
    if (!rt->isSignedIntegerType()) return;
    std::swap(signedOperand, unsignedOperand);
    std::swap(signedType, unsignedType);
  }

  unsigned unsignedWidth = Context.getIntWidth(unsignedType);
  unsigned signedWidth = Context.getIntWidth(signedType);

  // If the unsigned type is strictly smaller than the signed type,
  // then (1) the result type will be signed and (2) the unsigned
  // value will fit fully within the signed type, and thus the result
  // of the comparison will be exact.
  if (signedWidth > unsignedWidth)
    return;

  // Otherwise, calculate the effective ranges.
  IntRange signedRange = GetExprRange(Context, signedOperand, signedWidth);
  IntRange unsignedRange = GetExprRange(Context, unsignedOperand,
                                        unsignedWidth);

  // We should never be unable to prove that the unsigned operand is
  // non-negative.
  assert(unsignedRange.NonNegative && "unsigned range includes negative?");

  // If the signed operand is non-negative, then the signed->unsigned
  // conversion won't change it.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with a overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (Equality && unsignedRange.Width < unsignedWidth)
    return;

  Diag(OpLoc, PD)
    << lt << rt << lex->getSourceRange() << rex->getSourceRange();
}

/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, unsigned diag) {
  S.Diag(E->getExprLoc(), diag) << E->getType() << T << E->getSourceRange();
}

/// Implements -Wconversion.
///
/// \param E the expression being implicitly converted
/// \param T the type it is being converted to
void Sema::CheckImplicitConversion(Expr *E, QualType T) {
  // Don't diagnose in unevaluated contexts.
  if (ExprEvalContexts.back().Context == Sema::Unevaluated)
    return;

  // Don't diagnose for value-dependent expressions.
  if (E->isValueDependent())
    return;

  const Type *Source = Context.getCanonicalType(E->getType()).getTypePtr();
  const Type *Target = Context.getCanonicalType(T).getTypePtr();

  // Never diagnose implicit casts to bool.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool))
    return;

  // Strip vector types.
  if (isa<VectorType>(Source)) {
    if (!isa<VectorType>(Target))
      return DiagnoseImpCast(*this, E, T, diag::warn_impcast_vector_scalar);

    Source = cast<VectorType>(Source)->getElementType().getTypePtr();
    Target = cast<VectorType>(Target)->getElementType().getTypePtr();
  }

  // Strip complex types.
  if (isa<ComplexType>(Source)) {
    if (!isa<ComplexType>(Target))
      return DiagnoseImpCast(*this, E, T, diag::warn_impcast_complex_scalar);

    Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
    Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
  }

  const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
  const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);

  // If the source is floating point...
  if (SourceBT && SourceBT->isFloatingPoint()) {
    // ...and the target is floating point...
    if (TargetBT && TargetBT->isFloatingPoint()) {
      // ...then warn if we're dropping FP rank.

      // Builtin FP kinds are ordered by increasing FP rank.
      if (SourceBT->getKind() > TargetBT->getKind()) {
        // Don't warn about float constants that are precisely
        // representable in the target type.
        Expr::EvalResult result;
        if (E->Evaluate(result, Context)) {
          // Value might be a float, a float vector, or a float complex.
          // Note the argument order: the constant is converted to the
          // target semantics and back, then compared bitwise against
          // the original value.
          if (IsSameFloatAfterCast(result.Val,
                   Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
                   Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
            return;
        }

        DiagnoseImpCast(*this, E, T, diag::warn_impcast_float_precision);
      }
      return;
    }

    // If the target is integral, always warn.
    if ((TargetBT && TargetBT->isInteger()))
      // TODO: don't warn for integer values?
      return DiagnoseImpCast(*this, E, T, diag::warn_impcast_float_integer);

    return;
  }

  // Beyond this point we only care about integer-to-integer conversions.
  if (!Source->isIntegerType() || !Target->isIntegerType())
    return;

  IntRange SourceRange = GetExprRange(Context, E,
                                      Context.getIntWidth(E->getType()));
  IntRange TargetRange = IntRange::forCanonicalType(Context, Target);

  // FIXME: also signed<->unsigned?

  if (SourceRange.Width > TargetRange.Width) {
    // People want to build with -Wshorten-64-to-32 and not -Wconversion
    // and by god we'll let them.
    if (SourceRange.Width == 64 && TargetRange.Width == 32)
      return DiagnoseImpCast(*this, E, T, diag::warn_impcast_integer_64_32);
    return DiagnoseImpCast(*this, E, T, diag::warn_impcast_integer_precision);
  }

  return;
}

// MarkReachable - Mark all the blocks reachable from Start as live.
// Returns the total number of blocks that were marked reachable.
2109static unsigned MarkReachable(CFGBlock &Start, llvm::BitVector &live) { 2110 unsigned count = 0; 2111 llvm::SmallVector<CFGBlock*, 12> WL; 2112 2113 // Prep work queue 2114 live.set(Start.getBlockID()); 2115 ++count; 2116 WL.push_back(&Start); 2117 2118 // Find the reachable blocks from 'Start'. 2119 while (!WL.empty()) { 2120 CFGBlock *item = WL.back(); 2121 WL.pop_back(); 2122 2123 // Look at the successors and mark then reachable. 2124 for (CFGBlock::succ_iterator I=item->succ_begin(), E=item->succ_end(); 2125 I != E; ++I) 2126 if (CFGBlock *B = *I) { 2127 unsigned blockID = B->getBlockID(); 2128 if (!live[blockID]) { 2129 live.set(blockID); 2130 ++count; 2131 WL.push_back(B); 2132 } 2133 } 2134 } 2135 return count; 2136} 2137 2138static SourceLocation GetUnreachableLoc(CFGBlock &b, SourceRange &R1, 2139 SourceRange &R2) { 2140 Stmt *S; 2141 unsigned sn = 0; 2142 R1 = R2 = SourceRange(); 2143 2144 top: 2145 if (sn < b.size()) 2146 S = b[sn].getStmt(); 2147 else if (b.getTerminator()) 2148 S = b.getTerminator(); 2149 else 2150 return SourceLocation(); 2151 2152 switch (S->getStmtClass()) { 2153 case Expr::BinaryOperatorClass: { 2154 BinaryOperator *BO = cast<BinaryOperator>(S); 2155 if (BO->getOpcode() == BinaryOperator::Comma) { 2156 if (sn+1 < b.size()) 2157 return b[sn+1].getStmt()->getLocStart(); 2158 CFGBlock *n = &b; 2159 while (1) { 2160 if (n->getTerminator()) 2161 return n->getTerminator()->getLocStart(); 2162 if (n->succ_size() != 1) 2163 return SourceLocation(); 2164 n = n[0].succ_begin()[0]; 2165 if (n->pred_size() != 1) 2166 return SourceLocation(); 2167 if (!n->empty()) 2168 return n[0][0].getStmt()->getLocStart(); 2169 } 2170 } 2171 R1 = BO->getLHS()->getSourceRange(); 2172 R2 = BO->getRHS()->getSourceRange(); 2173 return BO->getOperatorLoc(); 2174 } 2175 case Expr::UnaryOperatorClass: { 2176 const UnaryOperator *UO = cast<UnaryOperator>(S); 2177 R1 = UO->getSubExpr()->getSourceRange(); 2178 return UO->getOperatorLoc(); 2179 } 2180 case 
Expr::CompoundAssignOperatorClass: { 2181 const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S); 2182 R1 = CAO->getLHS()->getSourceRange(); 2183 R2 = CAO->getRHS()->getSourceRange(); 2184 return CAO->getOperatorLoc(); 2185 } 2186 case Expr::ConditionalOperatorClass: { 2187 const ConditionalOperator *CO = cast<ConditionalOperator>(S); 2188 return CO->getQuestionLoc(); 2189 } 2190 case Expr::MemberExprClass: { 2191 const MemberExpr *ME = cast<MemberExpr>(S); 2192 R1 = ME->getSourceRange(); 2193 return ME->getMemberLoc(); 2194 } 2195 case Expr::ArraySubscriptExprClass: { 2196 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S); 2197 R1 = ASE->getLHS()->getSourceRange(); 2198 R2 = ASE->getRHS()->getSourceRange(); 2199 return ASE->getRBracketLoc(); 2200 } 2201 case Expr::CStyleCastExprClass: { 2202 const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S); 2203 R1 = CSC->getSubExpr()->getSourceRange(); 2204 return CSC->getLParenLoc(); 2205 } 2206 case Expr::CXXFunctionalCastExprClass: { 2207 const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S); 2208 R1 = CE->getSubExpr()->getSourceRange(); 2209 return CE->getTypeBeginLoc(); 2210 } 2211 case Expr::ImplicitCastExprClass: 2212 ++sn; 2213 goto top; 2214 case Stmt::CXXTryStmtClass: { 2215 return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc(); 2216 } 2217 default: ; 2218 } 2219 R1 = S->getSourceRange(); 2220 return S->getLocStart(); 2221} 2222 2223static SourceLocation MarkLiveTop(CFGBlock *e, llvm::BitVector &live, 2224 SourceManager &SM) { 2225 std::queue<CFGBlock*> workq; 2226 // Prep work queue 2227 workq.push(e); 2228 SourceRange R1, R2; 2229 SourceLocation top = GetUnreachableLoc(*e, R1, R2); 2230 bool FromMainFile = false; 2231 bool FromSystemHeader = false; 2232 bool TopValid = false; 2233 if (top.isValid()) { 2234 FromMainFile = SM.isFromMainFile(top); 2235 FromSystemHeader = SM.isInSystemHeader(top); 2236 TopValid = true; 2237 } 2238 // Solve 2239 while (!workq.empty()) { 2240 
CFGBlock *item = workq.front(); 2241 workq.pop(); 2242 SourceLocation c = GetUnreachableLoc(*item, R1, R2); 2243 if (c.isValid() 2244 && (!TopValid 2245 || (SM.isFromMainFile(c) && !FromMainFile) 2246 || (FromSystemHeader && !SM.isInSystemHeader(c)) 2247 || SM.isBeforeInTranslationUnit(c, top))) { 2248 top = c; 2249 FromMainFile = SM.isFromMainFile(top); 2250 FromSystemHeader = SM.isInSystemHeader(top); 2251 } 2252 live.set(item->getBlockID()); 2253 for (CFGBlock::succ_iterator I=item->succ_begin(), 2254 E=item->succ_end(); 2255 I != E; 2256 ++I) { 2257 if ((*I) && !live[(*I)->getBlockID()]) { 2258 live.set((*I)->getBlockID()); 2259 workq.push(*I); 2260 } 2261 } 2262 } 2263 return top; 2264} 2265 2266static int LineCmp(const void *p1, const void *p2) { 2267 SourceLocation *Line1 = (SourceLocation *)p1; 2268 SourceLocation *Line2 = (SourceLocation *)p2; 2269 return !(*Line1 < *Line2); 2270} 2271 2272namespace { 2273 struct ErrLoc { 2274 SourceLocation Loc; 2275 SourceRange R1; 2276 SourceRange R2; 2277 ErrLoc(SourceLocation l, SourceRange r1, SourceRange r2) 2278 : Loc(l), R1(r1), R2(r2) { } 2279 }; 2280} 2281 2282/// CheckUnreachable - Check for unreachable code. 2283void Sema::CheckUnreachable(AnalysisContext &AC) { 2284 unsigned count; 2285 // We avoid checking when there are errors, as the CFG won't faithfully match 2286 // the user's code. 2287 if (getDiagnostics().hasErrorOccurred() || 2288 Diags.getDiagnosticLevel(diag::warn_unreachable) == Diagnostic::Ignored) 2289 return; 2290 2291 CFG *cfg = AC.getCFG(); 2292 if (cfg == 0) 2293 return; 2294 2295 // Mark all live things first. 2296 llvm::BitVector live(cfg->getNumBlockIDs()); 2297 count = MarkReachable(cfg->getEntry(), live); 2298 2299 // If there are no dead blocks, we're done. 
2300 if (count == cfg->getNumBlockIDs()) 2301 return; 2302 2303 SourceRange R1, R2; 2304 2305 llvm::SmallVector<ErrLoc, 24> lines; 2306 bool AddEHEdges = AC.getAddEHEdges(); 2307 // First, give warnings for blocks with no predecessors, as they 2308 // can't be part of a loop. 2309 for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) { 2310 CFGBlock &b = **I; 2311 if (!live[b.getBlockID()]) { 2312 if (b.pred_begin() == b.pred_end()) { 2313 if (!AddEHEdges && b.getTerminator() 2314 && isa<CXXTryStmt>(b.getTerminator())) { 2315 // When not adding EH edges from calls, catch clauses 2316 // can otherwise seem dead. Avoid noting them as dead. 2317 count += MarkReachable(b, live); 2318 continue; 2319 } 2320 SourceLocation c = GetUnreachableLoc(b, R1, R2); 2321 if (!c.isValid()) { 2322 // Blocks without a location can't produce a warning, so don't mark 2323 // reachable blocks from here as live. 2324 live.set(b.getBlockID()); 2325 ++count; 2326 continue; 2327 } 2328 lines.push_back(ErrLoc(c, R1, R2)); 2329 // Avoid excessive errors by marking everything reachable from here 2330 count += MarkReachable(b, live); 2331 } 2332 } 2333 } 2334 2335 if (count < cfg->getNumBlockIDs()) { 2336 // And then give warnings for the tops of loops. 2337 for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) { 2338 CFGBlock &b = **I; 2339 if (!live[b.getBlockID()]) 2340 // Avoid excessive errors by marking everything reachable from here 2341 lines.push_back(ErrLoc(MarkLiveTop(&b, live, 2342 Context.getSourceManager()), 2343 SourceRange(), SourceRange())); 2344 } 2345 } 2346 2347 llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp); 2348 for (llvm::SmallVectorImpl<ErrLoc>::iterator I=lines.begin(), E=lines.end(); 2349 I != E; ++I) 2350 if (I->Loc.isValid()) 2351 Diag(I->Loc, diag::warn_unreachable) << I->R1 << I->R2; 2352} 2353 2354/// CheckFallThrough - Check that we don't fall off the end of a 2355/// Statement that should return a value. 
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
Sema::ControlFlowKind Sema::CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0)
    // FIXME: This should be NeverFallThrough
    return NeverFallThroughOrReturn;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  std::queue<CFGBlock*> workq;
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = MarkReachable(cfg->getEntry(), live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += MarkReachable(b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit block
  // and look for fall through paths, being careful to ignore normal returns,
  // and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;
  for (CFGBlock::pred_iterator I=cfg->getExit().pred_begin(),
                               E = cfg->getExit().pred_end();
       I != E;
       ++I) {
    CFGBlock& B = **I;
    // Skip blocks we proved dead above.
    if (!live[B.getBlockID()])
      continue;
    if (B.size() == 0) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }

      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }
    // Classify the block by its last statement.
    Stmt *S = B[B.size()-1];
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      // MS-style inline assembly is treated as opaque: it may return.
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    // Calls to noreturn functions don't actually fall through.
    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (B.succ_begin()[0] != &cfg->getExit()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();
      if (CEE->getType().getNoReturnAttr()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}

/// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
void Sema::CheckFallThroughForFunctionDef(Decl *D, Stmt *Body,
                                          AnalysisContext &AC) {
  // FIXME: Would be nice if we had a better way to control cascading errors,
  // but for now, avoid them.  The problem is that when Parse sees:
  //   int foo() { return a; }
  // The return is eaten and the Sema code sees just:
  //   int foo() { }
  // which this code would then warn about.
  if (getDiagnostics().hasErrorOccurred())
    return;

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // For function templates, class templates and member function templates
    // we'll do the analysis at instantiation time.
    if (FD->isDependentContext())
      return;

    ReturnsVoid = FD->getResultType()->isVoidType();
    // noreturn may come from either the decl attribute or the function type.
    HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
                  FD->getType()->getAs<FunctionType>()->getNoReturnAttr();

  } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }

  // Short circuit for compilation speed: skip the CFG analysis entirely
  // when every diagnostic it could produce is disabled or inapplicable.
  if ((Diags.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function)
       == Diagnostic::Ignored || ReturnsVoid)
      && (Diags.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr)
          == Diagnostic::Ignored || !HasNoReturn)
      && (Diags.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
          == Diagnostic::Ignored || !ReturnsVoid))
    return;
  // FIXME: Function try block
  if (CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
    case MaybeFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::warn_falloff_noreturn_function);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(),diag::warn_maybe_falloff_nonvoid_function);
      break;
    case AlwaysFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::warn_falloff_noreturn_function);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(), diag::warn_falloff_nonvoid_function);
      break;
    case NeverFallThroughOrReturn:
      if (ReturnsVoid && !HasNoReturn)
        Diag(Compound->getLBracLoc(), diag::warn_suggest_noreturn_function);
      break;
    case NeverFallThrough:
      break;
    }
  }
}

/// CheckFallThroughForBlock - Check that we don't fall off the end of a block
/// that should return a value.  Check that we don't fall off the end of a
/// noreturn block.  We assume that functions and blocks not marked noreturn
/// will return.
void Sema::CheckFallThroughForBlock(QualType BlockTy, Stmt *Body,
                                    AnalysisContext &AC) {
  // FIXME: Would be nice if we had a better way to control cascading errors,
  // but for now, avoid them.  The problem is that when Parse sees:
  //   int foo() { return a; }
  // The return is eaten and the Sema code sees just:
  //   int foo() { }
  // which this code would then warn about.
  if (getDiagnostics().hasErrorOccurred())
    return;
  bool ReturnsVoid = false;
  bool HasNoReturn = false;
  if (const FunctionType *FT =BlockTy->getPointeeType()->getAs<FunctionType>()){
    if (FT->getResultType()->isVoidType())
      ReturnsVoid = true;
    if (FT->getNoReturnAttr())
      HasNoReturn = true;
  }

  // Short circuit for compilation speed.
  // NOTE(review): the trailing "|| !ReturnsVoid" clause is always false
  // here (we already required ReturnsVoid above), so this reduces to
  // "ReturnsVoid && !HasNoReturn" — confirm whether the intent was to
  // test the diagnostic level as in CheckFallThroughForFunctionDef.
  if (ReturnsVoid
      && !HasNoReturn
      && (Diags.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
          == Diagnostic::Ignored || !ReturnsVoid))
    return;
  // FIXME: Function try block
  if (CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
    case MaybeFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::err_noreturn_block_has_return_expr);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(), diag::err_maybe_falloff_nonvoid_block);
      break;
    case AlwaysFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::err_noreturn_block_has_return_expr);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(), diag::err_falloff_nonvoid_block);
      break;
    case NeverFallThroughOrReturn:
      if (ReturnsVoid)
        Diag(Compound->getLBracLoc(), diag::warn_suggest_noreturn_block);
      break;
    case NeverFallThrough:
      break;
    }
  }
}

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function.  This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
2602bool Sema::CheckParmsForFunctionDef(FunctionDecl *FD) { 2603 bool HasInvalidParm = false; 2604 for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) { 2605 ParmVarDecl *Param = FD->getParamDecl(p); 2606 2607 // C99 6.7.5.3p4: the parameters in a parameter type list in a 2608 // function declarator that is part of a function definition of 2609 // that function shall not have incomplete type. 2610 // 2611 // This is also C++ [dcl.fct]p6. 2612 if (!Param->isInvalidDecl() && 2613 RequireCompleteType(Param->getLocation(), Param->getType(), 2614 diag::err_typecheck_decl_incomplete_type)) { 2615 Param->setInvalidDecl(); 2616 HasInvalidParm = true; 2617 } 2618 2619 // C99 6.9.1p5: If the declarator includes a parameter type list, the 2620 // declaration of each parameter shall include an identifier. 2621 if (Param->getIdentifier() == 0 && 2622 !Param->isImplicit() && 2623 !getLangOptions().CPlusPlus) 2624 Diag(Param->getLocation(), diag::err_parameter_name_omitted); 2625 2626 // C99 6.7.5.3p12: 2627 // If the function declarator is not part of a definition of that 2628 // function, parameters may have incomplete type and may use the [*] 2629 // notation in their sequences of declarator specifiers to specify 2630 // variable length array types. 2631 QualType PType = Param->getOriginalType(); 2632 if (const ArrayType *AT = Context.getAsArrayType(PType)) { 2633 if (AT->getSizeModifier() == ArrayType::Star) { 2634 // FIXME: This diagnosic should point the the '[*]' if source-location 2635 // information is added for it. 2636 Diag(Param->getLocation(), diag::err_array_star_in_function_definition); 2637 } 2638 } 2639 2640 if (getLangOptions().CPlusPlus) 2641 if (const RecordType *RT = Param->getType()->getAs<RecordType>()) 2642 FinalizeVarWithDestructor(Param, RT); 2643 } 2644 2645 return HasInvalidParm; 2646} 2647