SemaChecking.cpp revision ce7024e8a3793b05861a4904ecdb1272924ada14
1//===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements extra semantic analysis beyond what is enforced 11// by the C type system. 12// 13//===----------------------------------------------------------------------===// 14 15#include "Sema.h" 16#include "clang/Analysis/CFG.h" 17#include "clang/Analysis/AnalysisContext.h" 18#include "clang/AST/ASTContext.h" 19#include "clang/AST/CharUnits.h" 20#include "clang/AST/DeclObjC.h" 21#include "clang/AST/ExprCXX.h" 22#include "clang/AST/ExprObjC.h" 23#include "clang/AST/DeclObjC.h" 24#include "clang/AST/StmtCXX.h" 25#include "clang/AST/StmtObjC.h" 26#include "clang/Lex/LiteralSupport.h" 27#include "clang/Lex/Preprocessor.h" 28#include "llvm/ADT/BitVector.h" 29#include "llvm/ADT/STLExtras.h" 30#include <limits> 31#include <queue> 32using namespace clang; 33 34/// getLocationOfStringLiteralByte - Return a source location that points to the 35/// specified byte of the specified string literal. 36/// 37/// Strings are amazingly complex. They can be formed from multiple tokens and 38/// can have escape sequences in them in addition to the usual trigraph and 39/// escaped newline business. This routine handles this complexity. 40/// 41SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 42 unsigned ByteNo) const { 43 assert(!SL->isWide() && "This doesn't work for wide strings yet"); 44 45 // Loop over all of the tokens in this string until we find the one that 46 // contains the byte we're looking for. 
47 unsigned TokNo = 0; 48 while (1) { 49 assert(TokNo < SL->getNumConcatenated() && "Invalid byte number!"); 50 SourceLocation StrTokLoc = SL->getStrTokenLoc(TokNo); 51 52 // Get the spelling of the string so that we can get the data that makes up 53 // the string literal, not the identifier for the macro it is potentially 54 // expanded through. 55 SourceLocation StrTokSpellingLoc = SourceMgr.getSpellingLoc(StrTokLoc); 56 57 // Re-lex the token to get its length and original spelling. 58 std::pair<FileID, unsigned> LocInfo = 59 SourceMgr.getDecomposedLoc(StrTokSpellingLoc); 60 std::pair<const char *,const char *> Buffer = 61 SourceMgr.getBufferData(LocInfo.first); 62 const char *StrData = Buffer.first+LocInfo.second; 63 64 // Create a langops struct and enable trigraphs. This is sufficient for 65 // relexing tokens. 66 LangOptions LangOpts; 67 LangOpts.Trigraphs = true; 68 69 // Create a lexer starting at the beginning of this token. 70 Lexer TheLexer(StrTokSpellingLoc, LangOpts, Buffer.first, StrData, 71 Buffer.second); 72 Token TheTok; 73 TheLexer.LexFromRawLexer(TheTok); 74 75 // Use the StringLiteralParser to compute the length of the string in bytes. 76 StringLiteralParser SLP(&TheTok, 1, PP); 77 unsigned TokNumBytes = SLP.GetStringLength(); 78 79 // If the byte is in this token, return the location of the byte. 80 if (ByteNo < TokNumBytes || 81 (ByteNo == TokNumBytes && TokNo == SL->getNumConcatenated())) { 82 unsigned Offset = 83 StringLiteralParser::getOffsetOfStringByte(TheTok, ByteNo, PP); 84 85 // Now that we know the offset of the token in the spelling, use the 86 // preprocessor to get the offset in the original source. 87 return PP.AdvanceToTokenCharacter(StrTokLoc, Offset); 88 } 89 90 // Move to the next string token. 91 ++TokNo; 92 ByteNo -= TokNumBytes; 93 } 94} 95 96/// CheckablePrintfAttr - does a function call have a "printf" attribute 97/// and arguments that merit checking? 
98bool Sema::CheckablePrintfAttr(const FormatAttr *Format, CallExpr *TheCall) { 99 if (Format->getType() == "printf") return true; 100 if (Format->getType() == "printf0") { 101 // printf0 allows null "format" string; if so don't check format/args 102 unsigned format_idx = Format->getFormatIdx() - 1; 103 // Does the index refer to the implicit object argument? 104 if (isa<CXXMemberCallExpr>(TheCall)) { 105 if (format_idx == 0) 106 return false; 107 --format_idx; 108 } 109 if (format_idx < TheCall->getNumArgs()) { 110 Expr *Format = TheCall->getArg(format_idx)->IgnoreParenCasts(); 111 if (!Format->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) 112 return true; 113 } 114 } 115 return false; 116} 117 118Action::OwningExprResult 119Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 120 OwningExprResult TheCallResult(Owned(TheCall)); 121 122 switch (BuiltinID) { 123 case Builtin::BI__builtin___CFStringMakeConstantString: 124 assert(TheCall->getNumArgs() == 1 && 125 "Wrong # arguments to builtin CFStringMakeConstantString"); 126 if (CheckObjCString(TheCall->getArg(0))) 127 return ExprError(); 128 break; 129 case Builtin::BI__builtin_stdarg_start: 130 case Builtin::BI__builtin_va_start: 131 if (SemaBuiltinVAStart(TheCall)) 132 return ExprError(); 133 break; 134 case Builtin::BI__builtin_isgreater: 135 case Builtin::BI__builtin_isgreaterequal: 136 case Builtin::BI__builtin_isless: 137 case Builtin::BI__builtin_islessequal: 138 case Builtin::BI__builtin_islessgreater: 139 case Builtin::BI__builtin_isunordered: 140 if (SemaBuiltinUnorderedCompare(TheCall)) 141 return ExprError(); 142 break; 143 case Builtin::BI__builtin_isfinite: 144 case Builtin::BI__builtin_isinf: 145 case Builtin::BI__builtin_isinf_sign: 146 case Builtin::BI__builtin_isnan: 147 case Builtin::BI__builtin_isnormal: 148 if (SemaBuiltinUnaryFP(TheCall)) 149 return ExprError(); 150 break; 151 case Builtin::BI__builtin_return_address: 152 case 
Builtin::BI__builtin_frame_address: 153 if (SemaBuiltinStackAddress(TheCall)) 154 return ExprError(); 155 break; 156 case Builtin::BI__builtin_eh_return_data_regno: 157 if (SemaBuiltinEHReturnDataRegNo(TheCall)) 158 return ExprError(); 159 break; 160 case Builtin::BI__builtin_shufflevector: 161 return SemaBuiltinShuffleVector(TheCall); 162 // TheCall will be freed by the smart pointer here, but that's fine, since 163 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 164 case Builtin::BI__builtin_prefetch: 165 if (SemaBuiltinPrefetch(TheCall)) 166 return ExprError(); 167 break; 168 case Builtin::BI__builtin_object_size: 169 if (SemaBuiltinObjectSize(TheCall)) 170 return ExprError(); 171 break; 172 case Builtin::BI__builtin_longjmp: 173 if (SemaBuiltinLongjmp(TheCall)) 174 return ExprError(); 175 break; 176 case Builtin::BI__sync_fetch_and_add: 177 case Builtin::BI__sync_fetch_and_sub: 178 case Builtin::BI__sync_fetch_and_or: 179 case Builtin::BI__sync_fetch_and_and: 180 case Builtin::BI__sync_fetch_and_xor: 181 case Builtin::BI__sync_fetch_and_nand: 182 case Builtin::BI__sync_add_and_fetch: 183 case Builtin::BI__sync_sub_and_fetch: 184 case Builtin::BI__sync_and_and_fetch: 185 case Builtin::BI__sync_or_and_fetch: 186 case Builtin::BI__sync_xor_and_fetch: 187 case Builtin::BI__sync_nand_and_fetch: 188 case Builtin::BI__sync_val_compare_and_swap: 189 case Builtin::BI__sync_bool_compare_and_swap: 190 case Builtin::BI__sync_lock_test_and_set: 191 case Builtin::BI__sync_lock_release: 192 if (SemaBuiltinAtomicOverloaded(TheCall)) 193 return ExprError(); 194 break; 195 } 196 197 return move(TheCallResult); 198} 199 200/// CheckFunctionCall - Check a direct function call for various correctness 201/// and safety properties not strictly enforced by the C type system. 202bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) { 203 // Get the IdentifierInfo* for the called function. 
204 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 205 206 // None of the checks below are needed for functions that don't have 207 // simple names (e.g., C++ conversion functions). 208 if (!FnInfo) 209 return false; 210 211 // FIXME: This mechanism should be abstracted to be less fragile and 212 // more efficient. For example, just map function ids to custom 213 // handlers. 214 215 // Printf checking. 216 if (const FormatAttr *Format = FDecl->getAttr<FormatAttr>()) { 217 if (CheckablePrintfAttr(Format, TheCall)) { 218 bool HasVAListArg = Format->getFirstArg() == 0; 219 if (!HasVAListArg) { 220 if (const FunctionProtoType *Proto 221 = FDecl->getType()->getAs<FunctionProtoType>()) 222 HasVAListArg = !Proto->isVariadic(); 223 } 224 CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1, 225 HasVAListArg ? 0 : Format->getFirstArg() - 1); 226 } 227 } 228 229 for (const NonNullAttr *NonNull = FDecl->getAttr<NonNullAttr>(); NonNull; 230 NonNull = NonNull->getNext<NonNullAttr>()) 231 CheckNonNullArguments(NonNull, TheCall); 232 233 return false; 234} 235 236bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) { 237 // Printf checking. 238 const FormatAttr *Format = NDecl->getAttr<FormatAttr>(); 239 if (!Format) 240 return false; 241 242 const VarDecl *V = dyn_cast<VarDecl>(NDecl); 243 if (!V) 244 return false; 245 246 QualType Ty = V->getType(); 247 if (!Ty->isBlockPointerType()) 248 return false; 249 250 if (!CheckablePrintfAttr(Format, TheCall)) 251 return false; 252 253 bool HasVAListArg = Format->getFirstArg() == 0; 254 if (!HasVAListArg) { 255 const FunctionType *FT = 256 Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>(); 257 if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT)) 258 HasVAListArg = !Proto->isVariadic(); 259 } 260 CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1, 261 HasVAListArg ? 
0 : Format->getFirstArg() - 1); 262 263 return false; 264} 265 266/// SemaBuiltinAtomicOverloaded - We have a call to a function like 267/// __sync_fetch_and_add, which is an overloaded function based on the pointer 268/// type of its first argument. The main ActOnCallExpr routines have already 269/// promoted the types of arguments because all of these calls are prototyped as 270/// void(...). 271/// 272/// This function goes through and does final semantic checking for these 273/// builtins, 274bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { 275 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 276 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 277 278 // Ensure that we have at least one argument to do type inference from. 279 if (TheCall->getNumArgs() < 1) 280 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 281 << 0 << TheCall->getCallee()->getSourceRange(); 282 283 // Inspect the first argument of the atomic builtin. This should always be 284 // a pointer type, whose element is an integral scalar or pointer type. 285 // Because it is a pointer type, we don't have to worry about any implicit 286 // casts here. 287 Expr *FirstArg = TheCall->getArg(0); 288 if (!FirstArg->getType()->isPointerType()) 289 return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) 290 << FirstArg->getType() << FirstArg->getSourceRange(); 291 292 QualType ValType = FirstArg->getType()->getAs<PointerType>()->getPointeeType(); 293 if (!ValType->isIntegerType() && !ValType->isPointerType() && 294 !ValType->isBlockPointerType()) 295 return Diag(DRE->getLocStart(), 296 diag::err_atomic_builtin_must_be_pointer_intptr) 297 << FirstArg->getType() << FirstArg->getSourceRange(); 298 299 // We need to figure out which concrete builtin this maps onto. For example, 300 // __sync_fetch_and_add with a 2 byte object turns into 301 // __sync_fetch_and_add_2. 
302#define BUILTIN_ROW(x) \ 303 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 304 Builtin::BI##x##_8, Builtin::BI##x##_16 } 305 306 static const unsigned BuiltinIndices[][5] = { 307 BUILTIN_ROW(__sync_fetch_and_add), 308 BUILTIN_ROW(__sync_fetch_and_sub), 309 BUILTIN_ROW(__sync_fetch_and_or), 310 BUILTIN_ROW(__sync_fetch_and_and), 311 BUILTIN_ROW(__sync_fetch_and_xor), 312 BUILTIN_ROW(__sync_fetch_and_nand), 313 314 BUILTIN_ROW(__sync_add_and_fetch), 315 BUILTIN_ROW(__sync_sub_and_fetch), 316 BUILTIN_ROW(__sync_and_and_fetch), 317 BUILTIN_ROW(__sync_or_and_fetch), 318 BUILTIN_ROW(__sync_xor_and_fetch), 319 BUILTIN_ROW(__sync_nand_and_fetch), 320 321 BUILTIN_ROW(__sync_val_compare_and_swap), 322 BUILTIN_ROW(__sync_bool_compare_and_swap), 323 BUILTIN_ROW(__sync_lock_test_and_set), 324 BUILTIN_ROW(__sync_lock_release) 325 }; 326#undef BUILTIN_ROW 327 328 // Determine the index of the size. 329 unsigned SizeIndex; 330 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 331 case 1: SizeIndex = 0; break; 332 case 2: SizeIndex = 1; break; 333 case 4: SizeIndex = 2; break; 334 case 8: SizeIndex = 3; break; 335 case 16: SizeIndex = 4; break; 336 default: 337 return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size) 338 << FirstArg->getType() << FirstArg->getSourceRange(); 339 } 340 341 // Each of these builtins has one pointer argument, followed by some number of 342 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 343 // that we ignore. Find out which row of BuiltinIndices to read from as well 344 // as the number of fixed args. 
345 unsigned BuiltinID = FDecl->getBuiltinID(); 346 unsigned BuiltinIndex, NumFixed = 1; 347 switch (BuiltinID) { 348 default: assert(0 && "Unknown overloaded atomic builtin!"); 349 case Builtin::BI__sync_fetch_and_add: BuiltinIndex = 0; break; 350 case Builtin::BI__sync_fetch_and_sub: BuiltinIndex = 1; break; 351 case Builtin::BI__sync_fetch_and_or: BuiltinIndex = 2; break; 352 case Builtin::BI__sync_fetch_and_and: BuiltinIndex = 3; break; 353 case Builtin::BI__sync_fetch_and_xor: BuiltinIndex = 4; break; 354 case Builtin::BI__sync_fetch_and_nand:BuiltinIndex = 5; break; 355 356 case Builtin::BI__sync_add_and_fetch: BuiltinIndex = 6; break; 357 case Builtin::BI__sync_sub_and_fetch: BuiltinIndex = 7; break; 358 case Builtin::BI__sync_and_and_fetch: BuiltinIndex = 8; break; 359 case Builtin::BI__sync_or_and_fetch: BuiltinIndex = 9; break; 360 case Builtin::BI__sync_xor_and_fetch: BuiltinIndex =10; break; 361 case Builtin::BI__sync_nand_and_fetch:BuiltinIndex =11; break; 362 363 case Builtin::BI__sync_val_compare_and_swap: 364 BuiltinIndex = 12; 365 NumFixed = 2; 366 break; 367 case Builtin::BI__sync_bool_compare_and_swap: 368 BuiltinIndex = 13; 369 NumFixed = 2; 370 break; 371 case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 14; break; 372 case Builtin::BI__sync_lock_release: 373 BuiltinIndex = 15; 374 NumFixed = 0; 375 break; 376 } 377 378 // Now that we know how many fixed arguments we expect, first check that we 379 // have at least that many. 380 if (TheCall->getNumArgs() < 1+NumFixed) 381 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 382 << 0 << TheCall->getCallee()->getSourceRange(); 383 384 385 // Get the decl for the concrete builtin from this, we can tell what the 386 // concrete integer type we should convert to is. 
387 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 388 const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID); 389 IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName); 390 FunctionDecl *NewBuiltinDecl = 391 cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID, 392 TUScope, false, DRE->getLocStart())); 393 const FunctionProtoType *BuiltinFT = 394 NewBuiltinDecl->getType()->getAs<FunctionProtoType>(); 395 ValType = BuiltinFT->getArgType(0)->getAs<PointerType>()->getPointeeType(); 396 397 // If the first type needs to be converted (e.g. void** -> int*), do it now. 398 if (BuiltinFT->getArgType(0) != FirstArg->getType()) { 399 ImpCastExprToType(FirstArg, BuiltinFT->getArgType(0), CastExpr::CK_BitCast); 400 TheCall->setArg(0, FirstArg); 401 } 402 403 // Next, walk the valid ones promoting to the right type. 404 for (unsigned i = 0; i != NumFixed; ++i) { 405 Expr *Arg = TheCall->getArg(i+1); 406 407 // If the argument is an implicit cast, then there was a promotion due to 408 // "...", just remove it now. 409 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) { 410 Arg = ICE->getSubExpr(); 411 ICE->setSubExpr(0); 412 ICE->Destroy(Context); 413 TheCall->setArg(i+1, Arg); 414 } 415 416 // GCC does an implicit conversion to the pointer or integer ValType. This 417 // can fail in some cases (1i -> int**), check for this error case now. 418 CastExpr::CastKind Kind = CastExpr::CK_Unknown; 419 CXXMethodDecl *ConversionDecl = 0; 420 if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg, Kind, 421 ConversionDecl)) 422 return true; 423 424 // Okay, we have something that *can* be converted to the right type. Check 425 // to see if there is a potentially weird extension going on here. This can 426 // happen when you do an atomic operation on something like an char* and 427 // pass in 42. The 42 gets converted to char. This is even more strange 428 // for things like 45.123 -> char, etc. 
429 // FIXME: Do this check. 430 ImpCastExprToType(Arg, ValType, Kind, /*isLvalue=*/false); 431 TheCall->setArg(i+1, Arg); 432 } 433 434 // Switch the DeclRefExpr to refer to the new decl. 435 DRE->setDecl(NewBuiltinDecl); 436 DRE->setType(NewBuiltinDecl->getType()); 437 438 // Set the callee in the CallExpr. 439 // FIXME: This leaks the original parens and implicit casts. 440 Expr *PromotedCall = DRE; 441 UsualUnaryConversions(PromotedCall); 442 TheCall->setCallee(PromotedCall); 443 444 445 // Change the result type of the call to match the result type of the decl. 446 TheCall->setType(NewBuiltinDecl->getResultType()); 447 return false; 448} 449 450 451/// CheckObjCString - Checks that the argument to the builtin 452/// CFString constructor is correct 453/// FIXME: GCC currently emits the following warning: 454/// "warning: input conversion stopped due to an input byte that does not 455/// belong to the input codeset UTF-8" 456/// Note: It might also make sense to do the UTF-16 conversion here (would 457/// simplify the backend). 458bool Sema::CheckObjCString(Expr *Arg) { 459 Arg = Arg->IgnoreParenCasts(); 460 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 461 462 if (!Literal || Literal->isWide()) { 463 Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant) 464 << Arg->getSourceRange(); 465 return true; 466 } 467 468 const char *Data = Literal->getStrData(); 469 unsigned Length = Literal->getByteLength(); 470 471 for (unsigned i = 0; i < Length; ++i) { 472 if (!Data[i]) { 473 Diag(getLocationOfStringLiteralByte(Literal, i), 474 diag::warn_cfstring_literal_contains_nul_character) 475 << Arg->getSourceRange(); 476 break; 477 } 478 } 479 480 return false; 481} 482 483/// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity. 484/// Emit an error and return true on failure, return false on success. 
485bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) { 486 Expr *Fn = TheCall->getCallee(); 487 if (TheCall->getNumArgs() > 2) { 488 Diag(TheCall->getArg(2)->getLocStart(), 489 diag::err_typecheck_call_too_many_args) 490 << 0 /*function call*/ << Fn->getSourceRange() 491 << SourceRange(TheCall->getArg(2)->getLocStart(), 492 (*(TheCall->arg_end()-1))->getLocEnd()); 493 return true; 494 } 495 496 if (TheCall->getNumArgs() < 2) { 497 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 498 << 0 /*function call*/; 499 } 500 501 // Determine whether the current function is variadic or not. 502 bool isVariadic; 503 if (CurBlock) 504 isVariadic = CurBlock->isVariadic; 505 else if (getCurFunctionDecl()) { 506 if (FunctionProtoType* FTP = 507 dyn_cast<FunctionProtoType>(getCurFunctionDecl()->getType())) 508 isVariadic = FTP->isVariadic(); 509 else 510 isVariadic = false; 511 } else { 512 isVariadic = getCurMethodDecl()->isVariadic(); 513 } 514 515 if (!isVariadic) { 516 Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function); 517 return true; 518 } 519 520 // Verify that the second argument to the builtin is the last argument of the 521 // current function or method. 522 bool SecondArgIsLastNamedArgument = false; 523 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 524 525 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 526 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 527 // FIXME: This isn't correct for methods (results in bogus warning). 528 // Get the last formal in the current function. 
529 const ParmVarDecl *LastArg; 530 if (CurBlock) 531 LastArg = *(CurBlock->TheDecl->param_end()-1); 532 else if (FunctionDecl *FD = getCurFunctionDecl()) 533 LastArg = *(FD->param_end()-1); 534 else 535 LastArg = *(getCurMethodDecl()->param_end()-1); 536 SecondArgIsLastNamedArgument = PV == LastArg; 537 } 538 } 539 540 if (!SecondArgIsLastNamedArgument) 541 Diag(TheCall->getArg(1)->getLocStart(), 542 diag::warn_second_parameter_of_va_start_not_last_named_argument); 543 return false; 544} 545 546/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 547/// friends. This is declared to take (...), so we have to check everything. 548bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 549 if (TheCall->getNumArgs() < 2) 550 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 551 << 0 /*function call*/; 552 if (TheCall->getNumArgs() > 2) 553 return Diag(TheCall->getArg(2)->getLocStart(), 554 diag::err_typecheck_call_too_many_args) 555 << 0 /*function call*/ 556 << SourceRange(TheCall->getArg(2)->getLocStart(), 557 (*(TheCall->arg_end()-1))->getLocEnd()); 558 559 Expr *OrigArg0 = TheCall->getArg(0); 560 Expr *OrigArg1 = TheCall->getArg(1); 561 562 // Do standard promotions between the two arguments, returning their common 563 // type. 564 QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false); 565 566 // Make sure any conversions are pushed back into the call; this is 567 // type safe since unordered compare builtins are declared as "_Bool 568 // foo(...)". 569 TheCall->setArg(0, OrigArg0); 570 TheCall->setArg(1, OrigArg1); 571 572 if (OrigArg0->isTypeDependent() || OrigArg1->isTypeDependent()) 573 return false; 574 575 // If the common type isn't a real floating type, then the arguments were 576 // invalid for this operation. 
577 if (!Res->isRealFloatingType()) 578 return Diag(OrigArg0->getLocStart(), 579 diag::err_typecheck_call_invalid_ordered_compare) 580 << OrigArg0->getType() << OrigArg1->getType() 581 << SourceRange(OrigArg0->getLocStart(), OrigArg1->getLocEnd()); 582 583 return false; 584} 585 586/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isnan and 587/// friends. This is declared to take (...), so we have to check everything. 588bool Sema::SemaBuiltinUnaryFP(CallExpr *TheCall) { 589 if (TheCall->getNumArgs() < 1) 590 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) 591 << 0 /*function call*/; 592 if (TheCall->getNumArgs() > 1) 593 return Diag(TheCall->getArg(1)->getLocStart(), 594 diag::err_typecheck_call_too_many_args) 595 << 0 /*function call*/ 596 << SourceRange(TheCall->getArg(1)->getLocStart(), 597 (*(TheCall->arg_end()-1))->getLocEnd()); 598 599 Expr *OrigArg = TheCall->getArg(0); 600 601 if (OrigArg->isTypeDependent()) 602 return false; 603 604 // This operation requires a floating-point number 605 if (!OrigArg->getType()->isRealFloatingType()) 606 return Diag(OrigArg->getLocStart(), 607 diag::err_typecheck_call_invalid_unary_fp) 608 << OrigArg->getType() << OrigArg->getSourceRange(); 609 610 return false; 611} 612 613bool Sema::SemaBuiltinStackAddress(CallExpr *TheCall) { 614 // The signature for these builtins is exact; the only thing we need 615 // to check is that the argument is a constant. 616 SourceLocation Loc; 617 if (!TheCall->getArg(0)->isTypeDependent() && 618 !TheCall->getArg(0)->isValueDependent() && 619 !TheCall->getArg(0)->isIntegerConstantExpr(Context, &Loc)) 620 return Diag(Loc, diag::err_stack_const_level) << TheCall->getSourceRange(); 621 622 return false; 623} 624 625/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 626// This is declared to take (...), so we have to check everything. 
627Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 628 if (TheCall->getNumArgs() < 3) 629 return ExprError(Diag(TheCall->getLocEnd(), 630 diag::err_typecheck_call_too_few_args) 631 << 0 /*function call*/ << TheCall->getSourceRange()); 632 633 unsigned numElements = std::numeric_limits<unsigned>::max(); 634 if (!TheCall->getArg(0)->isTypeDependent() && 635 !TheCall->getArg(1)->isTypeDependent()) { 636 QualType FAType = TheCall->getArg(0)->getType(); 637 QualType SAType = TheCall->getArg(1)->getType(); 638 639 if (!FAType->isVectorType() || !SAType->isVectorType()) { 640 Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector) 641 << SourceRange(TheCall->getArg(0)->getLocStart(), 642 TheCall->getArg(1)->getLocEnd()); 643 return ExprError(); 644 } 645 646 if (!Context.hasSameUnqualifiedType(FAType, SAType)) { 647 Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector) 648 << SourceRange(TheCall->getArg(0)->getLocStart(), 649 TheCall->getArg(1)->getLocEnd()); 650 return ExprError(); 651 } 652 653 numElements = FAType->getAs<VectorType>()->getNumElements(); 654 if (TheCall->getNumArgs() != numElements+2) { 655 if (TheCall->getNumArgs() < numElements+2) 656 return ExprError(Diag(TheCall->getLocEnd(), 657 diag::err_typecheck_call_too_few_args) 658 << 0 /*function call*/ << TheCall->getSourceRange()); 659 return ExprError(Diag(TheCall->getLocEnd(), 660 diag::err_typecheck_call_too_many_args) 661 << 0 /*function call*/ << TheCall->getSourceRange()); 662 } 663 } 664 665 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 666 if (TheCall->getArg(i)->isTypeDependent() || 667 TheCall->getArg(i)->isValueDependent()) 668 continue; 669 670 llvm::APSInt Result(32); 671 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 672 return ExprError(Diag(TheCall->getLocStart(), 673 diag::err_shufflevector_nonconstant_argument) 674 << TheCall->getArg(i)->getSourceRange()); 675 676 if (Result.getActiveBits() > 64 || 
Result.getZExtValue() >= numElements*2) 677 return ExprError(Diag(TheCall->getLocStart(), 678 diag::err_shufflevector_argument_too_large) 679 << TheCall->getArg(i)->getSourceRange()); 680 } 681 682 llvm::SmallVector<Expr*, 32> exprs; 683 684 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 685 exprs.push_back(TheCall->getArg(i)); 686 TheCall->setArg(i, 0); 687 } 688 689 return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(), 690 exprs.size(), exprs[0]->getType(), 691 TheCall->getCallee()->getLocStart(), 692 TheCall->getRParenLoc())); 693} 694 695/// SemaBuiltinPrefetch - Handle __builtin_prefetch. 696// This is declared to take (const void*, ...) and can take two 697// optional constant int args. 698bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 699 unsigned NumArgs = TheCall->getNumArgs(); 700 701 if (NumArgs > 3) 702 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_many_args) 703 << 0 /*function call*/ << TheCall->getSourceRange(); 704 705 // Argument 0 is checked for us and the remaining arguments must be 706 // constant integers. 707 for (unsigned i = 1; i != NumArgs; ++i) { 708 Expr *Arg = TheCall->getArg(i); 709 if (Arg->isTypeDependent()) 710 continue; 711 712 if (!Arg->getType()->isIntegralType()) 713 return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_arg_type) 714 << Arg->getSourceRange(); 715 716 ImpCastExprToType(Arg, Context.IntTy, CastExpr::CK_IntegralCast); 717 TheCall->setArg(i, Arg); 718 719 if (Arg->isValueDependent()) 720 continue; 721 722 llvm::APSInt Result; 723 if (!Arg->isIntegerConstantExpr(Result, Context)) 724 return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_arg_ice) 725 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 726 727 // FIXME: gcc issues a warning and rewrites these to 0. These 728 // seems especially odd for the third argument since the default 729 // is 3. 
730 if (i == 1) { 731 if (Result.getLimitedValue() > 1) 732 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) 733 << "0" << "1" << Arg->getSourceRange(); 734 } else { 735 if (Result.getLimitedValue() > 3) 736 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) 737 << "0" << "3" << Arg->getSourceRange(); 738 } 739 } 740 741 return false; 742} 743 744/// SemaBuiltinEHReturnDataRegNo - Handle __builtin_eh_return_data_regno, the 745/// operand must be an integer constant. 746bool Sema::SemaBuiltinEHReturnDataRegNo(CallExpr *TheCall) { 747 llvm::APSInt Result; 748 if (!TheCall->getArg(0)->isIntegerConstantExpr(Result, Context)) 749 return Diag(TheCall->getLocStart(), diag::err_expr_not_ice) 750 << TheCall->getArg(0)->getSourceRange(); 751 752 return false; 753} 754 755 756/// SemaBuiltinObjectSize - Handle __builtin_object_size(void *ptr, 757/// int type). This simply type checks that type is one of the defined 758/// constants (0-3). 759// For compatability check 0-3, llvm only handles 0 and 2. 
760bool Sema::SemaBuiltinObjectSize(CallExpr *TheCall) { 761 Expr *Arg = TheCall->getArg(1); 762 if (Arg->isTypeDependent()) 763 return false; 764 765 QualType ArgType = Arg->getType(); 766 const BuiltinType *BT = ArgType->getAs<BuiltinType>(); 767 llvm::APSInt Result(32); 768 if (!BT || BT->getKind() != BuiltinType::Int) 769 return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument) 770 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 771 772 if (Arg->isValueDependent()) 773 return false; 774 775 if (!Arg->isIntegerConstantExpr(Result, Context)) { 776 return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument) 777 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 778 } 779 780 if (Result.getSExtValue() < 0 || Result.getSExtValue() > 3) { 781 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) 782 << "0" << "3" << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 783 } 784 785 return false; 786} 787 788/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 789/// This checks that val is a constant 1. 790bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 791 Expr *Arg = TheCall->getArg(1); 792 if (Arg->isTypeDependent() || Arg->isValueDependent()) 793 return false; 794 795 llvm::APSInt Result(32); 796 if (!Arg->isIntegerConstantExpr(Result, Context) || Result != 1) 797 return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val) 798 << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); 799 800 return false; 801} 802 803// Handle i > 1 ? 
"x" : "y", recursively

/// SemaCheckStringLiteral - Determine whether E is (or transparently reaches,
/// through parens, implicit casts, ?:, const variables, or format_arg
/// functions) a constant format-string literal, and if so run the printf
/// check on it.  Returns true iff a literal format string was found and
/// checked.
bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
                                  bool HasVAListArg,
                                  unsigned format_idx, unsigned firstDataArg) {
  // Dependent expressions in templates cannot be reasoned about here;
  // defer until instantiation.
  if (E->isTypeDependent() || E->isValueDependent())
    return false;

  switch (E->getStmtClass()) {
  case Stmt::ConditionalOperatorClass: {
    // Both arms must check out as literal format strings for the whole
    // conditional to count as one.
    const ConditionalOperator *C = cast<ConditionalOperator>(E);
    return SemaCheckStringLiteral(C->getTrueExpr(), TheCall,
                                  HasVAListArg, format_idx, firstDataArg)
        && SemaCheckStringLiteral(C->getRHS(), TheCall,
                                  HasVAListArg, format_idx, firstDataArg);
  }

  case Stmt::ImplicitCastExprClass: {
    // Look through implicit casts (e.g. array-to-pointer decay).
    const ImplicitCastExpr *Expr = cast<ImplicitCastExpr>(E);
    return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
                                  format_idx, firstDataArg);
  }

  case Stmt::ParenExprClass: {
    // Look through parentheses.
    const ParenExpr *Expr = cast<ParenExpr>(E);
    return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
                                  format_idx, firstDataArg);
  }

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      // "const char fmt[]" and "const char * const fmt" both qualify.
      if (const ArrayType *AT = Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        isConstant = T.isConstant(Context) &&
                     PT->getPointeeType().isConstant(Context);
      }

      if (isConstant) {
        // Chase the variable's initializer and check that instead.
        const VarDecl *Def = 0;
        if (const Expr *Init = VD->getDefinition(Def))
          return SemaCheckStringLiteral(Init, TheCall,
                                        HasVAListArg, format_idx,
                                        firstDataArg);
      }

      // For vprintf* functions (i.e., HasVAListArg==true), we add a
      // special check to see if the format string is a function parameter
      // of the function calling the printf function.  If the function
      // has an attribute indicating it is a printf-like function, then we
      // should suppress warnings concerning non-literals being used in a call
      // to a vprintf function.  For example:
      //
      // void
      // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
      //      va_list ap;
      //      va_start(ap, fmt);
      //      vprintf(fmt, ap);  // Do NOT emit a warning about "fmt".
      //      ...
      //
      //
      // FIXME: We don't have full attribute support yet, so just check to see
      // if the argument is a DeclRefExpr that references a parameter.  We'll
      // add proper support for checking the attribute later.
      if (HasVAListArg)
        if (isa<ParmVarDecl>(VD))
          return true;
    }

    return false;
  }

  case Stmt::CallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const ImplicitCastExpr *ICE
          = dyn_cast<ImplicitCastExpr>(CE->getCallee())) {
      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
        if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
          if (const FormatArgAttr *FA = FD->getAttr<FormatArgAttr>()) {
            // A format_arg function forwards its format argument unchanged
            // (e.g. gettext), so check that argument instead.  Attribute
            // indices are 1-based.
            unsigned ArgIndex = FA->getFormatIdx();
            const Expr *Arg = CE->getArg(ArgIndex - 1);

            return SemaCheckStringLiteral(Arg, TheCall, HasVAListArg,
                                          format_idx, firstDataArg);
          }
        }
      }
    }

    return false;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    // Base case: an actual (ObjC or C) string literal — run the check.
    const StringLiteral *StrE = NULL;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      CheckPrintfString(StrE, E, TheCall, HasVAListArg, format_idx,
                        firstDataArg);
      return true;
    }

    return false;
  }

  default:
    return false;
  }
}

/// CheckNonNullArguments - Warn when a null pointer constant is passed in an
/// argument position the callee declared nonnull via the nonnull attribute.
void
Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
                            const CallExpr *TheCall) {
  // The attribute stores the (0-based) indices of the nonnull arguments.
  for (NonNullAttr::iterator i = NonNull->begin(), e = NonNull->end();
       i != e; ++i) {
    const Expr *ArgExpr = TheCall->getArg(*i);
    if (ArgExpr->isNullPointerConstant(Context,
                                       Expr::NPC_ValueDependentIsNotNull))
      Diag(TheCall->getCallee()->getLocStart(), diag::warn_null_arg)
        << ArgExpr->getSourceRange();
  }
}

/// CheckPrintfArguments - Check calls to printf (and similar functions) for
/// correct use of format strings.
///
/// HasVAListArg - A predicate indicating whether the printf-like
/// function is passed an explicit va_arg argument (e.g., vprintf)
///
/// format_idx - The index into Args for the format string.
942/// 943/// Improper format strings to functions in the printf family can be 944/// the source of bizarre bugs and very serious security holes. A 945/// good source of information is available in the following paper 946/// (which includes additional references): 947/// 948/// FormatGuard: Automatic Protection From printf Format String 949/// Vulnerabilities, Proceedings of the 10th USENIX Security Symposium, 2001. 950/// 951/// Functionality implemented: 952/// 953/// We can statically check the following properties for string 954/// literal format strings for non v.*printf functions (where the 955/// arguments are passed directly): 956// 957/// (1) Are the number of format conversions equal to the number of 958/// data arguments? 959/// 960/// (2) Does each format conversion correctly match the type of the 961/// corresponding data argument? (TODO) 962/// 963/// Moreover, for all printf functions we can: 964/// 965/// (3) Check for a missing format string (when not caught by type checking). 966/// 967/// (4) Check for no-operation flags; e.g. using "#" with format 968/// conversion 'c' (TODO) 969/// 970/// (5) Check the use of '%n', a major source of security holes. 971/// 972/// (6) Check for malformed format conversions that don't specify anything. 973/// 974/// (7) Check for empty format strings. e.g: printf(""); 975/// 976/// (8) Check that the format string is a wide literal. 977/// 978/// All of these checks can be done by parsing the format string. 979/// 980/// For now, we ONLY do (1), (3), (5), (6), (7), and (8). 981void 982Sema::CheckPrintfArguments(const CallExpr *TheCall, bool HasVAListArg, 983 unsigned format_idx, unsigned firstDataArg) { 984 const Expr *Fn = TheCall->getCallee(); 985 986 // The way the format attribute works in GCC, the implicit this argument 987 // of member functions is counted. However, it doesn't appear in our own 988 // lists, so decrement format_idx in that case. 
989 if (isa<CXXMemberCallExpr>(TheCall)) { 990 // Catch a format attribute mistakenly referring to the object argument. 991 if (format_idx == 0) 992 return; 993 --format_idx; 994 if(firstDataArg != 0) 995 --firstDataArg; 996 } 997 998 // CHECK: printf-like function is called with no format string. 999 if (format_idx >= TheCall->getNumArgs()) { 1000 Diag(TheCall->getRParenLoc(), diag::warn_printf_missing_format_string) 1001 << Fn->getSourceRange(); 1002 return; 1003 } 1004 1005 const Expr *OrigFormatExpr = TheCall->getArg(format_idx)->IgnoreParenCasts(); 1006 1007 // CHECK: format string is not a string literal. 1008 // 1009 // Dynamically generated format strings are difficult to 1010 // automatically vet at compile time. Requiring that format strings 1011 // are string literals: (1) permits the checking of format strings by 1012 // the compiler and thereby (2) can practically remove the source of 1013 // many format string exploits. 1014 1015 // Format string can be either ObjC string (e.g. @"%d") or 1016 // C string (e.g. "%d") 1017 // ObjC string uses the same format specifiers as C string, so we can use 1018 // the same format string checking logic for both ObjC and C strings. 1019 if (SemaCheckStringLiteral(OrigFormatExpr, TheCall, HasVAListArg, format_idx, 1020 firstDataArg)) 1021 return; // Literal format string found, check done! 1022 1023 // If there are no arguments specified, warn with -Wformat-security, otherwise 1024 // warn only with -Wformat-nonliteral. 
1025 if (TheCall->getNumArgs() == format_idx+1) 1026 Diag(TheCall->getArg(format_idx)->getLocStart(), 1027 diag::warn_printf_nonliteral_noargs) 1028 << OrigFormatExpr->getSourceRange(); 1029 else 1030 Diag(TheCall->getArg(format_idx)->getLocStart(), 1031 diag::warn_printf_nonliteral) 1032 << OrigFormatExpr->getSourceRange(); 1033} 1034 1035void Sema::CheckPrintfString(const StringLiteral *FExpr, 1036 const Expr *OrigFormatExpr, 1037 const CallExpr *TheCall, bool HasVAListArg, 1038 unsigned format_idx, unsigned firstDataArg) { 1039 1040 static bool UseAlternatePrintfChecking = false; 1041 if (UseAlternatePrintfChecking) { 1042 AlternateCheckPrintfString(FExpr, OrigFormatExpr, TheCall, 1043 HasVAListArg, format_idx, firstDataArg); 1044 return; 1045 } 1046 1047 1048 const ObjCStringLiteral *ObjCFExpr = 1049 dyn_cast<ObjCStringLiteral>(OrigFormatExpr); 1050 1051 // CHECK: is the format string a wide literal? 1052 if (FExpr->isWide()) { 1053 Diag(FExpr->getLocStart(), 1054 diag::warn_printf_format_string_is_wide_literal) 1055 << OrigFormatExpr->getSourceRange(); 1056 return; 1057 } 1058 1059 // Str - The format string. NOTE: this is NOT null-terminated! 1060 const char *Str = FExpr->getStrData(); 1061 1062 // CHECK: empty format string? 1063 unsigned StrLen = FExpr->getByteLength(); 1064 1065 if (StrLen == 0) { 1066 Diag(FExpr->getLocStart(), diag::warn_printf_empty_format_string) 1067 << OrigFormatExpr->getSourceRange(); 1068 return; 1069 } 1070 1071 // We process the format string using a binary state machine. The 1072 // current state is stored in CurrentState. 1073 enum { 1074 state_OrdChr, 1075 state_Conversion 1076 } CurrentState = state_OrdChr; 1077 1078 // numConversions - The number of conversions seen so far. This is 1079 // incremented as we traverse the format string. 1080 unsigned numConversions = 0; 1081 1082 // numDataArgs - The number of data arguments after the format 1083 // string. This can only be determined for non vprintf-like 1084 // functions. 
  // For those functions, this value is 1 (the sole
  //  va_arg argument).
  unsigned numDataArgs = TheCall->getNumArgs()-firstDataArg;

  // Inspect the format string.
  unsigned StrIdx = 0;

  // LastConversionIdx - Index within the format string where we last saw
  //  a '%' character that starts a new format conversion.
  unsigned LastConversionIdx = 0;

  for (; StrIdx < StrLen; ++StrIdx) {

    // Is the number of detected conversions greater than
    // the number of matching data arguments?  If so, stop.
    if (!HasVAListArg && numConversions > numDataArgs) break;

    // Handle "\0"
    if (Str[StrIdx] == '\0') {
      // The string returned by getStrData() is not null-terminated,
      //  so the presence of a null character is likely an error.
      Diag(getLocationOfStringLiteralByte(FExpr, StrIdx),
           diag::warn_printf_format_string_contains_null_char)
        << OrigFormatExpr->getSourceRange();
      return;
    }

    // Ordinary characters (not processing a format conversion).
    if (CurrentState == state_OrdChr) {
      if (Str[StrIdx] == '%') {
        CurrentState = state_Conversion;
        LastConversionIdx = StrIdx;
      }
      continue;
    }

    // Seen '%'.  Now processing a format conversion.
    switch (Str[StrIdx]) {
    // Handle dynamic precision or width specifier.
    case '*': {
      // '*' consumes a data argument of its own.
      ++numConversions;

      if (!HasVAListArg) {
        if (numConversions > numDataArgs) {
          SourceLocation Loc = getLocationOfStringLiteralByte(FExpr, StrIdx);

          // A '*' directly preceded by '.' is a precision, otherwise a width.
          if (Str[StrIdx-1] == '.')
            Diag(Loc, diag::warn_printf_asterisk_precision_missing_arg)
              << OrigFormatExpr->getSourceRange();
          else
            Diag(Loc, diag::warn_printf_asterisk_width_missing_arg)
              << OrigFormatExpr->getSourceRange();

          // Don't do any more checking.  We'll just emit spurious errors.
          return;
        }

        // Perform type checking on width/precision specifier.
        // NOTE(review): this indexes the data argument as
        // format_idx+numConversions, which assumes
        // firstDataArg == format_idx+1 — confirm for callers where that
        // does not hold.
        const Expr *E = TheCall->getArg(format_idx+numConversions);
        if (const BuiltinType *BT = E->getType()->getAs<BuiltinType>())
          if (BT->getKind() == BuiltinType::Int)
            break;

        SourceLocation Loc = getLocationOfStringLiteralByte(FExpr, StrIdx);

        if (Str[StrIdx-1] == '.')
          Diag(Loc, diag::warn_printf_asterisk_precision_wrong_type)
            << E->getType() << E->getSourceRange();
        else
          Diag(Loc, diag::warn_printf_asterisk_width_wrong_type)
            << E->getType() << E->getSourceRange();

        break;
      }
    }

    // Characters which can terminate a format conversion
    // (e.g. "%d").  Characters that specify length modifiers or
    // other flags are handled by the default case below.
    //
    // FIXME: additional checks will go into the following cases.
    case 'i':
    case 'd':
    case 'o':
    case 'u':
    case 'x':
    case 'X':
    case 'e':
    case 'E':
    case 'f':
    case 'F':
    case 'g':
    case 'G':
    case 'a':
    case 'A':
    case 'c':
    case 's':
    case 'p':
      ++numConversions;
      CurrentState = state_OrdChr;
      break;

    case 'm':
      // '%m' (glibc extension: strerror(errno)) consumes no argument.
      // FIXME: Warn in situations where this isn't supported!
      CurrentState = state_OrdChr;
      break;

    // CHECK: Are we using "%n"?  Issue a warning.
    case 'n': {
      ++numConversions;
      CurrentState = state_OrdChr;
      SourceLocation Loc = getLocationOfStringLiteralByte(FExpr,
                                                          LastConversionIdx);

      Diag(Loc, diag::warn_printf_write_back)<<OrigFormatExpr->getSourceRange();
      break;
    }

    // Handle "%@"
    case '@':
      // %@ is allowed in ObjC format strings only.
      if (ObjCFExpr != NULL)
        CurrentState = state_OrdChr;
      else {
        // Issue a warning: invalid format conversion.
        SourceLocation Loc =
          getLocationOfStringLiteralByte(FExpr, LastConversionIdx);

        Diag(Loc, diag::warn_printf_invalid_conversion)
          << std::string(Str+LastConversionIdx,
                         Str+std::min(LastConversionIdx+2, StrLen))
          << OrigFormatExpr->getSourceRange();
      }
      ++numConversions;
      break;

    // Handle "%%"
    case '%':
      // Sanity check: Was the first "%" character the previous one?
      // If not, we will assume that we have a malformed format
      // conversion, and that the current "%" character is the start
      // of a new conversion.
      if (StrIdx - LastConversionIdx == 1)
        CurrentState = state_OrdChr;
      else {
        // Issue a warning: invalid format conversion.
        SourceLocation Loc =
          getLocationOfStringLiteralByte(FExpr, LastConversionIdx);

        Diag(Loc, diag::warn_printf_invalid_conversion)
          << std::string(Str+LastConversionIdx, Str+StrIdx)
          << OrigFormatExpr->getSourceRange();

        // This conversion is broken.  Advance to the next format
        // conversion.
        LastConversionIdx = StrIdx;
        ++numConversions;
      }
      break;

    default:
      // This case catches all other characters: flags, widths, etc.
      // We should eventually process those as well.
      break;
    }
  }

  // The string ended while still inside a conversion: it is malformed.
  if (CurrentState == state_Conversion) {
    // Issue a warning: invalid format conversion.
    SourceLocation Loc =
      getLocationOfStringLiteralByte(FExpr, LastConversionIdx);

    Diag(Loc, diag::warn_printf_invalid_conversion)
      << std::string(Str+LastConversionIdx,
                     Str+std::min(LastConversionIdx+2, StrLen))
      << OrigFormatExpr->getSourceRange();
    return;
  }

  if (!HasVAListArg) {
    // CHECK: Does the number of format conversions exceed the number
    //  of data arguments?
    if (numConversions > numDataArgs) {
      SourceLocation Loc =
        getLocationOfStringLiteralByte(FExpr, LastConversionIdx);

      Diag(Loc, diag::warn_printf_insufficient_data_args)
        << OrigFormatExpr->getSourceRange();
    }
    // CHECK: Does the number of data arguments exceed the number of
    //  format conversions in the format string?
    else if (numConversions < numDataArgs)
      Diag(TheCall->getArg(format_idx+numConversions+1)->getLocStart(),
           diag::warn_printf_too_many_data_args)
        << OrigFormatExpr->getSourceRange();
  }
}

/// AlternateCheckPrintfString - Alternate printf-format checking
/// implementation; currently an empty stub, reachable only via the static
/// debugging flag in CheckPrintfString.
void
Sema::AlternateCheckPrintfString(const StringLiteral *FExpr,
                                 const Expr *OrigFormatExpr,
                                 const CallExpr *TheCall, bool HasVAListArg,
                                 unsigned format_idx, unsigned firstDataArg) {


}

//===--- CHECK: Return Address of Stack Variable --------------------------===//

static DeclRefExpr* EvalVal(Expr *E);
static DeclRefExpr* EvalAddr(Expr* E);

/// CheckReturnStackAddr - Check if a return statement returns the address
///   of a stack variable.
void
Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
                           SourceLocation ReturnLoc) {

  // Perform checking for returned stack addresses.
  if (lhsType->isPointerType() || lhsType->isBlockPointerType()) {
    if (DeclRefExpr *DR = EvalAddr(RetValExp))
      Diag(DR->getLocStart(), diag::warn_ret_stack_addr)
        << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();

    // Skip over implicit cast expressions when checking for block expressions.
    RetValExp = RetValExp->IgnoreParenCasts();

    // Returning a block that captures local declarations is an error:
    // the block would outlive the captured stack storage.
    if (BlockExpr *C = dyn_cast<BlockExpr>(RetValExp))
      if (C->hasBlockDeclRefExprs())
        Diag(C->getLocStart(), diag::err_ret_local_block)
          << C->getSourceRange();

    // Warn on returning the address of a label (GNU "&&label" extension).
    if (AddrLabelExpr *ALE = dyn_cast<AddrLabelExpr>(RetValExp))
      Diag(ALE->getLocStart(), diag::warn_ret_addr_label)
        << ALE->getSourceRange();

  } else if (lhsType->isReferenceType()) {
    // Perform checking for stack values returned by reference.
    // Check for a reference to the stack.
    if (DeclRefExpr *DR = EvalVal(RetValExp))
      Diag(DR->getLocStart(), diag::warn_ret_stack_ref)
        << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
  }
}

/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
///  check if the expression in a return statement evaluates to an address
///  to a location on the stack.  The recursion is used to traverse the
///  AST of the return expression, with recursion backtracking when we
///  encounter a subexpression that (1) clearly does not lead to the address
///  of a stack variable or (2) is something we cannot determine leads to
///  the address of a stack variable based on such local checking.
///
///  EvalAddr processes expressions that are pointers that are used as
///  references (and not L-values).  EvalVal handles all other values.
///  At the base case of the recursion is a check for a DeclRefExpr* that
///  refers to a stack variable.
///
/// This implementation handles:
///
///   * pointer-to-pointer casts
///   * implicit conversions from array references to pointers
///   * taking the address of fields
///   * arbitrary interplay between "&" and "*" operators
///   * pointer arithmetic from an address of a stack variable
///   * taking the address of an array element where the array is on the stack
static DeclRefExpr* EvalAddr(Expr *E) {
  // We should only be called for evaluating pointer expressions.
  assert((E->getType()->isAnyPointerType() ||
          E->getType()->isBlockPointerType() ||
          E->getType()->isObjCQualifiedIdType()) &&
         "EvalAddr only works on pointers");

  // Our "symbolic interpreter" is just a dispatch off the currently
  // viewed AST node.  We then recursively traverse the AST by calling
  // EvalAddr and EvalVal appropriately.
  switch (E->getStmtClass()) {
  case Stmt::ParenExprClass:
    // Ignore parentheses.
    return EvalAddr(cast<ParenExpr>(E)->getSubExpr());

  case Stmt::UnaryOperatorClass: {
    // The only unary operator that makes sense to handle here
    // is AddrOf.  All others don't make sense as pointers.
    UnaryOperator *U = cast<UnaryOperator>(E);

    if (U->getOpcode() == UnaryOperator::AddrOf)
      return EvalVal(U->getSubExpr());
    else
      return NULL;
  }

  case Stmt::BinaryOperatorClass: {
    // Handle pointer arithmetic.  All other binary operators are not valid
    // in this context.
    BinaryOperator *B = cast<BinaryOperator>(E);
    BinaryOperator::Opcode op = B->getOpcode();

    if (op != BinaryOperator::Add && op != BinaryOperator::Sub)
      return NULL;

    Expr *Base = B->getLHS();

    // Determine which argument is the real pointer base.  It could be
    // the RHS argument instead of the LHS.
    if (!Base->getType()->isPointerType()) Base = B->getRHS();

    assert (Base->getType()->isPointerType());
    return EvalAddr(Base);
  }

  // For conditional operators we need to see if either the LHS or RHS are
  // valid DeclRefExpr*s.  If one of them is valid, we return it.
  case Stmt::ConditionalOperatorClass: {
    ConditionalOperator *C = cast<ConditionalOperator>(E);

    // Handle the GNU extension for missing LHS (x ?: y).
    if (Expr *lhsExpr = C->getLHS())
      if (DeclRefExpr* LHS = EvalAddr(lhsExpr))
        return LHS;

    return EvalAddr(C->getRHS());
  }

  // For casts, we need to handle conversions from arrays to
  // pointer values, and pointer-to-pointer conversions.
  case Stmt::ImplicitCastExprClass:
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXFunctionalCastExprClass: {
    Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
    QualType T = SubExpr->getType();

    if (SubExpr->getType()->isPointerType() ||
        SubExpr->getType()->isBlockPointerType() ||
        SubExpr->getType()->isObjCQualifiedIdType())
      return EvalAddr(SubExpr);
    else if (T->isArrayType())
      // Array-to-pointer decay: the array itself is an l-value.
      return EvalVal(SubExpr);
    else
      return 0;
  }

  // C++ casts.  For dynamic casts, static casts, and const casts, we
  // are always converting from a pointer-to-pointer, so we just blow
  // through the cast.  In the case the dynamic cast doesn't fail (and
  // return NULL), we take the conservative route and report cases
  // where we return the address of a stack variable.
  // FIXME: The comment above is wrong; we're not always converting
  // from pointer to pointer.  I'm guessing that this code should also
  // handle references to objects.
  case Stmt::CXXStaticCastExprClass:
  case Stmt::CXXDynamicCastExprClass:
  case Stmt::CXXConstCastExprClass:
  case Stmt::CXXReinterpretCastExprClass: {
    Expr *S = cast<CXXNamedCastExpr>(E)->getSubExpr();
    if (S->getType()->isPointerType() || S->getType()->isBlockPointerType())
      return EvalAddr(S);
    else
      return NULL;
  }

  // Everything else: we simply don't reason about them.
  default:
    return NULL;
  }
}


/// EvalVal - This function complements EvalAddr in the mutual recursion.
///  See the comments for EvalAddr for more details.
static DeclRefExpr* EvalVal(Expr *E) {

  // We should only be called for evaluating non-pointer expressions, or
  // expressions with a pointer type that are not used as references but
  // instead are l-values (e.g., DeclRefExpr with a pointer type).

  // Our "symbolic interpreter" is just a dispatch off the currently
  // viewed AST node.  We then recursively traverse the AST by calling
  // EvalAddr and EvalVal appropriately.
  switch (E->getStmtClass()) {
  case Stmt::DeclRefExprClass: {
    // DeclRefExpr: the base case.  When we hit a DeclRefExpr we are looking
    //  at code that refers to a variable's name.  We check if it has local
    //  storage within the function, and if so, return the expression.
    DeclRefExpr *DR = cast<DeclRefExpr>(E);

    if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
      // Reference-typed locals bind to some other object, so only flag
      // true stack objects.
      if (V->hasLocalStorage() && !V->getType()->isReferenceType()) return DR;

    return NULL;
  }

  case Stmt::ParenExprClass:
    // Ignore parentheses.
    return EvalVal(cast<ParenExpr>(E)->getSubExpr());

  case Stmt::UnaryOperatorClass: {
    // The only unary operator that makes sense to handle here
    // is Deref.  All others don't resolve to a "name."  This includes
    // handling all sorts of rvalues passed to a unary operator.
    UnaryOperator *U = cast<UnaryOperator>(E);

    // "*p" yields the object p points to; evaluate p as an address.
    if (U->getOpcode() == UnaryOperator::Deref)
      return EvalAddr(U->getSubExpr());

    return NULL;
  }

  case Stmt::ArraySubscriptExprClass: {
    // Array subscripts are potential references to data on the stack.  We
    // retrieve the DeclRefExpr* for the array variable if it indeed
    // has local storage.
    return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase());
  }

  case Stmt::ConditionalOperatorClass: {
    // For conditional operators we need to see if either the LHS or RHS are
    // non-NULL DeclRefExpr's.  If one is non-NULL, we return it.
    ConditionalOperator *C = cast<ConditionalOperator>(E);

    // Handle the GNU extension for missing LHS (x ?: y).
    if (Expr *lhsExpr = C->getLHS())
      if (DeclRefExpr *LHS = EvalVal(lhsExpr))
        return LHS;

    return EvalVal(C->getRHS());
  }

  // Accesses to members are potential references to data on the stack.
  case Stmt::MemberExprClass: {
    MemberExpr *M = cast<MemberExpr>(E);

    // Check for indirect access.  We only want direct field accesses.
    if (!M->isArrow())
      return EvalVal(M->getBase());
    else
      return NULL;
  }

  // Everything else: we simply don't reason about them.
  default:
    return NULL;
  }
}

//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//

/// Check for comparisons of floating point operands using != and ==.
/// Issue a warning if these are not self-comparisons, as they are not likely
/// to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation loc, Expr* lex, Expr *rex) {
  bool EmitWarning = true;

  Expr* LeftExprSansParen = lex->IgnoreParens();
  Expr* RightExprSansParen = rex->IgnoreParens();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        EmitWarning = false;


  // Special case: check for comparisons against literals that can be exactly
  //  represented by APFloat.  In such cases, do not emit a warning.  This
  //  is a heuristic: often comparison against such literals are used to
  //  detect if a value in a variable has not changed.  This clearly can
  //  lead to false negatives.
  if (EmitWarning) {
    if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
      if (FLL->isExact())
        EmitWarning = false;
    } else
      if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)){
        if (FLR->isExact())
          EmitWarning = false;
      }
  }

  // Check for comparisons with builtin calls (e.g. nan(), inf());
  // suppress the warning for those too.
  if (EmitWarning)
    if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
      if (CL->isBuiltinCall(Context))
        EmitWarning = false;

  if (EmitWarning)
    if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
      if (CR->isBuiltinCall(Context))
        EmitWarning = false;

  // Emit the diagnostic.
  if (EmitWarning)
    Diag(loc, diag::warn_floatingpoint_eq)
      << lex->getSourceRange() << rex->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int.
  unsigned Width;

  /// True if the int is known not to have negative values.
  bool NonNegative;

  // NOTE: the default constructor deliberately leaves the members
  // uninitialized; callers must assign before reading.
  IntRange() {}
  IntRange(unsigned Width, bool NonNegative)
    : Width(Width), NonNegative(NonNegative)
  {}

  // Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  // Returns the range of an integral type.
  static IntRange forType(ASTContext &C, QualType T) {
    return forCanonicalType(C, T->getCanonicalTypeInternal().getTypePtr());
  }

  // Returns the range of an integral type based on its canonical
  // representation.
  static IntRange forCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Look through vector/complex element types and enum integer types
    // to reach the underlying builtin integer type.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = ET->getDecl()->getIntegerType().getTypePtr();

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  // Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(const IntRange &L, const IntRange &R) {
    return IntRange(std::max(L.Width, R.Width),
                    L.NonNegative && R.NonNegative);
  }

  // Returns the infimum of two ranges: i.e. their aggressive merge.
  static IntRange meet(const IntRange &L, const IntRange &R) {
    return IntRange(std::min(L.Width, R.Width),
                    L.NonNegative || R.NonNegative);
  }
};

/// GetValueRange - Compute the active range of a known integer value,
/// truncating the value in place to MaxWidth bits if it is wider.
IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

/// GetValueRange - Compute the active range of an evaluated constant,
/// merging element ranges for vector and complex-integer results.
IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                       unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    // Join the ranges of all vector elements.
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case.  It would be nice if APValue
  // preserved this.
  assert(result.isLValue());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerType());
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
1684/// 1685/// \param MaxWidth - the width to which the value will be truncated 1686IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) { 1687 E = E->IgnoreParens(); 1688 1689 // Try a full evaluation first. 1690 Expr::EvalResult result; 1691 if (E->Evaluate(result, C)) 1692 return GetValueRange(C, result.Val, E->getType(), MaxWidth); 1693 1694 // I think we only want to look through implicit casts here; if the 1695 // user has an explicit widening cast, we should treat the value as 1696 // being of the new, wider type. 1697 if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) { 1698 if (CE->getCastKind() == CastExpr::CK_NoOp) 1699 return GetExprRange(C, CE->getSubExpr(), MaxWidth); 1700 1701 IntRange OutputTypeRange = IntRange::forType(C, CE->getType()); 1702 1703 bool isIntegerCast = (CE->getCastKind() == CastExpr::CK_IntegralCast); 1704 if (!isIntegerCast && CE->getCastKind() == CastExpr::CK_Unknown) 1705 isIntegerCast = CE->getSubExpr()->getType()->isIntegerType(); 1706 1707 // Assume that non-integer casts can span the full range of the type. 1708 if (!isIntegerCast) 1709 return OutputTypeRange; 1710 1711 IntRange SubRange 1712 = GetExprRange(C, CE->getSubExpr(), 1713 std::min(MaxWidth, OutputTypeRange.Width)); 1714 1715 // Bail out if the subexpr's range is as wide as the cast type. 1716 if (SubRange.Width >= OutputTypeRange.Width) 1717 return OutputTypeRange; 1718 1719 // Otherwise, we take the smaller width, and we're non-negative if 1720 // either the output type or the subexpr is. 1721 return IntRange(SubRange.Width, 1722 SubRange.NonNegative || OutputTypeRange.NonNegative); 1723 } 1724 1725 if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 1726 // If we can fold the condition, just take that operand. 1727 bool CondResult; 1728 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 1729 return GetExprRange(C, CondResult ? 
CO->getTrueExpr() 1730 : CO->getFalseExpr(), 1731 MaxWidth); 1732 1733 // Otherwise, conservatively merge. 1734 IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth); 1735 IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth); 1736 return IntRange::join(L, R); 1737 } 1738 1739 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 1740 switch (BO->getOpcode()) { 1741 1742 // Boolean-valued operations are single-bit and positive. 1743 case BinaryOperator::LAnd: 1744 case BinaryOperator::LOr: 1745 case BinaryOperator::LT: 1746 case BinaryOperator::GT: 1747 case BinaryOperator::LE: 1748 case BinaryOperator::GE: 1749 case BinaryOperator::EQ: 1750 case BinaryOperator::NE: 1751 return IntRange::forBoolType(); 1752 1753 // Operations with opaque sources are black-listed. 1754 case BinaryOperator::PtrMemD: 1755 case BinaryOperator::PtrMemI: 1756 return IntRange::forType(C, E->getType()); 1757 1758 // Bitwise-and uses the *infinum* of the two source ranges. 1759 case BinaryOperator::And: 1760 return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth), 1761 GetExprRange(C, BO->getRHS(), MaxWidth)); 1762 1763 // Left shift gets black-listed based on a judgement call. 1764 case BinaryOperator::Shl: 1765 return IntRange::forType(C, E->getType()); 1766 1767 // Right shift by a constant can narrow its left argument. 1768 case BinaryOperator::Shr: { 1769 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth); 1770 1771 // If the shift amount is a positive constant, drop the width by 1772 // that much. 1773 llvm::APSInt shift; 1774 if (BO->getRHS()->isIntegerConstantExpr(shift, C) && 1775 shift.isNonNegative()) { 1776 unsigned zext = shift.getZExtValue(); 1777 if (zext >= L.Width) 1778 L.Width = (L.NonNegative ? 0 : 1); 1779 else 1780 L.Width -= zext; 1781 } 1782 1783 return L; 1784 } 1785 1786 // Comma acts as its right operand. 1787 case BinaryOperator::Comma: 1788 return GetExprRange(C, BO->getRHS(), MaxWidth); 1789 1790 // Black-list pointer subtractions. 
1791 case BinaryOperator::Sub: 1792 if (BO->getLHS()->getType()->isPointerType()) 1793 return IntRange::forType(C, E->getType()); 1794 // fallthrough 1795 1796 default: 1797 break; 1798 } 1799 1800 // Treat every other operator as if it were closed on the 1801 // narrowest type that encompasses both operands. 1802 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth); 1803 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth); 1804 return IntRange::join(L, R); 1805 } 1806 1807 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 1808 switch (UO->getOpcode()) { 1809 // Boolean-valued operations are white-listed. 1810 case UnaryOperator::LNot: 1811 return IntRange::forBoolType(); 1812 1813 // Operations with opaque sources are black-listed. 1814 case UnaryOperator::Deref: 1815 case UnaryOperator::AddrOf: // should be impossible 1816 case UnaryOperator::OffsetOf: 1817 return IntRange::forType(C, E->getType()); 1818 1819 default: 1820 return GetExprRange(C, UO->getSubExpr(), MaxWidth); 1821 } 1822 } 1823 1824 FieldDecl *BitField = E->getBitField(); 1825 if (BitField) { 1826 llvm::APSInt BitWidthAP = BitField->getBitWidth()->EvaluateAsInt(C); 1827 unsigned BitWidth = BitWidthAP.getZExtValue(); 1828 1829 return IntRange(BitWidth, BitField->getType()->isUnsignedIntegerType()); 1830 } 1831 1832 return IntRange::forType(C, E->getType()); 1833} 1834 1835/// Checks whether the given value, which currently has the given 1836/// source semantics, has the same value when coerced through the 1837/// target semantics. 
1838bool IsSameFloatAfterCast(const llvm::APFloat &value, 1839 const llvm::fltSemantics &Src, 1840 const llvm::fltSemantics &Tgt) { 1841 llvm::APFloat truncated = value; 1842 1843 bool ignored; 1844 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 1845 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 1846 1847 return truncated.bitwiseIsEqual(value); 1848} 1849 1850/// Checks whether the given value, which currently has the given 1851/// source semantics, has the same value when coerced through the 1852/// target semantics. 1853/// 1854/// The value might be a vector of floats (or a complex number). 1855bool IsSameFloatAfterCast(const APValue &value, 1856 const llvm::fltSemantics &Src, 1857 const llvm::fltSemantics &Tgt) { 1858 if (value.isFloat()) 1859 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 1860 1861 if (value.isVector()) { 1862 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 1863 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 1864 return false; 1865 return true; 1866 } 1867 1868 assert(value.isComplexFloat()); 1869 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 1870 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 1871} 1872 1873} // end anonymous namespace 1874 1875/// \brief Implements -Wsign-compare. 1876/// 1877/// \param lex the left-hand expression 1878/// \param rex the right-hand expression 1879/// \param OpLoc the location of the joining operator 1880/// \param Equality whether this is an "equality-like" join, which 1881/// suppresses the warning in some cases 1882void Sema::CheckSignCompare(Expr *lex, Expr *rex, SourceLocation OpLoc, 1883 const PartialDiagnostic &PD, bool Equality) { 1884 // Don't warn if we're in an unevaluated context. 1885 if (ExprEvalContexts.back().Context == Unevaluated) 1886 return; 1887 1888 // If either expression is value-dependent, don't warn. We'll get another 1889 // chance at instantiation time. 
1890 if (lex->isValueDependent() || rex->isValueDependent()) 1891 return; 1892 1893 QualType lt = lex->getType(), rt = rex->getType(); 1894 1895 // Only warn if both operands are integral. 1896 if (!lt->isIntegerType() || !rt->isIntegerType()) 1897 return; 1898 1899 // In C, the width of a bitfield determines its type, and the 1900 // declared type only contributes the signedness. This duplicates 1901 // the work that will later be done by UsualUnaryConversions. 1902 // Eventually, this check will be reorganized in a way that avoids 1903 // this duplication. 1904 if (!getLangOptions().CPlusPlus) { 1905 QualType tmp; 1906 tmp = Context.isPromotableBitField(lex); 1907 if (!tmp.isNull()) lt = tmp; 1908 tmp = Context.isPromotableBitField(rex); 1909 if (!tmp.isNull()) rt = tmp; 1910 } 1911 1912 // The rule is that the signed operand becomes unsigned, so isolate the 1913 // signed operand. 1914 Expr *signedOperand = lex, *unsignedOperand = rex; 1915 QualType signedType = lt, unsignedType = rt; 1916 if (lt->isSignedIntegerType()) { 1917 if (rt->isSignedIntegerType()) return; 1918 } else { 1919 if (!rt->isSignedIntegerType()) return; 1920 std::swap(signedOperand, unsignedOperand); 1921 std::swap(signedType, unsignedType); 1922 } 1923 1924 unsigned unsignedWidth = Context.getIntWidth(unsignedType); 1925 unsigned signedWidth = Context.getIntWidth(signedType); 1926 1927 // If the unsigned type is strictly smaller than the signed type, 1928 // then (1) the result type will be signed and (2) the unsigned 1929 // value will fit fully within the signed type, and thus the result 1930 // of the comparison will be exact. 1931 if (signedWidth > unsignedWidth) 1932 return; 1933 1934 // Otherwise, calculate the effective ranges. 
1935 IntRange signedRange = GetExprRange(Context, signedOperand, signedWidth); 1936 IntRange unsignedRange = GetExprRange(Context, unsignedOperand, unsignedWidth); 1937 1938 // We should never be unable to prove that the unsigned operand is 1939 // non-negative. 1940 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 1941 1942 // If the signed operand is non-negative, then the signed->unsigned 1943 // conversion won't change it. 1944 if (signedRange.NonNegative) 1945 return; 1946 1947 // For (in)equality comparisons, if the unsigned operand is a 1948 // constant which cannot collide with a overflowed signed operand, 1949 // then reinterpreting the signed operand as unsigned will not 1950 // change the result of the comparison. 1951 if (Equality && unsignedRange.Width < unsignedWidth) 1952 return; 1953 1954 Diag(OpLoc, PD) 1955 << lt << rt << lex->getSourceRange() << rex->getSourceRange(); 1956} 1957 1958/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 1959static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, unsigned diag) { 1960 S.Diag(E->getExprLoc(), diag) << E->getType() << T << E->getSourceRange(); 1961} 1962 1963/// Implements -Wconversion. 1964void Sema::CheckImplicitConversion(Expr *E, QualType T) { 1965 // Don't diagnose in unevaluated contexts. 1966 if (ExprEvalContexts.back().Context == Sema::Unevaluated) 1967 return; 1968 1969 // Don't diagnose for value-dependent expressions. 1970 if (E->isValueDependent()) 1971 return; 1972 1973 const Type *Source = Context.getCanonicalType(E->getType()).getTypePtr(); 1974 const Type *Target = Context.getCanonicalType(T).getTypePtr(); 1975 1976 // Never diagnose implicit casts to bool. 1977 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 1978 return; 1979 1980 // Strip vector types. 
1981 if (isa<VectorType>(Source)) { 1982 if (!isa<VectorType>(Target)) 1983 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_vector_scalar); 1984 1985 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 1986 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 1987 } 1988 1989 // Strip complex types. 1990 if (isa<ComplexType>(Source)) { 1991 if (!isa<ComplexType>(Target)) 1992 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_complex_scalar); 1993 1994 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 1995 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 1996 } 1997 1998 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 1999 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 2000 2001 // If the source is floating point... 2002 if (SourceBT && SourceBT->isFloatingPoint()) { 2003 // ...and the target is floating point... 2004 if (TargetBT && TargetBT->isFloatingPoint()) { 2005 // ...then warn if we're dropping FP rank. 2006 2007 // Builtin FP kinds are ordered by increasing FP rank. 2008 if (SourceBT->getKind() > TargetBT->getKind()) { 2009 // Don't warn about float constants that are precisely 2010 // representable in the target type. 2011 Expr::EvalResult result; 2012 if (E->Evaluate(result, Context)) { 2013 // Value might be a float, a float vector, or a float complex. 2014 if (IsSameFloatAfterCast(result.Val, 2015 Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 2016 Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 2017 return; 2018 } 2019 2020 DiagnoseImpCast(*this, E, T, diag::warn_impcast_float_precision); 2021 } 2022 return; 2023 } 2024 2025 // If the target is integral, always warn. 2026 if ((TargetBT && TargetBT->isInteger())) 2027 // TODO: don't warn for integer values? 
2028 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_float_integer); 2029 2030 return; 2031 } 2032 2033 if (!Source->isIntegerType() || !Target->isIntegerType()) 2034 return; 2035 2036 IntRange SourceRange = GetExprRange(Context, E, Context.getIntWidth(E->getType())); 2037 IntRange TargetRange = IntRange::forCanonicalType(Context, Target); 2038 2039 // FIXME: also signed<->unsigned? 2040 2041 if (SourceRange.Width > TargetRange.Width) { 2042 // People want to build with -Wshorten-64-to-32 and not -Wconversion 2043 // and by god we'll let them. 2044 if (SourceRange.Width == 64 && TargetRange.Width == 32) 2045 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_integer_64_32); 2046 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_integer_precision); 2047 } 2048 2049 return; 2050} 2051 2052// MarkLive - Mark all the blocks reachable from e as live. Returns the total 2053// number of blocks just marked live. 2054static unsigned MarkLive(CFGBlock *e, llvm::BitVector &live) { 2055 unsigned count = 0; 2056 std::queue<CFGBlock*> workq; 2057 // Prep work queue 2058 live.set(e->getBlockID()); 2059 ++count; 2060 workq.push(e); 2061 // Solve 2062 while (!workq.empty()) { 2063 CFGBlock *item = workq.front(); 2064 workq.pop(); 2065 for (CFGBlock::succ_iterator I=item->succ_begin(), 2066 E=item->succ_end(); 2067 I != E; 2068 ++I) { 2069 if ((*I) && !live[(*I)->getBlockID()]) { 2070 live.set((*I)->getBlockID()); 2071 ++count; 2072 workq.push(*I); 2073 } 2074 } 2075 } 2076 return count; 2077} 2078 2079static SourceLocation GetUnreachableLoc(CFGBlock &b, SourceRange &R1, 2080 SourceRange &R2) { 2081 Stmt *S; 2082 unsigned sn = 0; 2083 R1 = R2 = SourceRange(); 2084 2085 top: 2086 if (sn < b.size()) 2087 S = b[sn].getStmt(); 2088 else if (b.getTerminator()) 2089 S = b.getTerminator(); 2090 else 2091 return SourceLocation(); 2092 2093 switch (S->getStmtClass()) { 2094 case Expr::BinaryOperatorClass: { 2095 BinaryOperator *BO = cast<BinaryOperator>(S); 2096 if 
(BO->getOpcode() == BinaryOperator::Comma) { 2097 if (sn+1 < b.size()) 2098 return b[sn+1].getStmt()->getLocStart(); 2099 CFGBlock *n = &b; 2100 while (1) { 2101 if (n->getTerminator()) 2102 return n->getTerminator()->getLocStart(); 2103 if (n->succ_size() != 1) 2104 return SourceLocation(); 2105 n = n[0].succ_begin()[0]; 2106 if (n->pred_size() != 1) 2107 return SourceLocation(); 2108 if (!n->empty()) 2109 return n[0][0].getStmt()->getLocStart(); 2110 } 2111 } 2112 R1 = BO->getLHS()->getSourceRange(); 2113 R2 = BO->getRHS()->getSourceRange(); 2114 return BO->getOperatorLoc(); 2115 } 2116 case Expr::UnaryOperatorClass: { 2117 const UnaryOperator *UO = cast<UnaryOperator>(S); 2118 R1 = UO->getSubExpr()->getSourceRange(); 2119 return UO->getOperatorLoc(); 2120 } 2121 case Expr::CompoundAssignOperatorClass: { 2122 const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S); 2123 R1 = CAO->getLHS()->getSourceRange(); 2124 R2 = CAO->getRHS()->getSourceRange(); 2125 return CAO->getOperatorLoc(); 2126 } 2127 case Expr::ConditionalOperatorClass: { 2128 const ConditionalOperator *CO = cast<ConditionalOperator>(S); 2129 return CO->getQuestionLoc(); 2130 } 2131 case Expr::MemberExprClass: { 2132 const MemberExpr *ME = cast<MemberExpr>(S); 2133 R1 = ME->getSourceRange(); 2134 return ME->getMemberLoc(); 2135 } 2136 case Expr::ArraySubscriptExprClass: { 2137 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S); 2138 R1 = ASE->getLHS()->getSourceRange(); 2139 R2 = ASE->getRHS()->getSourceRange(); 2140 return ASE->getRBracketLoc(); 2141 } 2142 case Expr::CStyleCastExprClass: { 2143 const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S); 2144 R1 = CSC->getSubExpr()->getSourceRange(); 2145 return CSC->getLParenLoc(); 2146 } 2147 case Expr::CXXFunctionalCastExprClass: { 2148 const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S); 2149 R1 = CE->getSubExpr()->getSourceRange(); 2150 return CE->getTypeBeginLoc(); 2151 } 2152 case Expr::ImplicitCastExprClass: 2153 
++sn; 2154 goto top; 2155 case Stmt::CXXTryStmtClass: { 2156 return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc(); 2157 } 2158 default: ; 2159 } 2160 R1 = S->getSourceRange(); 2161 return S->getLocStart(); 2162} 2163 2164static SourceLocation MarkLiveTop(CFGBlock *e, llvm::BitVector &live, 2165 SourceManager &SM) { 2166 std::queue<CFGBlock*> workq; 2167 // Prep work queue 2168 workq.push(e); 2169 SourceRange R1, R2; 2170 SourceLocation top = GetUnreachableLoc(*e, R1, R2); 2171 bool FromMainFile = false; 2172 bool FromSystemHeader = false; 2173 bool TopValid = false; 2174 if (top.isValid()) { 2175 FromMainFile = SM.isFromMainFile(top); 2176 FromSystemHeader = SM.isInSystemHeader(top); 2177 TopValid = true; 2178 } 2179 // Solve 2180 while (!workq.empty()) { 2181 CFGBlock *item = workq.front(); 2182 workq.pop(); 2183 SourceLocation c = GetUnreachableLoc(*item, R1, R2); 2184 if (c.isValid() 2185 && (!TopValid 2186 || (SM.isFromMainFile(c) && !FromMainFile) 2187 || (FromSystemHeader && !SM.isInSystemHeader(c)) 2188 || SM.isBeforeInTranslationUnit(c, top))) { 2189 top = c; 2190 FromMainFile = SM.isFromMainFile(top); 2191 FromSystemHeader = SM.isInSystemHeader(top); 2192 } 2193 live.set(item->getBlockID()); 2194 for (CFGBlock::succ_iterator I=item->succ_begin(), 2195 E=item->succ_end(); 2196 I != E; 2197 ++I) { 2198 if ((*I) && !live[(*I)->getBlockID()]) { 2199 live.set((*I)->getBlockID()); 2200 workq.push(*I); 2201 } 2202 } 2203 } 2204 return top; 2205} 2206 2207static int LineCmp(const void *p1, const void *p2) { 2208 SourceLocation *Line1 = (SourceLocation *)p1; 2209 SourceLocation *Line2 = (SourceLocation *)p2; 2210 return !(*Line1 < *Line2); 2211} 2212 2213namespace { 2214 struct ErrLoc { 2215 SourceLocation Loc; 2216 SourceRange R1; 2217 SourceRange R2; 2218 ErrLoc(SourceLocation l, SourceRange r1, SourceRange r2) 2219 : Loc(l), R1(r1), R2(r2) { } 2220 }; 2221} 2222 2223/// CheckUnreachable - Check for unreachable code. 
/// CheckUnreachable - Emit -Wunreachable-code warnings for every
/// CFG block that cannot be reached from the function's entry block.
/// Warnings are batched, sorted by source location, and de-duplicated
/// by marking everything reachable from a reported block as live.
void Sema::CheckUnreachable(AnalysisContext &AC) {
  unsigned count;
  // We avoid checking when there are errors, as the CFG won't faithfully match
  // the user's code.
  if (getDiagnostics().hasErrorOccurred())
    return;
  if (Diags.getDiagnosticLevel(diag::warn_unreachable) == Diagnostic::Ignored)
    return;

  CFG *cfg = AC.getCFG();
  if (cfg == 0)
    return;

  llvm::BitVector live(cfg->getNumBlockIDs());
  // Mark all live things first.
  count = MarkLive(&cfg->getEntry(), live);

  if (count == cfg->getNumBlockIDs())
    // If there are no dead blocks, we're done.
    return;

  SourceRange R1, R2;

  llvm::SmallVector<ErrLoc, 24> lines;
  bool AddEHEdges = AC.getAddEHEdges();
  // First, give warnings for blocks with no predecessors, as they
  // can't be part of a loop.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    CFGBlock &b = **I;
    if (!live[b.getBlockID()]) {
      if (b.pred_begin() == b.pred_end()) {
        if (!AddEHEdges && b.getTerminator()
            && isa<CXXTryStmt>(b.getTerminator())) {
          // When not adding EH edges from calls, catch clauses
          // can otherwise seem dead.  Avoid noting them as dead.
          count += MarkLive(&b, live);
          continue;
        }
        SourceLocation c = GetUnreachableLoc(b, R1, R2);
        if (!c.isValid()) {
          // Blocks without a location can't produce a warning, so don't mark
          // reachable blocks from here as live.
          live.set(b.getBlockID());
          ++count;
          continue;
        }
        lines.push_back(ErrLoc(c, R1, R2));
        // Avoid excessive errors by marking everything reachable from here
        count += MarkLive(&b, live);
      }
    }
  }

  if (count < cfg->getNumBlockIDs()) {
    // And then give warnings for the tops of loops.  Any remaining dead
    // block at this point has predecessors (i.e. it is part of a cycle).
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()])
        // Avoid excessive errors by marking everything reachable from here
        lines.push_back(ErrLoc(MarkLiveTop(&b, live,
                                           Context.getSourceManager()),
                               SourceRange(), SourceRange()));
    }
  }

  // Report in source order.
  llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp);
  for (llvm::SmallVector<ErrLoc, 24>::iterator I = lines.begin(),
                                               E = lines.end();
       I != E;
       ++I)
    if (I->Loc.isValid())
      Diag(I->Loc, diag::warn_unreachable) << I->R1 << I->R2;
}

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
Sema::ControlFlowKind Sema::CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0)
    // FIXME: This should be NeverFallThrough
    return NeverFallThroughOrReturn;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  std::queue<CFGBlock*> workq;
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = MarkLive(&cfg->getEntry(), live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += MarkLive(&b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit block
  // and look for fall through paths, being careful to ignore normal returns,
  // and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;
  for (CFGBlock::pred_iterator I = cfg->getExit().pred_begin(),
                               E = cfg->getExit().pred_end();
       I != E;
       ++I) {
    CFGBlock& B = **I;
    if (!live[B.getBlockID()])
      continue;
    if (B.size() == 0) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }

      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }
    // Classify the block by its last statement.
    Stmt *S = B[B.size()-1];
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      // MS-style inline asm may contain its own return; treat it as both
      // a fake edge and a possible return.
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (B.succ_begin()[0] != &cfg->getExit()) {
        HasAbnormalEdge = true;
        continue;
      }
      // Calls to noreturn functions (by type attribute or declaration
      // attribute) don't actually fall through.
      Expr *CEE = C->getCallee()->IgnoreParenCasts();
      if (CEE->getType().getNoReturnAttr()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}

/// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
/// \param D the function or Objective-C method definition being checked
/// \param Body the definition's body statement
/// \param AC the analysis context providing the CFG for \p Body
void Sema::CheckFallThroughForFunctionDef(Decl *D, Stmt *Body,
                                          AnalysisContext &AC) {
  // FIXME: Would be nice if we had a better way to control cascading errors,
  // but for now, avoid them.  The problem is that when Parse sees:
  //   int foo() { return a; }
  // The return is eaten and the Sema code sees just:
  //   int foo() { }
  // which this code would then warn about.
  if (getDiagnostics().hasErrorOccurred())
    return;

  // Work out the declared return behavior for either a function or an
  // Objective-C method.
  bool ReturnsVoid = false;
  bool HasNoReturn = false;
  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // If the result type of the function is a dependent type, we don't know
    // whether it will be void or not, so don't
    if (FD->getResultType()->isDependentType())
      return;
    if (FD->getResultType()->isVoidType())
      ReturnsVoid = true;
    if (FD->hasAttr<NoReturnAttr>())
      HasNoReturn = true;
  } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    if (MD->getResultType()->isVoidType())
      ReturnsVoid = true;
    if (MD->hasAttr<NoReturnAttr>())
      HasNoReturn = true;
  }

  // Short circuit for compilation speed: skip the (expensive) CFG-based
  // check when every diagnostic it could produce is disabled or
  // inapplicable to this definition.
  if ((Diags.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function)
       == Diagnostic::Ignored || ReturnsVoid)
      && (Diags.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr)
          == Diagnostic::Ignored || !HasNoReturn)
      && (Diags.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
          == Diagnostic::Ignored || !ReturnsVoid))
    return;
  // FIXME: Function try block
  if (CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
    case MaybeFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::warn_falloff_noreturn_function);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(),diag::warn_maybe_falloff_nonvoid_function);
      break;
    case AlwaysFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::warn_falloff_noreturn_function);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(), diag::warn_falloff_nonvoid_function);
      break;
    case NeverFallThroughOrReturn:
      // Function never returns at all: suggest marking it noreturn.
      if (ReturnsVoid && !HasNoReturn)
        Diag(Compound->getLBracLoc(), diag::warn_suggest_noreturn_function);
      break;
    case NeverFallThrough:
      break;
    }
  }
}

/// CheckFallThroughForBlock - Check that we don't fall off the end of a block
/// that should return a value.  Check that we don't fall off the end of a
/// noreturn block.  We assume that functions and blocks not marked noreturn
/// will return.
void Sema::CheckFallThroughForBlock(QualType BlockTy, Stmt *Body,
                                    AnalysisContext &AC) {
  // FIXME: Would be nice if we had a better way to control cascading errors,
  // but for now, avoid them.  The problem is that when Parse sees:
  //   int foo() { return a; }
  // The return is eaten and the Sema code sees just:
  //   int foo() { }
  // which this code would then warn about.
  if (getDiagnostics().hasErrorOccurred())
    return;
  // Determine the block's return behavior from its (pointer-to-)function
  // type.
  bool ReturnsVoid = false;
  bool HasNoReturn = false;
  if (const FunctionType *FT = BlockTy->getPointeeType()->getAs<FunctionType>()) {
    if (FT->getResultType()->isVoidType())
      ReturnsVoid = true;
    if (FT->getNoReturnAttr())
      HasNoReturn = true;
  }

  // Short circuit for compilation speed.
  // NOTE(review): the trailing "|| !ReturnsVoid" disjunct is always false
  // here (ReturnsVoid is true on this path), so the condition reduces to
  // checking whether warn_suggest_noreturn_block is ignored — presumably
  // kept parallel to CheckFallThroughForFunctionDef; confirm before
  // simplifying.
  if (ReturnsVoid
      && !HasNoReturn
      && (Diags.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
          == Diagnostic::Ignored || !ReturnsVoid))
    return;
  // FIXME: Function try block
  if (CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
    case MaybeFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::err_noreturn_block_has_return_expr);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(), diag::err_maybe_falloff_nonvoid_block);
      break;
    case AlwaysFallThrough:
      if (HasNoReturn)
        Diag(Compound->getRBracLoc(), diag::err_noreturn_block_has_return_expr);
      else if (!ReturnsVoid)
        Diag(Compound->getRBracLoc(), diag::err_falloff_nonvoid_block);
      break;
    case NeverFallThroughOrReturn:
      if (ReturnsVoid)
        Diag(Compound->getLBracLoc(), diag::warn_suggest_noreturn_block);
      break;
    case NeverFallThrough:
      break;
    }
  }
}

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function.  This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
2546bool Sema::CheckParmsForFunctionDef(FunctionDecl *FD) { 2547 bool HasInvalidParm = false; 2548 for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) { 2549 ParmVarDecl *Param = FD->getParamDecl(p); 2550 2551 // C99 6.7.5.3p4: the parameters in a parameter type list in a 2552 // function declarator that is part of a function definition of 2553 // that function shall not have incomplete type. 2554 // 2555 // This is also C++ [dcl.fct]p6. 2556 if (!Param->isInvalidDecl() && 2557 RequireCompleteType(Param->getLocation(), Param->getType(), 2558 diag::err_typecheck_decl_incomplete_type)) { 2559 Param->setInvalidDecl(); 2560 HasInvalidParm = true; 2561 } 2562 2563 // C99 6.9.1p5: If the declarator includes a parameter type list, the 2564 // declaration of each parameter shall include an identifier. 2565 if (Param->getIdentifier() == 0 && 2566 !Param->isImplicit() && 2567 !getLangOptions().CPlusPlus) 2568 Diag(Param->getLocation(), diag::err_parameter_name_omitted); 2569 } 2570 2571 return HasInvalidParm; 2572} 2573