CGExprConstant.cpp revision aaed50dfb52d358a407dbbdd1ca323f9328e957a
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class VISIBILITY_HIDDEN ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;

  unsigned NextFieldOffsetInBytes;

  std::vector<llvm::Constant *> Elements;

  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0) { }

  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   const Expr *InitExpr) {
    uint64_t FieldOffsetInBytes = FieldOffset / 8;

    assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
           && "Field offset mismatch!");

    // Emit the field.
    llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
    if (!C)
      return false;

    unsigned FieldAlignment = getAlignment(C);

    // Round up the field offset to the alignment of the field type.
    uint64_t AlignedNextFieldOffsetInBytes =
      llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

    if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
      std::vector<llvm::Constant *> PackedElements;

      assert(!Packed && "Alignment is wrong even with a packed struct!");

      // Convert the struct to a packed struct.
      uint64_t ElementOffsetInBytes = 0;

      for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
        llvm::Constant *C = Elements[i];

        unsigned ElementAlign =
          CGM.getTargetData().getABITypeAlignment(C->getType());
        uint64_t AlignedElementOffsetInBytes =
          llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);

        if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
          // We need some padding.
          uint64_t NumBytes =
            AlignedElementOffsetInBytes - ElementOffsetInBytes;

          const llvm::Type *Ty = llvm::Type::Int8Ty;
          if (NumBytes > 1)
            Ty = llvm::ArrayType::get(Ty, NumBytes);

          llvm::Constant *Padding = llvm::Constant::getNullValue(Ty);
          PackedElements.push_back(Padding);
          ElementOffsetInBytes += getSizeInBytes(Padding);
        }

        PackedElements.push_back(C);
        ElementOffsetInBytes += getSizeInBytes(C);
      }

      assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
             "Packing the struct changed its size!");

      Elements = PackedElements;
      Packed = true;
      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
      // We need to append padding.
      AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);

      assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
             "Did not add enough padding!");

      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    // Add the field.
    Elements.push_back(C);
    NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);

    return true;
  }

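  // A worked example of the bit-field splitting performed by AppendBitField
  // below (an illustrative sketch, not part of the original source): on a
  // little-endian target, appending 'int x : 11' with value 0x5A5 at bit
  // offset 0 emits the low eight bits (0xA5) as one i8 element, then
  // zero-extends the remaining three high bits (0x5) into a final i8
  // element, whose free high bits a following bit-field can merge into.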
  bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      const Expr *InitExpr) {
    llvm::ConstantInt *CI =
      cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
                                                           Field->getType(),
                                                           CGF));
    // FIXME: Can this ever happen?
    if (!CI)
      return false;

    if (FieldOffset > NextFieldOffsetInBytes * 8) {
      // We need to add padding.
      uint64_t NumBytes =
        llvm::RoundUpToAlignment(FieldOffset -
                                 NextFieldOffsetInBytes * 8, 8) / 8;

      AppendPadding(NumBytes);
    }

    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();

    llvm::APInt FieldValue = CI->getValue();

    // Promote the size of FieldValue if necessary.
    // FIXME: This should never occur, but currently it can because initializer
    // constants are cast to bool, and because clang is not enforcing bitfield
    // width limits.
    if (FieldSize > FieldValue.getBitWidth())
      FieldValue.zext(FieldSize);

    // Truncate the size of FieldValue to the bit field size.
    if (FieldSize < FieldValue.getBitWidth())
      FieldValue.trunc(FieldSize);

    if (FieldOffset < NextFieldOffsetInBytes * 8) {
      // Either part of the field or the entire field can go into the previous
      // byte.
      assert(!Elements.empty() && "Elements can't be empty!");

      unsigned BitsInPreviousByte =
        NextFieldOffsetInBytes * 8 - FieldOffset;

      bool FitsCompletelyInPreviousByte =
        BitsInPreviousByte >= FieldValue.getBitWidth();

      llvm::APInt Tmp = FieldValue;

      if (!FitsCompletelyInPreviousByte) {
        unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

        if (CGM.getTargetData().isBigEndian()) {
          Tmp = Tmp.lshr(NewFieldWidth);
          Tmp.trunc(BitsInPreviousByte);

          // We want the remaining high bits.
          FieldValue.trunc(NewFieldWidth);
        } else {
          Tmp.trunc(BitsInPreviousByte);

          // We want the remaining low bits.
          FieldValue = FieldValue.lshr(BitsInPreviousByte);
          FieldValue.trunc(NewFieldWidth);
        }
      }

      Tmp.zext(8);
      if (CGM.getTargetData().isBigEndian()) {
        if (FitsCompletelyInPreviousByte)
          Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
      } else {
        Tmp = Tmp.shl(8 - BitsInPreviousByte);
      }

      // Or in the bits that go into the previous byte.
      Tmp |= cast<llvm::ConstantInt>(Elements.back())->getValue();
      Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

      if (FitsCompletelyInPreviousByte)
        return true;
    }

    while (FieldValue.getBitWidth() > 8) {
      llvm::APInt Tmp;

      if (CGM.getTargetData().isBigEndian()) {
        // We want the high bits.
        Tmp = FieldValue;
        Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
        Tmp.trunc(8);
      } else {
        // We want the low bits.
        Tmp = FieldValue;
        Tmp.trunc(8);

        FieldValue = FieldValue.lshr(8);
      }

      Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
      NextFieldOffsetInBytes++;

      FieldValue.trunc(FieldValue.getBitWidth() - 8);
    }

    assert(FieldValue.getBitWidth() > 0 &&
           "Should have at least one bit left!");
    assert(FieldValue.getBitWidth() <= 8 &&
           "Should not have more than a byte left!");

    if (FieldValue.getBitWidth() < 8) {
      if (CGM.getTargetData().isBigEndian()) {
        unsigned BitWidth = FieldValue.getBitWidth();

        FieldValue.zext(8);
        FieldValue = FieldValue << (8 - BitWidth);
      } else
        FieldValue.zext(8);
    }

    // Append the last element.
    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                              FieldValue));
    NextFieldOffsetInBytes++;
    return true;
  }

  void AppendPadding(uint64_t NumBytes) {
    if (!NumBytes)
      return;

    const llvm::Type *Ty = llvm::Type::Int8Ty;
    if (NumBytes > 1)
      Ty = llvm::ArrayType::get(Ty, NumBytes);

    llvm::Constant *C = llvm::Constant::getNullValue(Ty);
    Elements.push_back(C);
    assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

    NextFieldOffsetInBytes += getSizeInBytes(C);
  }

  void AppendTailPadding(uint64_t RecordSize) {
    assert(RecordSize % 8 == 0 && "Invalid record size!");

    uint64_t RecordSizeInBytes = RecordSize / 8;
    assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

    unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
    AppendPadding(NumPadBytes);
  }

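  // Build drives the two Append methods above: it walks the record's fields
  // in declaration order, looks up each field's bit offset in the AST record
  // layout, and dispatches to AppendBitField or AppendField. Unnamed
  // bit-fields are padding only and take no initializer, so they are
  // skipped; a record that ends up bigger than its declared size must have a
  // flexible array member, in which case no tail padding is appended.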
  bool Build(const InitListExpr *ILE) {
    RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    unsigned FieldNo = 0;
    unsigned ElementNo = 0;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
         FieldEnd = RD->field_end();
         ElementNo < ILE->getNumInits() && Field != FieldEnd;
         ++Field, ++FieldNo) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;

        if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                            ILE->getInit(ElementNo)))
          return false;
      } else {
        if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
                         ILE->getInit(ElementNo)))
          return false;
      }

      ElementNo++;
    }

    uint64_t LayoutSizeInBytes = Layout.getSize() / 8;

    if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
      // If the struct is bigger than the size of the record type,
      // we must have a flexible array member at the end.
      assert(RD->hasFlexibleArrayMember() &&
             "Must have flexible array member if struct is bigger than type!");

      // No tail padding is necessary.
      return true;
    }

    // Append tail padding if necessary.
    AppendTailPadding(Layout.getSize());

    assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
           "Tail padding mismatch!");

    return true;
  }

  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)
      return 1;

    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }

public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     const InitListExpr *ILE) {
    ConstStructBuilder Builder(CGM, CGF);

    if (!Builder.Build(ILE))
      return 0;

    llvm::Constant *Result =
      llvm::ConstantStruct::get(Builder.Elements, Builder.Packed);

    assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
                                    Builder.getAlignment(Result)) ==
           Builder.getSizeInBytes(Result) && "Size mismatch!");

    return Result;
  }
};

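// An illustration of the builder's output (a sketch; the exact layout is
// target-dependent): for struct { char c; int i; } initialized with
// { 'a', 1 }, the builder emits the unpacked constant { i8 97, i32 1 }, and
// LLVM's own struct layout supplies the three padding bytes before 'i'.
// Explicit padding elements, or a conversion to a packed struct, are only
// produced when the ABI alignment of the emitted elements fails to
// reproduce the offsets recorded in the AST record layout.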
class VISIBILITY_HIDDEN ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    // GCC cast to union extension
    if (E->getType()->isUnionType()) {
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();

      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C)
        return 0;

      // Build a struct with the union sub-element as the first member,
      // padded to the appropriate size.
      std::vector<llvm::Constant*> Elts;
      std::vector<const llvm::Type*> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        const llvm::Type *Ty = llvm::Type::Int8Ty;
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::Constant::getNullValue(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy = llvm::StructType::get(Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }

    // Explicit and implicit no-op casts
    QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
    if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) {
      return Visit(E->getSubExpr());
    }
    return 0;
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    unsigned NumInitElements = ILE->getNumInits();
    // FIXME: Check for wide strings
    // FIXME: Check for NumInitElements exactly equal to 1??
    if (NumInitElements > 0 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
        ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
      return Visit(ILE->getInit(0));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initializing an array requires us to automatically
    // initialize any elements that have not been initialized explicitly.
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    if (RewriteType) {
      // The elements no longer form a homogeneous array, so fall back to a
      // packed struct with the same in-memory layout.
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = llvm::StructType::get(Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

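  // InsertBitfieldIntoStruct below ORs a bit-field's constant value, byte by
  // byte, into an already-built vector of element constants. A worked
  // example (an illustrative sketch, not part of the original source):
  // inserting a 10-bit value at bit offset 3 ORs the value's low five bits,
  // shifted left by three, into the byte containing that offset, then ORs
  // the remaining five bits into the low end of the next byte. As the
  // FIXMEs inside note, this matches little-endian, x86-style layout only.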
  void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
                                FieldDecl* Field, Expr* E) {
    // Calculate the value to insert.
    llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF);
    if (!C)
      return;

    llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
    if (!CI) {
      CGM.ErrorUnsupported(E, "bitfield initialization");
      return;
    }
    llvm::APInt V = CI->getValue();

    // Calculate information about the relevant field.
    const llvm::Type* Ty = CI->getType();
    const llvm::TargetData &TD = CGM.getTypes().getTargetData();
    unsigned size = TD.getTypeAllocSizeInBits(Ty);
    CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
    unsigned FieldOffset = Info.FieldNo * size;

    FieldOffset += Info.Start;

    // Find where to start the insertion.
    // FIXME: This is O(n^2) in the number of bit-fields!
    // FIXME: This won't work if the struct isn't completely packed!
    unsigned offset = 0, i = 0;
    while (offset < (FieldOffset & -8))
      offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType());

    // Advance over 0-sized elements (must terminate in bounds since
    // the bitfield must have a size).
    while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0)
      ++i;

    // Promote the size of V if necessary.
    // FIXME: This should never occur, but currently it can because initializer
    // constants are cast to bool, and because clang is not enforcing bitfield
    // width limits.
    if (Info.Size > V.getBitWidth())
      V.zext(Info.Size);

    // Insert the bits into the struct.
    // FIXME: This algorithm is only correct on X86!
    // FIXME: This algorithm assumes bit-fields only have byte-size elements!
    unsigned bitsToInsert = Info.Size;
    unsigned curBits = std::min(8 - (FieldOffset & 7), bitsToInsert);
    unsigned byte = V.getLoBits(curBits).getZExtValue() << (FieldOffset & 7);
    do {
      llvm::Constant* byteC =
        llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
      Elts[i] = llvm::ConstantExpr::getOr(Elts[i], byteC);
      ++i;
      V = V.lshr(curBits);
      bitsToInsert -= curBits;

      if (!bitsToInsert)
        break;

      curBits = bitsToInsert > 8 ? 8 : bitsToInsert;
      byte = V.getLoBits(curBits).getZExtValue();
    } while (true);
  }

  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);

    // FIXME: Remove the old struct builder (the unreachable code below) once
    // we're sure that the new one works well enough!
    const llvm::StructType *SType =
      cast<llvm::StructType>(ConvertType(ILE->getType()));
    RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
    std::vector<llvm::Constant*> Elts;

    // Initialize the whole structure to zero.
    // FIXME: This doesn't handle member pointers correctly!
    for (unsigned i = 0; i < SType->getNumElements(); ++i) {
      const llvm::Type *FieldTy = SType->getElementType(i);
      Elts.push_back(llvm::Constant::getNullValue(FieldTy));
    }

    // Copy initializer elements. Skip padding fields.
    unsigned EltNo = 0; // Element no in ILE
    bool RewriteType = false;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
         FieldEnd = RD->field_end();
         EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;
        InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo));
      } else {
        unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field);
        llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo),
                                                 Field->getType(), CGF);
        if (!C) return 0;
        RewriteType |= (C->getType() != Elts[FieldNo]->getType());
        Elts[FieldNo] = C;
      }
      EltNo++;
    }

    if (RewriteType) {
      // FIXME: Make this work for non-packed structs.
      assert(SType->isPacked() && "Cannot recreate unpacked structs");
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      SType = llvm::StructType::get(Types, true);
    }

    return llvm::ConstantStruct::get(SType, Elts);
  }

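  // EmitUnion below models a union constant as an anonymous struct whose
  // first element is the active member's constant, followed by enough i8
  // padding to reach the union's full size. For example (a sketch), a
  // 'union { char c; int i; }' initialized through 'c' becomes
  // { i8 <value>, [3 x i8] zeroinitializer } on a target with 4-byte int.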
  llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
    if (!C)
      return 0;

    // Build a struct with the union sub-element as the first member,
    // padded to the appropriate size.
    std::vector<llvm::Constant*> Elts;
    std::vector<const llvm::Type*> Types;
    Elts.push_back(C);
    Types.push_back(C->getType());
    unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
    unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

    assert(CurSize <= TotalSize && "Union size mismatch!");
    if (unsigned NumPadBytes = TotalSize - CurSize) {
      const llvm::Type *Ty = llvm::Type::Int8Ty;
      if (NumPadBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumPadBytes);

      Elts.push_back(llvm::Constant::getNullValue(Ty));
      Types.push_back(Ty);
    }

    llvm::StructType* STy = llvm::StructType::get(Types, false);
    return llvm::ConstantStruct::get(STy, Elts);
  }

  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);

    // The old union builder below is unreachable; see the FIXME in
    // EmitStructInitialization.
    const llvm::Type *Ty = ConvertType(ILE->getType());

    FieldDecl* curField = ILE->getInitializedFieldInUnion();
    if (!curField) {
      // There's no field to initialize, so value-initialize the union.
#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
      for (RecordDecl::field_iterator Field = RD->field_begin(),
           FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return llvm::Constant::getNullValue(Ty);
    }

    if (curField->isBitField()) {
      // Create a dummy struct for bit-field insertion.
      unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
      llvm::Constant* NV =
        llvm::Constant::getNullValue(llvm::Type::Int8Ty);
      std::vector<llvm::Constant*> Elts(NumElts, NV);

      InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
      const llvm::ArrayType *RetTy =
        llvm::ArrayType::get(NV->getType(), NumElts);
      return llvm::ConstantArray::get(RetTy, Elts);
    }

    llvm::Constant *InitElem;
    if (ILE->getNumInits() > 0) {
      Expr *Init = ILE->getInit(0);
      InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
    } else {
      InitElem = CGM.EmitNullConstant(curField->getType());
    }
    return EmitUnion(InitElem, Ty);
  }

  llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
    const llvm::VectorType *VType =
      cast<llvm::VectorType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = VType->getElementType();
    std::vector<llvm::Constant*> Elts;
    unsigned NumElements = VType->getNumElements();
    unsigned NumInitElements = ILE->getNumInits();

    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      Elts.push_back(C);
    }

    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    return llvm::ConstantVector::get(VType, Elts);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isStructureType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    if (ILE->getType()->isVectorType())
      return EmitVectorInitialization(ILE);

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

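  // The two visitors below handle string-like initializers for arrays by
  // emitting the character data itself as an inline array constant rather
  // than the address of a separate global. For example (a sketch),
  // 'char buf[8] = "hi";' becomes the array constant
  // c"hi\00\00\00\00\00\00".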
  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantArray::get(Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstQualified(),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral");
      return C;
    }
    case Expr::DeclRefExprClass:
    case Expr::QualifiedDeclRefExprClass: {
      NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(GlobalDecl(FD));
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isBlockVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      // __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level".
      std::string Str;
      if (cast<PredefinedExpr>(E)->getIdentType() ==
          PredefinedExpr::PrettyFunction)
        Str = "top level";

      return CGM.GetAddrOfConstantCString(Str, ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
      return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      if (CE->isBuiltinCall(CGM.getContext()) !=
          Builtin::BI__builtin___CFStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

} // end anonymous namespace.

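// EmitConstantExpr first runs the AST-level constant evaluator
// (Expr::Evaluate, or EvaluateAsLValue for reference destinations) and
// lowers the resulting APValue directly; only when evaluation fails does it
// fall back to the syntactic ConstExprEmitter visitor above.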
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success) {
    assert(!Result.HasSideEffects &&
           "Constant expr should not have any side effects!");
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
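    // An lvalue result is lowered as a base address plus a byte offset. The
    // offset is applied by bit-casting the base to i8*, indexing it with a
    // single GEP, and casting back, so the arithmetic is in bytes regardless
    // of the base's pointee type.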
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::Int64Ty,
                               Result.Val.getLValueOffset());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type =
            llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getBitCast(C, DestTy);

        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return llvm::ConstantExpr::getTrunc(C, DestTy);

        return C;
      }
    }
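    // In the integer case below (and in the visitor fallback at the end of
    // this function), an i1 constant is zero-extended to the in-memory
    // representation of bool (ConvertTypeForMem), since a stored bool is
    // byte-sized.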
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType() == llvm::Type::Int1Ty) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = llvm::ConstantExpr::getZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      return llvm::ConstantStruct::get(Complex, 2);
    }
    case APValue::Float:
      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatReal());
      Complex[1] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatImag());

      return llvm::ConstantStruct::get(Complex, 2);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
      }
      return llvm::ConstantVector::get(&Inits[0], Inits.size());
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType() == llvm::Type::Int1Ty) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  // Always return an LLVM null constant for now; this will change when we
  // get support for IRGen of member pointers.
  return llvm::Constant::getNullValue(getTypes().ConvertType(T));
}