/*---------------------------------------------------------------*/
/*--- begin host_mips_defs.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2010-2012 RT-RK
      mips-valgrind@rt-rk.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex.h"
#include "libvex_trc_values.h"

#include "main_util.h"
#include "host_generic_regs.h"
#include "host_mips_defs.h"

/*---------------- Registers ----------------*/

void ppHRegMIPS(HReg reg, Bool mode64)
{
   Int r;
   static HChar *ireg32_names[35]
       = { "$0",  "$1",  "$2",  "$3",  "$4",  "$5",  "$6",  "$7",
           "$8",  "$9",  "$10", "$11", "$12", "$13", "$14", "$15",
           "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
           "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
           "%32", "%33", "%34",
       };

   static HChar *freg32_names[32]
       = { "$f0",  "$f1",  "$f2",  "$f3",  "$f4",  "$f5",  "$f6",  "$f7",
           "$f8",  "$f9",  "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
           "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
           "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31"
       };

   static HChar *freg64_names[32]
       = { "$d0", "$d1", "$d2",  "$d3",  "$d4",  "$d5",  "$d6",  "$d7",
           "$d8", "$d9", "$d10", "$d11", "$d12", "$d13", "$d14", "$d15",
       };

   /* Be generic for all virtual regs. */
   if (hregIsVirtual(reg)) {
      ppHReg(reg);
      return;
   }

   /* But specific for real regs. */
   vassert(hregClass(reg) == HRcInt32 || hregClass(reg) == HRcInt64 ||
           hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64);

   {
      switch (hregClass(reg)) {
         case HRcInt32:
            r = hregNumber(reg);
            vassert(r >= 0 && r < 32);
            vex_printf("%s", ireg32_names[r]);
            return;
         case HRcFlt32:
            r = hregNumber(reg);
            vassert(r >= 0 && r < 32);
            vex_printf("%s", freg32_names[r]);
            return;
         case HRcFlt64:
            r = hregNumber(reg);
            vassert(r >= 0 && r < 32);
            vex_printf("%s", freg64_names[r]);
            return;
         default:
            vpanic("ppHRegMIPS");
            break;
      }
   }

   return;
}

#define MkHRegGPR(_n, _mode64) \
   mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False)

HReg hregMIPS_GPR0(Bool mode64)  { return MkHRegGPR(0, mode64); }
HReg hregMIPS_GPR1(Bool mode64)  { return MkHRegGPR(1, mode64); }
HReg hregMIPS_GPR2(Bool mode64)  { return MkHRegGPR(2, mode64); }
HReg hregMIPS_GPR3(Bool mode64)  { return MkHRegGPR(3, mode64); }
HReg hregMIPS_GPR4(Bool mode64)  { return MkHRegGPR(4, mode64); }
HReg hregMIPS_GPR5(Bool mode64)  { return MkHRegGPR(5, mode64); }
HReg hregMIPS_GPR6(Bool mode64)  { return MkHRegGPR(6, mode64); }
HReg hregMIPS_GPR7(Bool mode64)  { return MkHRegGPR(7, mode64); }
HReg hregMIPS_GPR8(Bool mode64)  { return MkHRegGPR(8, mode64); }
HReg hregMIPS_GPR9(Bool mode64)  { return MkHRegGPR(9, mode64); }
HReg hregMIPS_GPR10(Bool mode64) { return MkHRegGPR(10, mode64); }
HReg hregMIPS_GPR11(Bool mode64) { return MkHRegGPR(11, mode64); }
HReg hregMIPS_GPR12(Bool mode64) { return MkHRegGPR(12, mode64); }
HReg hregMIPS_GPR13(Bool mode64) { return MkHRegGPR(13, mode64); }
HReg hregMIPS_GPR14(Bool mode64) { return MkHRegGPR(14, mode64); }
HReg hregMIPS_GPR15(Bool mode64) { return MkHRegGPR(15, mode64); }
HReg hregMIPS_GPR16(Bool mode64) { return MkHRegGPR(16, mode64); }
HReg hregMIPS_GPR17(Bool mode64) { return MkHRegGPR(17, mode64); }
HReg hregMIPS_GPR18(Bool mode64) { return MkHRegGPR(18, mode64); }
HReg hregMIPS_GPR19(Bool mode64) { return MkHRegGPR(19, mode64); }
HReg hregMIPS_GPR20(Bool mode64) { return MkHRegGPR(20, mode64); }
HReg hregMIPS_GPR21(Bool mode64) { return MkHRegGPR(21, mode64); }
HReg hregMIPS_GPR22(Bool mode64) { return MkHRegGPR(22, mode64); }
HReg hregMIPS_GPR23(Bool mode64) { return MkHRegGPR(23, mode64); }
HReg hregMIPS_GPR24(Bool mode64) { return MkHRegGPR(24, mode64); }
HReg hregMIPS_GPR25(Bool mode64) { return MkHRegGPR(25, mode64); }
HReg hregMIPS_GPR26(Bool mode64) { return MkHRegGPR(26, mode64); }
HReg hregMIPS_GPR27(Bool mode64) { return MkHRegGPR(27, mode64); }
HReg hregMIPS_GPR28(Bool mode64) { return MkHRegGPR(28, mode64); }
HReg hregMIPS_GPR29(Bool mode64) { return MkHRegGPR(29, mode64); }
HReg hregMIPS_GPR30(Bool mode64) { return MkHRegGPR(30, mode64); }
HReg hregMIPS_GPR31(Bool mode64) { return MkHRegGPR(31, mode64); }

#define MkHRegFPR(_n, _mode64) \
   mkHReg(_n, _mode64 ? HRcFlt64 : HRcFlt32, False)

HReg hregMIPS_F0(Bool mode64)  { return MkHRegFPR(0, mode64); }
HReg hregMIPS_F1(Bool mode64)  { return MkHRegFPR(1, mode64); }
HReg hregMIPS_F2(Bool mode64)  { return MkHRegFPR(2, mode64); }
HReg hregMIPS_F3(Bool mode64)  { return MkHRegFPR(3, mode64); }
HReg hregMIPS_F4(Bool mode64)  { return MkHRegFPR(4, mode64); }
HReg hregMIPS_F5(Bool mode64)  { return MkHRegFPR(5, mode64); }
HReg hregMIPS_F6(Bool mode64)  { return MkHRegFPR(6, mode64); }
HReg hregMIPS_F7(Bool mode64)  { return MkHRegFPR(7, mode64); }
HReg hregMIPS_F8(Bool mode64)  { return MkHRegFPR(8, mode64); }
HReg hregMIPS_F9(Bool mode64)  { return MkHRegFPR(9, mode64); }
HReg hregMIPS_F10(Bool mode64) { return MkHRegFPR(10, mode64); }
HReg hregMIPS_F11(Bool mode64) { return MkHRegFPR(11, mode64); }
HReg hregMIPS_F12(Bool mode64) { return MkHRegFPR(12, mode64); }
HReg hregMIPS_F13(Bool mode64) { return MkHRegFPR(13, mode64); }
HReg hregMIPS_F14(Bool mode64) { return MkHRegFPR(14, mode64); }
HReg hregMIPS_F15(Bool mode64) { return MkHRegFPR(15, mode64); }
HReg hregMIPS_F16(Bool mode64) { return MkHRegFPR(16, mode64); }
HReg hregMIPS_F17(Bool mode64) { return MkHRegFPR(17, mode64); }
HReg hregMIPS_F18(Bool mode64) { return MkHRegFPR(18, mode64); }
HReg hregMIPS_F19(Bool mode64) { return MkHRegFPR(19, mode64); }
HReg hregMIPS_F20(Bool mode64) { return MkHRegFPR(20, mode64); }
HReg hregMIPS_F21(Bool mode64) { return MkHRegFPR(21, mode64); }
HReg hregMIPS_F22(Bool mode64) { return MkHRegFPR(22, mode64); }
HReg hregMIPS_F23(Bool mode64) { return MkHRegFPR(23, mode64); }
HReg hregMIPS_F24(Bool mode64) { return MkHRegFPR(24, mode64); }
HReg hregMIPS_F25(Bool mode64) { return MkHRegFPR(25, mode64); }
HReg hregMIPS_F26(Bool mode64) { return MkHRegFPR(26, mode64); }
HReg hregMIPS_F27(Bool mode64) { return MkHRegFPR(27, mode64); }
HReg hregMIPS_F28(Bool mode64) { return MkHRegFPR(28, mode64); }
HReg hregMIPS_F29(Bool mode64) { return MkHRegFPR(29, mode64); }
HReg hregMIPS_F30(Bool mode64) { return MkHRegFPR(30, mode64); }
HReg hregMIPS_F31(Bool mode64) { return MkHRegFPR(31, mode64); }

HReg hregMIPS_PC(Bool mode64)
{
   return mkHReg(32, mode64 ? HRcFlt64 : HRcFlt32, False);
}

HReg hregMIPS_HI(Bool mode64)
{
   return mkHReg(33, mode64 ? HRcFlt64 : HRcFlt32, False);
}

HReg hregMIPS_LO(Bool mode64)
{
   return mkHReg(34, mode64 ? HRcFlt64 : HRcFlt32, False);
}

HReg hregMIPS_D0(void)  { return mkHReg(0, HRcFlt64, False); }
HReg hregMIPS_D1(void)  { return mkHReg(2, HRcFlt64, False); }
HReg hregMIPS_D2(void)  { return mkHReg(4, HRcFlt64, False); }
HReg hregMIPS_D3(void)  { return mkHReg(6, HRcFlt64, False); }
HReg hregMIPS_D4(void)  { return mkHReg(8, HRcFlt64, False); }
HReg hregMIPS_D5(void)  { return mkHReg(10, HRcFlt64, False); }
HReg hregMIPS_D6(void)  { return mkHReg(12, HRcFlt64, False); }
HReg hregMIPS_D7(void)  { return mkHReg(14, HRcFlt64, False); }
HReg hregMIPS_D8(void)  { return mkHReg(16, HRcFlt64, False); }
HReg hregMIPS_D9(void)  { return mkHReg(18, HRcFlt64, False); }
HReg hregMIPS_D10(void) { return mkHReg(20, HRcFlt64, False); }
HReg hregMIPS_D11(void) { return mkHReg(22, HRcFlt64, False); }
HReg hregMIPS_D12(void) { return mkHReg(24, HRcFlt64, False); }
HReg hregMIPS_D13(void) { return mkHReg(26, HRcFlt64, False); }
HReg hregMIPS_D14(void) { return mkHReg(28, HRcFlt64, False); }
HReg hregMIPS_D15(void) { return mkHReg(30, HRcFlt64, False); }

HReg hregMIPS_FIR(void)  { return mkHReg(35, HRcInt32, False); }
HReg hregMIPS_FCCR(void) { return mkHReg(36, HRcInt32, False); }
HReg hregMIPS_FEXR(void) { return mkHReg(37, HRcInt32, False); }
HReg hregMIPS_FENR(void) { return mkHReg(38, HRcInt32, False); }
HReg hregMIPS_FCSR(void) { return mkHReg(39, HRcInt32, False); }
HReg hregMIPS_COND(void) { return mkHReg(47, HRcInt32, False); }

void getAllocableRegs_MIPS(Int * nregs, HReg ** arr, Bool mode64)
{
   if (mode64)
      *nregs = 27;
   else
      *nregs = 34;
   UInt i = 0;
   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));

   /* ZERO = constant 0
      AT   = assembler temporary */
   /* Callee-saved regs are listed first, since we prefer them
      if they're available. */
   (*arr)[i++] = hregMIPS_GPR16(mode64);
   (*arr)[i++] = hregMIPS_GPR17(mode64);
   (*arr)[i++] = hregMIPS_GPR18(mode64);
   (*arr)[i++] = hregMIPS_GPR19(mode64);
   (*arr)[i++] = hregMIPS_GPR20(mode64);
   (*arr)[i++] = hregMIPS_GPR21(mode64);
   (*arr)[i++] = hregMIPS_GPR22(mode64);
   if (!mode64)
      (*arr)[i++] = hregMIPS_GPR23(mode64);

   /* Otherwise we'll have to slum it out with caller-saved ones. */
   if (mode64) {
      (*arr)[i++] = hregMIPS_GPR8(mode64);
      (*arr)[i++] = hregMIPS_GPR9(mode64);
      (*arr)[i++] = hregMIPS_GPR10(mode64);
      (*arr)[i++] = hregMIPS_GPR11(mode64);
   }
   (*arr)[i++] = hregMIPS_GPR12(mode64);
   (*arr)[i++] = hregMIPS_GPR13(mode64);
   (*arr)[i++] = hregMIPS_GPR14(mode64);
   (*arr)[i++] = hregMIPS_GPR15(mode64);
   (*arr)[i++] = hregMIPS_GPR24(mode64);
   /* Not given to the allocator (mips32):
      t0 (= dispatch_ctr)
      t1 (= spill reg temp)
      t2 (= guest_state)
      t3 (= PC = next guest address)
      K0 and K1 are reserved for the OS kernel
      GP = global pointer
      SP = stack pointer
      FP = frame pointer
      RA = link register
      + PC, HI and LO */
   (*arr)[i++] = hregMIPS_F20(mode64);
   (*arr)[i++] = hregMIPS_F21(mode64);
   (*arr)[i++] = hregMIPS_F22(mode64);
   (*arr)[i++] = hregMIPS_F23(mode64);
   (*arr)[i++] = hregMIPS_F24(mode64);
   (*arr)[i++] = hregMIPS_F25(mode64);
   (*arr)[i++] = hregMIPS_F26(mode64);
   (*arr)[i++] = hregMIPS_F27(mode64);
   (*arr)[i++] = hregMIPS_F28(mode64);
   (*arr)[i++] = hregMIPS_F29(mode64);
   (*arr)[i++] = hregMIPS_F30(mode64);
   if (!mode64) {
      /* Fake double floating point */
      (*arr)[i++] = hregMIPS_D0();
      (*arr)[i++] = hregMIPS_D1();
      (*arr)[i++] = hregMIPS_D2();
      (*arr)[i++] = hregMIPS_D3();
      (*arr)[i++] = hregMIPS_D4();
      (*arr)[i++] = hregMIPS_D5();
      (*arr)[i++] = hregMIPS_D6();
      (*arr)[i++] = hregMIPS_D7();
      (*arr)[i++] = hregMIPS_D8();
      (*arr)[i++] = hregMIPS_D9();
   }
   vassert(i == *nregs);
}

/*----------------- Condition Codes ----------------------*/

HChar *showMIPSCondCode(MIPSCondCode cond)
{
   HChar* ret;
   switch (cond) {
      case MIPScc_EQ:
         ret = "EQ";   /* equal */
         break;
      case MIPScc_NE:
         ret = "NEQ";  /* not equal */
         break;
      case MIPScc_HS:
         ret = "GE";   /* >=u (Greater Than or Equal) */
         break;
      case MIPScc_LO:
         ret = "LT";   /* <u (lower) */
         break;
      case MIPScc_MI:
         ret = "mi";   /* minus (negative) */
         break;
      case MIPScc_PL:
         ret = "pl";   /* plus (zero or +ve) */
         break;
      case MIPScc_VS:
         ret = "vs";   /* overflow */
         break;
      case MIPScc_VC:
         ret = "vc";   /* no overflow */
         break;
      case MIPScc_HI:
         ret = "hi";   /* >u (higher) */
         break;
      case MIPScc_LS:
         ret = "ls";   /* <=u (lower or same) */
         break;
      case MIPScc_GE:
         ret = "ge";   /* >=s (signed greater or equal) */
         break;
      case MIPScc_LT:
         ret = "lt";   /* <s (signed less than) */
         break;
      case MIPScc_GT:
         ret = "gt";   /* >s (signed greater) */
         break;
      case MIPScc_LE:
         ret = "le";   /* <=s (signed less or equal) */
         break;
      case MIPScc_AL:
         ret = "al";   /* always (unconditional) */
         break;
      case MIPScc_NV:
         ret = "nv";   /* never (unconditional) */
         break;
      default:
         vpanic("showMIPSCondCode");
         break;
   }
   return ret;
}

HChar *showMIPSFpOp(MIPSFpOp op)
{
   HChar *ret;
   switch (op) {
      case Mfp_ADDD:
         ret = "ADD.D";
         break;
      case Mfp_SUBD:
         ret = "SUB.D";
         break;
      case Mfp_MULD:
         ret = "MUL.D";
         break;
      case Mfp_DIVD:
         ret = "DIV.D";
         break;
      case Mfp_MADDD:
         ret = "MADD.D";
         break;
      case Mfp_MSUBD:
         ret = "MSUB.D";
         break;
      case Mfp_MADDS:
         ret = "MADD.S";
         break;
      case Mfp_MSUBS:
         ret = "MSUB.S";
         break;
      case Mfp_ADDS:
         ret = "ADD.S";
         break;
      case Mfp_SUBS:
         ret = "SUB.S";
         break;
      case Mfp_MULS:
         ret = "MUL.S";
         break;
      case Mfp_DIVS:
         ret = "DIV.S";
         break;
      case Mfp_SQRTS:
         ret = "SQRT.S";
         break;
      case Mfp_SQRTD:
         ret = "SQRT.D";
         break;
      case Mfp_RSQRTS:
         ret = "RSQRT.S";
         break;
      case Mfp_RSQRTD:
         ret = "RSQRT.D";
         break;
      case Mfp_RECIPS:
         ret = "RECIP.S";
         break;
      case Mfp_RECIPD:
         ret = "RECIP.D";
         break;
      case Mfp_ABSS:
         ret = "ABS.S";
         break;
      case Mfp_ABSD:
         ret = "ABS.D";
         break;
      case Mfp_NEGS:
         ret = "NEG.S";
         break;
      case Mfp_NEGD:
         ret = "NEG.D";
         break;
      case Mfp_MOVS:
         ret = "MOV.S";
         break;
      case Mfp_MOVD:
         ret = "MOV.D";
         break;
      case Mfp_RES:
         ret = "RES";
         break;
      case Mfp_ROUNDWS:
         ret = "ROUND.W.S";
         break;
      case Mfp_ROUNDWD:
         ret = "ROUND.W.D";
         break;
      case Mfp_FLOORWS:
         ret = "FLOOR.W.S";
         break;
      case Mfp_FLOORWD:
         ret = "FLOOR.W.D";
         break;
      case Mfp_RSQRTE:
         ret = "frsqrte";
         break;
      case Mfp_CVTDW:
      case Mfp_CVTD:
         ret = "CVT.D";
         break;
      case Mfp_CVTSD:
      case Mfp_CVTSW:
         ret = "CVT.S";
         break;
      case Mfp_CVTWS:
      case Mfp_CVTWD:
         ret = "CVT.W";
         break;
      case Mfp_TRUWD:
      case Mfp_TRUWS:
         ret = "TRUNC.W";
         break;
      case Mfp_TRULD:
      case Mfp_TRULS:
         ret = "TRUNC.L";
         break;
      case Mfp_CEILWS:
      case Mfp_CEILWD:
         ret = "CEIL.W";
         break;
      case Mfp_CEILLS:
      case Mfp_CEILLD:
         ret = "CEIL.L";
         break;
      case Mfp_CMP:
         ret = "C.cond.d";
         break;
      default:
         vpanic("showMIPSFpOp");
         break;
   }
   return ret;
}

/* --------- MIPSAMode: memory address expressions. --------- */

MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
{
   MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
   am->tag = Mam_IR;
   am->Mam.IR.base = base;
   am->Mam.IR.index = idx;

   return am;
}

MIPSAMode *MIPSAMode_RR(HReg idx, HReg base)
{
   MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
   am->tag = Mam_RR;
   am->Mam.RR.base = base;
   am->Mam.RR.index = idx;

   return am;
}

MIPSAMode *dopyMIPSAMode(MIPSAMode * am)
{
   MIPSAMode* ret;
   switch (am->tag) {
      case Mam_IR:
         ret = MIPSAMode_IR(am->Mam.IR.index, am->Mam.IR.base);
         break;
      case Mam_RR:
         ret = MIPSAMode_RR(am->Mam.RR.index, am->Mam.RR.base);
         break;
      default:
         vpanic("dopyMIPSAMode");
         break;
   }
   return ret;
}

MIPSAMode *nextMIPSAModeFloat(MIPSAMode * am)
{
   MIPSAMode* ret;
   switch (am->tag) {
      case Mam_IR:
         ret = MIPSAMode_IR(am->Mam.IR.index + 8, am->Mam.IR.base);
         break;
      case Mam_RR:
         ret = MIPSAMode_RR(am->Mam.RR.index + 1, am->Mam.RR.base);
         break;
      default:
         vpanic("nextMIPSAModeFloat");
         break;
   }
   return ret;
}

MIPSAMode *nextMIPSAModeInt(MIPSAMode * am)
{
   MIPSAMode* ret;
   switch (am->tag) {
      case Mam_IR:
         ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
         break;
      case Mam_RR:
         ret = MIPSAMode_RR(am->Mam.RR.index + 1, am->Mam.RR.base);
         break;
      default:
         vpanic("nextMIPSAModeInt");
         break;
   }
   return ret;
}

void ppMIPSAMode(MIPSAMode * am, Bool mode64)
{
   switch (am->tag) {
      case Mam_IR:
         if (am->Mam.IR.index == 0)
            vex_printf("0(");
         else
            vex_printf("%d(", (Int) am->Mam.IR.index);
         ppHRegMIPS(am->Mam.IR.base, mode64);
         vex_printf(")");
         return;
      case Mam_RR:
         ppHRegMIPS(am->Mam.RR.base, mode64);
         vex_printf(", ");
         ppHRegMIPS(am->Mam.RR.index, mode64);
         return;
      default:
         vpanic("ppMIPSAMode");
         break;
   }
}

static void addRegUsage_MIPSAMode(HRegUsage * u, MIPSAMode * am)
{
   switch (am->tag) {
      case Mam_IR:
         addHRegUse(u, HRmRead, am->Mam.IR.base);
         return;
      case Mam_RR:
         addHRegUse(u, HRmRead, am->Mam.RR.base);
         addHRegUse(u, HRmRead, am->Mam.RR.index);
         return;
      default:
         vpanic("addRegUsage_MIPSAMode");
         break;
   }
}

static void mapRegs_MIPSAMode(HRegRemap * m, MIPSAMode * am)
{
   switch (am->tag) {
      case Mam_IR:
         am->Mam.IR.base = lookupHRegRemap(m, am->Mam.IR.base);
         return;
      case Mam_RR:
         am->Mam.RR.base = lookupHRegRemap(m, am->Mam.RR.base);
         am->Mam.RR.index = lookupHRegRemap(m, am->Mam.RR.index);
         return;
      default:
         vpanic("mapRegs_MIPSAMode");
         break;
   }
}
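
/* Note added for clarity: a MIPSAMode is either Mam_IR -- an integer
   displacement plus a base register, which ppMIPSAMode above shows as
   "disp(base)", for example "8($10)" -- or Mam_RR, a base register
   plus an index register, shown as "base, index".  The emitter
   doAMode_IR near the end of this file requires an Mam_IR displacement
   to fit in 16 bits (it asserts index < 0x10000), while doAMode_RR
   instead spends extra instructions forming base+index.  The register
   numbers in the example are illustrative only. */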

/* --------- Operand, which can be a reg or a u16/s16. --------- */

MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16)
{
   MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
   op->tag = Mrh_Imm;
   op->Mrh.Imm.syned = syned;
   op->Mrh.Imm.imm16 = imm16;
   /* If this is a signed value, ensure it's not -32768, so that we
      are guaranteed always to be able to negate if needed. */
   if (syned)
      vassert(imm16 != 0x8000);
   vassert(syned == True || syned == False);
   return op;
}

MIPSRH *MIPSRH_Reg(HReg reg)
{
   MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
   op->tag = Mrh_Reg;
   op->Mrh.Reg.reg = reg;
   return op;
}

void ppMIPSRH(MIPSRH * op, Bool mode64)
{
   MIPSRHTag tag = op->tag;
   switch (tag) {
      case Mrh_Imm:
         if (op->Mrh.Imm.syned)
            vex_printf("%d", (Int) (Short) op->Mrh.Imm.imm16);
         else
            vex_printf("%u", (UInt) (UShort) op->Mrh.Imm.imm16);
         return;
      case Mrh_Reg:
         ppHRegMIPS(op->Mrh.Reg.reg, mode64);
         return;
      default:
         vpanic("ppMIPSRH");
         break;
   }
}

/* An MIPSRH can only be used in a "read" context (what would it mean
   to write or modify a literal?) and so we enumerate its registers
   accordingly. */
static void addRegUsage_MIPSRH(HRegUsage * u, MIPSRH * op)
{
   switch (op->tag) {
      case Mrh_Imm:
         return;
      case Mrh_Reg:
         addHRegUse(u, HRmRead, op->Mrh.Reg.reg);
         return;
      default:
         vpanic("addRegUsage_MIPSRH");
         break;
   }
}

static void mapRegs_MIPSRH(HRegRemap * m, MIPSRH * op)
{
   switch (op->tag) {
      case Mrh_Imm:
         return;
      case Mrh_Reg:
         op->Mrh.Reg.reg = lookupHRegRemap(m, op->Mrh.Reg.reg);
         return;
      default:
         vpanic("mapRegs_MIPSRH");
         break;
   }
}

/* --------- Instructions. --------- */

HChar *showMIPSUnaryOp(MIPSUnaryOp op)
{
   HChar* ret;
   switch (op) {
      case Mun_CLO:
         ret = "clo";
         break;
      case Mun_CLZ:
         ret = "clz";
         break;
      case Mun_NOP:
         ret = "nop";
         break;
      default:
         vpanic("showMIPSUnaryOp");
         break;
   }
   return ret;
}

HChar *showMIPSAluOp(MIPSAluOp op, Bool immR)
{
   HChar* ret;
   switch (op) {
      case Malu_ADD:
         ret = immR ? "addiu" : "addu";
         break;
      case Malu_SUB:
         ret = "subu";
         break;
      case Malu_AND:
         ret = immR ? "andi" : "and";
         break;
      case Malu_OR:
         ret = immR ? "ori" : "or";
         break;
      case Malu_NOR:
         vassert(immR == False);  /* there's no nor with an immediate operand! */
         ret = "nor";
         break;
      case Malu_XOR:
         ret = immR ? "xori" : "xor";
         break;
      default:
         vpanic("showMIPSAluOp");
         break;
   }
   return ret;
}

HChar *showMIPSShftOp(MIPSShftOp op, Bool immR, Bool sz32)
{
   HChar *ret;
   switch (op) {
      case Mshft_SRA:
         ret = immR ? (sz32 ? "sra" : "dsra") : (sz32 ? "srav" : "dsrav");
         break;
      case Mshft_SLL:
         ret = immR ? (sz32 ? "sll" : "dsll") : (sz32 ? "sllv" : "dsllv");
         break;
      case Mshft_SRL:
         ret = immR ? (sz32 ? "srl" : "dsrl") : (sz32 ? "srlv" : "dsrlv");
         break;
      default:
         vpanic("showMIPSShftOp");
         break;
   }
   return ret;
}

HChar *showMIPSMaccOp(MIPSMaccOp op, Bool variable)
{
   HChar *ret;
   switch (op) {
      case Macc_ADD:
         ret = variable ? "madd" : "maddu";
         break;
      case Macc_SUB:
         ret = variable ? "msub" : "msubu";
         break;
      default:
         vpanic("showMIPSMaccOp");
         break;
   }
   return ret;
}

MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_LI;
   i->Min.LI.dst = dst;
   i->Min.LI.imm = imm;
   return i;
}

MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Alu;
   i->Min.Alu.op = op;
   i->Min.Alu.dst = dst;
   i->Min.Alu.srcL = srcL;
   i->Min.Alu.srcR = srcR;
   return i;
}

MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL,
                          MIPSRH * srcR)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Shft;
   i->Min.Shft.op = op;
   i->Min.Shft.sz32 = sz32;
   i->Min.Shft.dst = dst;
   i->Min.Shft.srcL = srcL;
   i->Min.Shft.srcR = srcR;
   return i;
}

MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Unary;
   i->Min.Unary.op = op;
   i->Min.Unary.dst = dst;
   i->Min.Unary.src = src;
   return i;
}

MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR,
                         MIPSCondCode cond)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Cmp;
   i->Min.Cmp.syned = syned;
   i->Min.Cmp.sz32 = sz32;
   i->Min.Cmp.dst = dst;
   i->Min.Cmp.srcL = srcL;
   i->Min.Cmp.srcR = srcR;
   i->Min.Cmp.cond = cond;
   return i;
}

/* multiply */
MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL,
                         HReg srcR)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Mul;
   i->Min.Mul.syned = syned;
   i->Min.Mul.widening = wid;  /* widen=True else False */
   i->Min.Mul.sz32 = sz32;     /* True = 32 bits */
   i->Min.Mul.dst = dst;
   i->Min.Mul.srcL = srcL;
   i->Min.Mul.srcR = srcR;
   return i;
}

/* msub */
MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Macc;

   i->Min.Macc.op = Macc_SUB;
   i->Min.Macc.syned = syned;
   i->Min.Macc.srcL = srcL;
   i->Min.Macc.srcR = srcR;
   return i;
}

/* madd */
MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Macc;

   i->Min.Macc.op = Macc_ADD;
   i->Min.Macc.syned = syned;
   i->Min.Macc.srcL = srcL;
   i->Min.Macc.srcR = srcR;
   return i;
}

/* div */
MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Div;
   i->Min.Div.syned = syned;
   i->Min.Div.sz32 = sz32;  /* True = 32 bits */
   i->Min.Div.srcL = srcL;
   i->Min.Div.srcR = srcR;
   return i;
}

MIPSInstr *MIPSInstr_Call(MIPSCondCode cond, Addr32 target, UInt argiregs,
                          HReg src)
{
   UInt mask;
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Call;
   i->Min.Call.cond = cond;
   i->Min.Call.target = target;
   i->Min.Call.argiregs = argiregs;
   i->Min.Call.src = src;
   /* Only r4 .. r7 inclusive may be used as arg regs.  Hence: */
   mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
   vassert(0 == (argiregs & ~mask));
   return i;
}

MIPSInstr *MIPSInstr_CallAlways(MIPSCondCode cond, Addr32 target, UInt argiregs)
{
   UInt mask;
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Call;
   i->Min.Call.cond = cond;
   i->Min.Call.target = target;
   i->Min.Call.argiregs = argiregs;
   /* Only r4 .. r7 inclusive may be used as arg regs.  Hence: */
   mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
   vassert(0 == (argiregs & ~mask));
   return i;
}

MIPSInstr *MIPSInstr_XDirect ( Addr32 dstGA, MIPSAMode* amPC,
                               MIPSCondCode cond, Bool toFastEP ) {
   MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_XDirect;
   i->Min.XDirect.dstGA = dstGA;
   i->Min.XDirect.amPC = amPC;
   i->Min.XDirect.cond = cond;
   i->Min.XDirect.toFastEP = toFastEP;
   return i;
}

MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC,
                              MIPSCondCode cond ) {
   MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_XIndir;
   i->Min.XIndir.dstGA = dstGA;
   i->Min.XIndir.amPC = amPC;
   i->Min.XIndir.cond = cond;
   return i;
}

MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC,
                                 MIPSCondCode cond, IRJumpKind jk ) {
   MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_XAssisted;
   i->Min.XAssisted.dstGA = dstGA;
   i->Min.XAssisted.amPC = amPC;
   i->Min.XAssisted.cond = cond;
   i->Min.XAssisted.jk = jk;
   return i;
}

MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Load;
   i->Min.Load.sz = sz;
   i->Min.Load.src = src;
   i->Min.Load.dst = dst;
   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);

   if (sz == 8)
      vassert(mode64);
   return i;
}

MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Store;
   i->Min.Store.sz = sz;
   i->Min.Store.src = src;
   i->Min.Store.dst = dst;
   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);

   if (sz == 8)
      vassert(mode64);
   return i;
}

MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_LoadL;
   i->Min.LoadL.sz = sz;
   i->Min.LoadL.src = src;
   i->Min.LoadL.dst = dst;
   vassert(sz == 4 || sz == 8);

   if (sz == 8)
      vassert(mode64);
   return i;
}

MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_StoreC;
   i->Min.StoreC.sz = sz;
   i->Min.StoreC.src = src;
   i->Min.StoreC.dst = dst;
   vassert(sz == 4 || sz == 8);

   if (sz == 8)
      vassert(mode64);
   return i;
}

MIPSInstr *MIPSInstr_Mthi(HReg src)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Mthi;
   i->Min.MtHL.src = src;
   return i;
}

MIPSInstr *MIPSInstr_Mtlo(HReg src)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Mtlo;
   i->Min.MtHL.src = src;
   return i;
}

MIPSInstr *MIPSInstr_Mfhi(HReg dst)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Mfhi;
   i->Min.MfHL.dst = dst;
   return i;
}

MIPSInstr *MIPSInstr_Mflo(HReg dst)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_Mflo;
   i->Min.MfHL.dst = dst;
   return i;
}

/* Read/Write Link Register */
MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_RdWrLR;
   i->Min.RdWrLR.wrLR = wrLR;
   i->Min.RdWrLR.gpr = gpr;
   return i;
}

MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_FpLdSt;
   i->Min.FpLdSt.isLoad = isLoad;
   i->Min.FpLdSt.sz = sz;
   i->Min.FpLdSt.reg = reg;
   i->Min.FpLdSt.addr = addr;
   vassert(sz == 4 || sz == 8);
   return i;
}

MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_FpUnary;
   i->Min.FpUnary.op = op;
   i->Min.FpUnary.dst = dst;
   i->Min.FpUnary.src = src;
   return i;
}

MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_FpBinary;
   i->Min.FpBinary.op = op;
   i->Min.FpBinary.dst = dst;
   i->Min.FpBinary.srcL = srcL;
   i->Min.FpBinary.srcR = srcR;
   return i;
}

MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_FpConvert;
   i->Min.FpConvert.op = op;
   i->Min.FpConvert.dst = dst;
   i->Min.FpConvert.src = src;
   return i;
}

MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR,
                               UChar cond1)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_FpCompare;
   i->Min.FpCompare.op = op;
   i->Min.FpCompare.dst = dst;
   i->Min.FpCompare.srcL = srcL;
   i->Min.FpCompare.srcR = srcR;
   i->Min.FpCompare.cond1 = cond1;
   return i;
}

MIPSInstr *MIPSInstr_MovCond(HReg dst, HReg argL, MIPSRH * argR, HReg condR,
                             MIPSCondCode cond)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_MovCond;
   i->Min.MovCond.dst = dst;
   i->Min.MovCond.srcL = argL;
   i->Min.MovCond.srcR = argR;
   i->Min.MovCond.condR = condR;
   i->Min.MovCond.cond = cond;
   return i;
}

MIPSInstr *MIPSInstr_MtFCSR(HReg src)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_MtFCSR;
   i->Min.MtFCSR.src = src;
   return i;
}

MIPSInstr *MIPSInstr_MfFCSR(HReg dst)
{
   MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_MfFCSR;
   i->Min.MfFCSR.dst = dst;
   return i;
}

MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
                               MIPSAMode* amFailAddr ) {
   MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_EvCheck;
   i->Min.EvCheck.amCounter = amCounter;
   i->Min.EvCheck.amFailAddr = amFailAddr;
   return i;
}

MIPSInstr* MIPSInstr_ProfInc ( void ) {
   MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
   i->tag = Min_ProfInc;
   return i;
}

/* -------- Pretty Print instructions ------------- */
static void ppLoadImm(HReg dst, ULong imm, Bool mode64)
{
   vex_printf("li ");
   ppHRegMIPS(dst, mode64);
   vex_printf(",0x%016llx", imm);
}

void ppMIPSInstr(MIPSInstr * i, Bool mode64)
{
   switch (i->tag) {
      case Min_LI:
         ppLoadImm(i->Min.LI.dst, i->Min.LI.imm, mode64);
         break;
      case Min_Alu: {
         HReg r_srcL = i->Min.Alu.srcL;
         MIPSRH *rh_srcR = i->Min.Alu.srcR;
         /* generic */
         vex_printf("%s ", showMIPSAluOp(i->Min.Alu.op,
                                         toBool(rh_srcR->tag == Mrh_Imm)));
         ppHRegMIPS(i->Min.Alu.dst, mode64);
         vex_printf(",");
         ppHRegMIPS(r_srcL, mode64);
         vex_printf(",");
         ppMIPSRH(rh_srcR, mode64);
         return;
      }
      case Min_Shft: {
         HReg r_srcL = i->Min.Shft.srcL;
         MIPSRH *rh_srcR = i->Min.Shft.srcR;
         vex_printf("%s ", showMIPSShftOp(i->Min.Shft.op,
                                          toBool(rh_srcR->tag == Mrh_Imm),
                                          i->Min.Shft.sz32));
         ppHRegMIPS(i->Min.Shft.dst, mode64);
         vex_printf(",");
         ppHRegMIPS(r_srcL, mode64);
         vex_printf(",");
         ppMIPSRH(rh_srcR, mode64);
         return;
      }
      case Min_Unary: {
         vex_printf("%s ", showMIPSUnaryOp(i->Min.Unary.op));
         ppHRegMIPS(i->Min.Unary.dst, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.Unary.src, mode64);
         return;
      }
      case Min_Cmp: {
         vex_printf("word_compare ");
         ppHRegMIPS(i->Min.Cmp.dst, mode64);
         vex_printf(" = %s ( ", showMIPSCondCode(i->Min.Cmp.cond));
         ppHRegMIPS(i->Min.Cmp.srcL, mode64);
         vex_printf(", ");
         ppHRegMIPS(i->Min.Cmp.srcR, mode64);
         vex_printf(" )");
         return;
      }
      case Min_Mul: {
         switch (i->Min.Mul.widening) {
            case False:
               vex_printf("mul ");
               ppHRegMIPS(i->Min.Mul.dst, mode64);
               vex_printf(", ");
               ppHRegMIPS(i->Min.Mul.srcL, mode64);
               vex_printf(", ");
               ppHRegMIPS(i->Min.Mul.srcR, mode64);
               return;
            case True:
               vex_printf("%s%s ", i->Min.Mul.sz32 ? "mult" : "dmult",
                                   i->Min.Mul.syned ? "" : "u");
               ppHRegMIPS(i->Min.Mul.dst, mode64);
               vex_printf(", ");
               ppHRegMIPS(i->Min.Mul.srcL, mode64);
               vex_printf(", ");
               ppHRegMIPS(i->Min.Mul.srcR, mode64);
               return;
         }
         break;
      }
      case Min_Mthi: {
         vex_printf("mthi ");
         ppHRegMIPS(i->Min.MtHL.src, mode64);
         return;
      }
      case Min_Mtlo: {
         vex_printf("mtlo ");
         ppHRegMIPS(i->Min.MtHL.src, mode64);
         return;
      }
      case Min_Mfhi: {
         vex_printf("mfhi ");
         ppHRegMIPS(i->Min.MfHL.dst, mode64);
         return;
      }
      case Min_Mflo: {
         vex_printf("mflo ");
         ppHRegMIPS(i->Min.MfHL.dst, mode64);
         return;
      }
      case Min_Macc: {
         vex_printf("%s ", showMIPSMaccOp(i->Min.Macc.op, i->Min.Macc.syned));
         ppHRegMIPS(i->Min.Macc.srcL, mode64);
         vex_printf(", ");
         ppHRegMIPS(i->Min.Macc.srcR, mode64);
         return;
      }
      case Min_Div: {
         if (!i->Min.Div.sz32)
            vex_printf("d");
         vex_printf("div");
         vex_printf("%s ", i->Min.Div.syned ? "s" : "u");
         ppHRegMIPS(i->Min.Div.srcL, mode64);
         vex_printf(", ");
         ppHRegMIPS(i->Min.Div.srcR, mode64);
         return;
      }
      case Min_Call: {
         Int n;
         vex_printf("call: ");
         if (i->Min.Call.cond != MIPScc_AL) {
            vex_printf("if (%s) ", showMIPSCondCode(i->Min.Call.cond));
         }
         vex_printf("{ ");
         ppLoadImm(hregMIPS_GPR11(mode64), i->Min.Call.target, mode64);

         vex_printf(" ; mtctr r10 ; bctrl [");
         for (n = 0; n < 32; n++) {
            if (i->Min.Call.argiregs & (1 << n)) {
               vex_printf("r%d", n);
               if ((i->Min.Call.argiregs >> n) > 1)
                  vex_printf(",");
            }
         }
         vex_printf("] }");
         break;
      }
      case Min_XDirect:
         vex_printf("(xDirect) ");
         vex_printf("if (guest_COND.%s) { ",
                    showMIPSCondCode(i->Min.XDirect.cond));
         vex_printf("move $9, 0x%x,", i->Min.XDirect.dstGA);
         vex_printf("; sw $9, ");
         ppMIPSAMode(i->Min.XDirect.amPC, mode64);
         vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}",
                    i->Min.XDirect.toFastEP ? "fast" : "slow");
         return;
      case Min_XIndir:
         vex_printf("(xIndir) ");
         vex_printf("if (guest_COND.%s) { sw ",
                    showMIPSCondCode(i->Min.XIndir.cond));
         ppHRegMIPS(i->Min.XIndir.dstGA, mode64);
         vex_printf(", ");
         ppMIPSAMode(i->Min.XIndir.amPC, mode64);
         vex_printf("; move $9, $disp_indir; jalr $9; nop}");
         return;
      case Min_XAssisted:
         vex_printf("(xAssisted) ");
         vex_printf("if (guest_COND.%s) { ",
                    showMIPSCondCode(i->Min.XAssisted.cond));
         vex_printf("sw ");
         ppHRegMIPS(i->Min.XAssisted.dstGA, mode64);
         vex_printf(", ");
         ppMIPSAMode(i->Min.XAssisted.amPC, mode64);
         vex_printf("; move $9, $IRJumpKind_to_TRCVAL(%d)",
                    (Int)i->Min.XAssisted.jk);
         vex_printf("; move $9, $disp_assisted; jalr $9; nop; }");
         return;
      case Min_Load: {
         Bool idxd = toBool(i->Min.Load.src->tag == Mam_RR);
         UChar sz = i->Min.Load.sz;
         UChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
         vex_printf("l%c%s ", c_sz, idxd ? "x" : "");
         ppHRegMIPS(i->Min.Load.dst, mode64);
         vex_printf(",");
         ppMIPSAMode(i->Min.Load.src, mode64);
         return;
      }
      case Min_Store: {
         UChar sz = i->Min.Store.sz;
         Bool idxd = toBool(i->Min.Store.dst->tag == Mam_RR);
         UChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
         vex_printf("s%c%s ", c_sz, idxd ? "x" : "");
         ppHRegMIPS(i->Min.Store.src, mode64);
         vex_printf(",");
         ppMIPSAMode(i->Min.Store.dst, mode64);
         return;
      }
      case Min_LoadL: {
         vex_printf("ll ");
         ppHRegMIPS(i->Min.LoadL.dst, mode64);
         vex_printf(",");
         ppMIPSAMode(i->Min.LoadL.src, mode64);
         return;
      }
      case Min_StoreC: {
         vex_printf("sc ");
         ppHRegMIPS(i->Min.StoreC.src, mode64);
         vex_printf(",");
         ppMIPSAMode(i->Min.StoreC.dst, mode64);
         return;
      }
      case Min_RdWrLR: {
         vex_printf("%s ", i->Min.RdWrLR.wrLR ? "mtlr" : "mflr");
         ppHRegMIPS(i->Min.RdWrLR.gpr, mode64);
         return;
      }
      case Min_FpUnary:
         vex_printf("%s ", showMIPSFpOp(i->Min.FpUnary.op));
         ppHRegMIPS(i->Min.FpUnary.dst, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpUnary.src, mode64);
         return;
      case Min_FpBinary:
         vex_printf("%s", showMIPSFpOp(i->Min.FpBinary.op));
         ppHRegMIPS(i->Min.FpBinary.dst, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpBinary.srcL, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpBinary.srcR, mode64);
         return;
      case Min_FpConvert:
         vex_printf("%s", showMIPSFpOp(i->Min.FpConvert.op));
         ppHRegMIPS(i->Min.FpConvert.dst, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpConvert.src, mode64);
         return;
      case Min_FpCompare:
         vex_printf("%s ", showMIPSFpOp(i->Min.FpCompare.op));
         ppHRegMIPS(i->Min.FpCompare.srcL, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpCompare.srcR, mode64);
         vex_printf(" cond: %c", i->Min.FpCompare.cond1);
         return;
      case Min_FpMulAcc:
         vex_printf("%s ", showMIPSFpOp(i->Min.FpMulAcc.op));
         ppHRegMIPS(i->Min.FpMulAcc.dst, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpMulAcc.srcML, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpMulAcc.srcMR, mode64);
         vex_printf(",");
         ppHRegMIPS(i->Min.FpMulAcc.srcAcc, mode64);
         return;
      case Min_FpLdSt: {
         if (i->Min.FpLdSt.sz == 4) {
            if (i->Min.FpLdSt.isLoad) {
               vex_printf("lwc1 ");
               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
               vex_printf(",");
               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
            } else {
               vex_printf("swc1 ");
               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
               vex_printf(",");
               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
            }
         } else if (i->Min.FpLdSt.sz == 8) {
            if (i->Min.FpLdSt.isLoad) {
               if (mode64)
                  vex_printf("ldc1 ");
               else
                  vex_printf("lwc1 ");
               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
               vex_printf(",");
               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
            } else {
               if (mode64)
                  vex_printf("sdc1 ");
               else
                  vex_printf("swc1 ");
               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
               vex_printf(",");
               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
            }
         }
         return;
      }
      case Min_MovCond: {
         if (i->Min.MovCond.cond == MIPScc_MI) {
            vex_printf("\ncond move\n");
            return;
         }
         break;
      }
      case Min_MtFCSR: {
         vex_printf("ctc1 ");
         ppHRegMIPS(i->Min.MtFCSR.src, mode64);
         vex_printf(", $31");
         return;
      }
      case Min_MfFCSR: {
         vex_printf("cfc1 ");
         ppHRegMIPS(i->Min.MfFCSR.dst, mode64);
         vex_printf(", $31");
         return;
      }
      case Min_EvCheck:
         vex_printf("(evCheck) lw $9, ");
         ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
         vex_printf("; addiu $9, $9, -1");
         vex_printf("; sw $9, ");
         ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
         vex_printf("; bgez $t9, nofail; jalr *");
         ppMIPSAMode(i->Min.EvCheck.amFailAddr, mode64);
         vex_printf("; nofail:");
         return;
      case Min_ProfInc:
         vex_printf("(profInc) move $9, ($NotKnownYet); "
                    "lw $8, 0($9); "
                    "addiu $8, $8, 1; "
                    "sw $8, 0($9); "
                    "sltiu $1, $8, 1; "
                    "lw $8, 4($9); "
                    "addu $8, $8, $1; "
                    "sw $8, 4($9); " );
         return;
      default:
         vpanic("ppMIPSInstr");
         break;
   }
}

/* --------- Helpers for register allocation. --------- */

void getRegUsage_MIPSInstr(HRegUsage * u, MIPSInstr * i, Bool mode64)
{
   initHRegUsage(u);
   switch (i->tag) {
      case Min_LI:
         addHRegUse(u, HRmWrite, i->Min.LI.dst);
         break;
      case Min_Alu:
         addHRegUse(u, HRmRead, i->Min.Alu.srcL);
         addRegUsage_MIPSRH(u, i->Min.Alu.srcR);
         addHRegUse(u, HRmWrite, i->Min.Alu.dst);
         return;
      case Min_Shft:
         addHRegUse(u, HRmRead, i->Min.Shft.srcL);
         addRegUsage_MIPSRH(u, i->Min.Shft.srcR);
         addHRegUse(u, HRmWrite, i->Min.Shft.dst);
         return;
      case Min_Cmp:
         addHRegUse(u, HRmRead, i->Min.Cmp.srcL);
         addHRegUse(u, HRmRead, i->Min.Cmp.srcR);
         addHRegUse(u, HRmWrite, i->Min.Cmp.dst);
         return;
      case Min_Unary:
         addHRegUse(u, HRmRead, i->Min.Unary.src);
         addHRegUse(u, HRmWrite, i->Min.Unary.dst);
         return;
      case Min_Mul:
         addHRegUse(u, HRmWrite, i->Min.Mul.dst);
         addHRegUse(u, HRmRead, i->Min.Mul.srcL);
         addHRegUse(u, HRmRead, i->Min.Mul.srcR);
         return;
      case Min_Mthi:
      case Min_Mtlo:
         addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
         addHRegUse(u, HRmRead, i->Min.MtHL.src);
         return;
      case Min_Mfhi:
      case Min_Mflo:
         addHRegUse(u, HRmRead, hregMIPS_HI(mode64));
         addHRegUse(u, HRmRead, hregMIPS_LO(mode64));
         addHRegUse(u, HRmWrite, i->Min.MfHL.dst);
         return;
      case Min_MtFCSR:
         addHRegUse(u, HRmRead, i->Min.MtFCSR.src);
         return;
      case Min_MfFCSR:
         addHRegUse(u, HRmWrite, i->Min.MfFCSR.dst);
         return;
      case Min_Macc:
         addHRegUse(u, HRmModify, hregMIPS_HI(mode64));
         addHRegUse(u, HRmModify, hregMIPS_LO(mode64));
         addHRegUse(u, HRmRead, i->Min.Macc.srcL);
         addHRegUse(u, HRmRead, i->Min.Macc.srcR);
         return;
      case Min_Div:
         addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
         addHRegUse(u, HRmRead, i->Min.Div.srcL);
         addHRegUse(u, HRmRead, i->Min.Div.srcR);
         return;
      case Min_Call: {
         if (i->Min.Call.cond != MIPScc_AL)
            addHRegUse(u, HRmRead, i->Min.Call.src);
         UInt argir;
         addHRegUse(u, HRmWrite, hregMIPS_GPR1(mode64));

         addHRegUse(u, HRmWrite, hregMIPS_GPR2(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR3(mode64));

         addHRegUse(u, HRmWrite, hregMIPS_GPR4(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR5(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR6(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR7(mode64));

         addHRegUse(u, HRmWrite, hregMIPS_GPR8(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR9(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR10(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR11(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR12(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR13(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR14(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR15(mode64));

         addHRegUse(u, HRmWrite, hregMIPS_GPR24(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR25(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR26(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR27(mode64));

         /* Now we have to state any parameter-carrying registers
            which might be read.  This depends on the argiregs field. */
         argir = i->Min.Call.argiregs;
         if (argir & (1 << 7))
            addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64));
         if (argir & (1 << 6))
            addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64));
         if (argir & (1 << 5))
            addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64));
         if (argir & (1 << 4))
            addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64));

         vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6) | (1 << 7))));
         return;
      }
      /* XDirect/XIndir/XAssisted are also a bit subtle.  They
         conditionally exit the block.  Hence we only need to list (1)
         the registers that they read, and (2) the registers that they
         write in the case where the block is not exited.  (2) is
         empty, hence only (1) is relevant here. */
      case Min_XDirect:
         addRegUsage_MIPSAMode(u, i->Min.XDirect.amPC);
         return;
      case Min_XIndir:
         addHRegUse(u, HRmRead, i->Min.XIndir.dstGA);
         addRegUsage_MIPSAMode(u, i->Min.XIndir.amPC);
         return;
      case Min_XAssisted:
         addHRegUse(u, HRmRead, i->Min.XAssisted.dstGA);
         addRegUsage_MIPSAMode(u, i->Min.XAssisted.amPC);
         return;
      case Min_Load:
         addRegUsage_MIPSAMode(u, i->Min.Load.src);
         addHRegUse(u, HRmWrite, i->Min.Load.dst);
         return;
      case Min_Store:
         addHRegUse(u, HRmRead, i->Min.Store.src);
         addRegUsage_MIPSAMode(u, i->Min.Store.dst);
         return;
      case Min_LoadL:
         addRegUsage_MIPSAMode(u, i->Min.LoadL.src);
         addHRegUse(u, HRmWrite, i->Min.LoadL.dst);
         return;
      case Min_StoreC:
         addHRegUse(u, HRmWrite, i->Min.StoreC.src);
         addHRegUse(u, HRmRead, i->Min.StoreC.src);
         addRegUsage_MIPSAMode(u, i->Min.StoreC.dst);
         return;
      case Min_RdWrLR:
         addHRegUse(u, (i->Min.RdWrLR.wrLR ? HRmRead : HRmWrite),
                    i->Min.RdWrLR.gpr);
         return;
      case Min_FpLdSt:
         if (i->Min.FpLdSt.sz == 4) {
            addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
                       i->Min.FpLdSt.reg);
            addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
            return;
         } else if (i->Min.FpLdSt.sz == 8) {
            if (mode64) {
               addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
                          i->Min.FpLdSt.reg);
               addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
            } else {
               addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
                          i->Min.FpLdSt.reg);
               addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
               addRegUsage_MIPSAMode(u, nextMIPSAModeFloat(i->Min.FpLdSt.addr));
            }
            return;
         }
         break;
      case Min_FpUnary:
         if (i->Min.FpUnary.op == Mfp_CVTD) {
            addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
            addHRegUse(u, HRmRead, i->Min.FpUnary.src);
            return;
         } else {
            addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
            addHRegUse(u, HRmRead, i->Min.FpUnary.src);
            return;
         }
      case Min_FpBinary:
         addHRegUse(u, HRmWrite, i->Min.FpBinary.dst);
         addHRegUse(u, HRmRead, i->Min.FpBinary.srcL);
         addHRegUse(u, HRmRead, i->Min.FpBinary.srcR);
         return;
      case Min_FpConvert:
         addHRegUse(u, HRmWrite, i->Min.FpConvert.dst);
         addHRegUse(u, HRmRead, i->Min.FpConvert.src);
         return;
      case Min_FpCompare:
         addHRegUse(u, HRmWrite, i->Min.FpCompare.dst);
         addHRegUse(u, HRmRead, i->Min.FpCompare.srcL);
         addHRegUse(u, HRmRead, i->Min.FpCompare.srcR);
         return;
      case Min_MovCond:
         if (i->Min.MovCond.srcR->tag == Mrh_Reg) {
            addHRegUse(u, HRmRead, i->Min.MovCond.srcR->Mrh.Reg.reg);
         }
         addHRegUse(u, HRmRead, i->Min.MovCond.srcL);
         addHRegUse(u, HRmRead, i->Min.MovCond.condR);
         addHRegUse(u, HRmWrite, i->Min.MovCond.dst);
         return;
      case Min_EvCheck:
         /* We expect both amodes only to mention the guest state
            pointer, so this is in fact pointless, since that register
            isn't allocatable, but anyway.. */
         addRegUsage_MIPSAMode(u, i->Min.EvCheck.amCounter);
         addRegUsage_MIPSAMode(u, i->Min.EvCheck.amFailAddr);
         return;
      case Min_ProfInc:
         /* does not use any registers. */
         return;
      default:
         ppMIPSInstr(i, mode64);
         vpanic("getRegUsage_MIPSInstr");
         break;
   }
}

/* local helper */
static void mapReg(HRegRemap * m, HReg * r)
{
   *r = lookupHRegRemap(m, *r);
}

void mapRegs_MIPSInstr(HRegRemap * m, MIPSInstr * i, Bool mode64)
{
   switch (i->tag) {
      case Min_LI:
         mapReg(m, &i->Min.LI.dst);
         break;
      case Min_Alu:
         mapReg(m, &i->Min.Alu.srcL);
         mapRegs_MIPSRH(m, i->Min.Alu.srcR);
         mapReg(m, &i->Min.Alu.dst);
         return;
      case Min_Shft:
         mapReg(m, &i->Min.Shft.srcL);
         mapRegs_MIPSRH(m, i->Min.Shft.srcR);
         mapReg(m, &i->Min.Shft.dst);
         return;
      case Min_Cmp:
         mapReg(m, &i->Min.Cmp.srcL);
         mapReg(m, &i->Min.Cmp.srcR);
         mapReg(m, &i->Min.Cmp.dst);
         return;
      case Min_Unary:
         mapReg(m, &i->Min.Unary.src);
         mapReg(m, &i->Min.Unary.dst);
         return;
      case Min_Mul:
         mapReg(m, &i->Min.Mul.dst);
         mapReg(m, &i->Min.Mul.srcL);
         mapReg(m, &i->Min.Mul.srcR);
         return;
      case Min_Mthi:
      case Min_Mtlo:
         mapReg(m, &i->Min.MtHL.src);
         return;
      case Min_Mfhi:
      case Min_Mflo:
         mapReg(m, &i->Min.MfHL.dst);
         return;
      case Min_Macc:
         mapReg(m, &i->Min.Macc.srcL);
         mapReg(m, &i->Min.Macc.srcR);
         return;
      case Min_Div:
         mapReg(m, &i->Min.Div.srcL);
         mapReg(m, &i->Min.Div.srcR);
         return;
      case Min_Call:
      {
         if (i->Min.Call.cond != MIPScc_AL)
            mapReg(m, &i->Min.Call.src);
         return;
      }
      case Min_XDirect:
         mapRegs_MIPSAMode(m, i->Min.XDirect.amPC);
         return;
      case Min_XIndir:
         mapReg(m, &i->Min.XIndir.dstGA);
         mapRegs_MIPSAMode(m, i->Min.XIndir.amPC);
         return;
      case Min_XAssisted:
         mapReg(m, &i->Min.XAssisted.dstGA);
         mapRegs_MIPSAMode(m, i->Min.XAssisted.amPC);
         return;
      case Min_Load:
         mapRegs_MIPSAMode(m, i->Min.Load.src);
         mapReg(m, &i->Min.Load.dst);
         return;
      case Min_Store:
         mapReg(m, &i->Min.Store.src);
         mapRegs_MIPSAMode(m, i->Min.Store.dst);
         return;
      case Min_LoadL:
         mapRegs_MIPSAMode(m, i->Min.LoadL.src);
         mapReg(m, &i->Min.LoadL.dst);
         return;
      case Min_StoreC:
         mapReg(m, &i->Min.StoreC.src);
         mapRegs_MIPSAMode(m, i->Min.StoreC.dst);
         return;
      case Min_RdWrLR:
         mapReg(m, &i->Min.RdWrLR.gpr);
         return;
      case Min_FpLdSt:
         if (i->Min.FpLdSt.sz == 4) {
            mapReg(m, &i->Min.FpLdSt.reg);
            mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
            return;
         } else if (i->Min.FpLdSt.sz == 8) {
            if (mode64) {
               mapReg(m, &i->Min.FpLdSt.reg);
               mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
            } else {
               mapReg(m, &i->Min.FpLdSt.reg);
               mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
               mapRegs_MIPSAMode(m, nextMIPSAModeFloat(i->Min.FpLdSt.addr));
            }
            return;
         }
         break;
      case Min_FpUnary:
         if (i->Min.FpUnary.op == Mfp_CVTD) {
            mapReg(m, &i->Min.FpUnary.dst);
            mapReg(m, &i->Min.FpUnary.src);
            return;
         } else {
            mapReg(m, &i->Min.FpUnary.dst);
            mapReg(m, &i->Min.FpUnary.src);
            return;
         }
      case Min_FpBinary:
         mapReg(m, &i->Min.FpBinary.dst);
         mapReg(m, &i->Min.FpBinary.srcL);
         mapReg(m, &i->Min.FpBinary.srcR);
         return;
      case Min_FpConvert:
         mapReg(m, &i->Min.FpConvert.dst);
         mapReg(m, &i->Min.FpConvert.src);
         return;
      case Min_FpCompare:
         mapReg(m, &i->Min.FpCompare.dst);
         mapReg(m, &i->Min.FpCompare.srcL);
         mapReg(m, &i->Min.FpCompare.srcR);
         return;
      case Min_MtFCSR:
         mapReg(m, &i->Min.MtFCSR.src);
         return;
      case Min_MfFCSR:
         mapReg(m, &i->Min.MfFCSR.dst);
         return;
      case Min_MovCond:
         if (i->Min.MovCond.srcR->tag == Mrh_Reg) {
            mapReg(m, &(i->Min.MovCond.srcR->Mrh.Reg.reg));
         }
         mapReg(m, &i->Min.MovCond.srcL);
         mapReg(m, &i->Min.MovCond.condR);
         mapReg(m, &i->Min.MovCond.dst);

         return;
      case Min_EvCheck:
         /* We expect both amodes only to mention the guest state
            pointer, so this is in fact pointless, since that register
            isn't allocatable, but anyway.. */
         mapRegs_MIPSAMode(m, i->Min.EvCheck.amCounter);
         mapRegs_MIPSAMode(m, i->Min.EvCheck.amFailAddr);
         return;
      case Min_ProfInc:
         /* does not use any registers. */
         return;
      default:
         ppMIPSInstr(i, mode64);
         vpanic("mapRegs_MIPSInstr");
         break;
   }

}

/* Figure out if i represents a reg-reg move, and if so assign the
   source and destination to *src and *dst.  If in doubt say No.  Used
   by the register allocator to do move coalescing.
*/
Bool isMove_MIPSInstr(MIPSInstr * i, HReg * src, HReg * dst)
{
   /* Moves between integer regs */
   if (i->tag == Min_Alu) {
      // or Rd,Rs,Rs == mr Rd,Rs
      if (i->Min.Alu.op != Malu_OR)
         return False;
      if (i->Min.Alu.srcR->tag != Mrh_Reg)
         return False;
      if (i->Min.Alu.srcR->Mrh.Reg.reg != i->Min.Alu.srcL)
         return False;
      *src = i->Min.Alu.srcL;
      *dst = i->Min.Alu.dst;
      return True;
   }
   return False;
}
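
/* Note added for clarity: the only pattern recognised above as a
   register-register move is "or dst, src, src", i.e. an instruction
   built as MIPSInstr_Alu(Malu_OR, dst, src, MIPSRH_Reg(src)).
   Anything else is conservatively reported as not-a-move. */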

/* Generate mips spill/reload instructions under the direction of the
   register allocator.
*/
void genSpill_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
                    Int offsetB, Bool mode64)
{
   MIPSAMode *am;
   vassert(offsetB >= 0);
   vassert(!hregIsVirtual(rreg));
   *i1 = *i2 = NULL;
   am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));

   switch (hregClass(rreg)) {
      case HRcInt64:
         vassert(mode64);
         *i1 = MIPSInstr_Store(8, am, rreg, mode64);
         break;
      case HRcInt32:
         vassert(!mode64);
         *i1 = MIPSInstr_Store(4, am, rreg, mode64);
         break;
      case HRcFlt32:
         vassert(!mode64);
         *i1 = MIPSInstr_FpLdSt(False /*Store*/, 4, rreg, am);
         break;
      case HRcFlt64:
         *i1 = MIPSInstr_FpLdSt(False /*Store*/, 8, rreg, am);
         break;
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genSpill_MIPS: unimplemented regclass");
         break;
   }
}

void genReload_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
                     Int offsetB, Bool mode64)
{
   MIPSAMode *am;
   vassert(!hregIsVirtual(rreg));
   am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));

   switch (hregClass(rreg)) {
      case HRcInt64:
         vassert(mode64);
         *i1 = MIPSInstr_Load(8, rreg, am, mode64);
         break;
      case HRcInt32:
         vassert(!mode64);
         *i1 = MIPSInstr_Load(4, rreg, am, mode64);
         break;
      case HRcFlt32:
         if (mode64)
            *i1 = MIPSInstr_FpLdSt(True /*Load*/, 8, rreg, am);
         else
            *i1 = MIPSInstr_FpLdSt(True /*Load*/, 4, rreg, am);
         break;
      case HRcFlt64:
         *i1 = MIPSInstr_FpLdSt(True /*Load*/, 8, rreg, am);
         break;
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genReload_MIPS: unimplemented regclass");
         break;
   }
}

/* --------- The mips assembler --------- */

static UInt iregNo(HReg r, Bool mode64)
{
   UInt n;
   vassert(hregClass(r) == (mode64 ? HRcInt64 : HRcInt32));
   vassert(!hregIsVirtual(r));
   n = hregNumber(r);
   vassert(n <= 32);
   return n;
}
HRcFlt64 : HRcFlt32); 2282 vassert(!hregIsVirtual(r)); 2283 n = hregNumber(r); 2284 vassert(n <= 31); 2285 return n; 2286} 2287 2288static UChar dregNo(HReg r) 2289{ 2290 UInt n; 2291 vassert(hregClass(r) == HRcFlt64); 2292 vassert(!hregIsVirtual(r)); 2293 n = hregNumber(r); 2294 vassert(n <= 31); 2295 return n; 2296} 2297 2298/* Emit 32bit instruction */ 2299static UChar *emit32(UChar * p, UInt w32) 2300{ 2301#if defined (_MIPSEL) 2302 *p++ = toUChar(w32 & 0x000000FF); 2303 *p++ = toUChar((w32 >> 8) & 0x000000FF); 2304 *p++ = toUChar((w32 >> 16) & 0x000000FF); 2305 *p++ = toUChar((w32 >> 24) & 0x000000FF); 2306#elif defined (_MIPSEB) 2307 *p++ = toUChar((w32 >> 24) & 0x000000FF); 2308 *p++ = toUChar((w32 >> 16) & 0x000000FF); 2309 *p++ = toUChar((w32 >> 8) & 0x000000FF); 2310 *p++ = toUChar(w32 & 0x000000FF); 2311#endif 2312 return p; 2313} 2314/* Fetch an instruction */ 2315static UInt fetch32 ( UChar* p ) 2316{ 2317 UInt w32 = 0; 2318#if defined (_MIPSEL) 2319 w32 |= ((0xFF & (UInt)p[0]) << 0); 2320 w32 |= ((0xFF & (UInt)p[1]) << 8); 2321 w32 |= ((0xFF & (UInt)p[2]) << 16); 2322 w32 |= ((0xFF & (UInt)p[3]) << 24); 2323#elif defined (_MIPSEB) 2324 w32 |= ((0xFF & (UInt)p[0]) << 24); 2325 w32 |= ((0xFF & (UInt)p[1]) << 16); 2326 w32 |= ((0xFF & (UInt)p[2]) << 8); 2327 w32 |= ((0xFF & (UInt)p[3]) << 0); 2328#endif 2329 return w32; 2330} 2331 2332/* physical structure of mips instructions */ 2333/* type I : opcode - 6 bits 2334 rs - 5 bits 2335 rt - 5 bits 2336 immediate - 16 bits 2337*/ 2338static UChar *mkFormI(UChar * p, UInt opc, UInt rs, UInt rt, UInt imm) 2339{ 2340 UInt theInstr; 2341 vassert(opc < 0x40); 2342 vassert(rs < 0x20); 2343 vassert(rt < 0x20); 2344 imm = imm & 0xFFFF; 2345 theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (imm)); 2346 return emit32(p, theInstr); 2347} 2348 2349/* type R: opcode - 6 bits 2350 rs - 5 bits 2351 rt - 5 bits 2352 rd - 5 bits 2353 sa - 5 bits 2354 func - 6 bits 2355*/ 2356static UChar *mkFormR(UChar * p, UInt opc, UInt rs, UInt rt, UInt rd, UInt sa, 2357 UInt func) 2358{ 2359 if (rs >= 0x20) 2360 vex_printf("rs = %d\n", rs); 2361 UInt theInstr; 2362 vassert(opc < 0x40); 2363 vassert(rs < 0x20); 2364 vassert(rt < 0x20); 2365 vassert(rd < 0x20); 2366 vassert(sa < 0x20); 2367 func = func & 0xFFFF; 2368 theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) | 2369 (func)); 2370 2371 return emit32(p, theInstr); 2372} 2373 2374static UChar *mkFormS(UChar * p, UInt opc1, UInt rRD, UInt rRS, UInt rRT, 2375 UInt sa, UInt opc2) 2376{ 2377 UInt theInstr; 2378 vassert(opc1 <= 0x3F); 2379 vassert(rRD < 0x20); 2380 vassert(rRS < 0x20); 2381 vassert(rRT < 0x20); 2382 vassert(opc2 <= 0x3F); 2383 vassert(sa >= 0 && sa <= 0x3F); 2384 2385 theInstr = ((opc1 << 26) | (rRS << 21) | (rRT << 16) | (rRD << 11) | 2386 ((sa & 0x1F) << 6) | (opc2)); 2387 2388 return emit32(p, theInstr); 2389} 2390 2391static UChar *doAMode_IR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am, 2392 Bool mode64) 2393{ 2394 UInt rA, idx, r_dst; 2395 vassert(am->tag == Mam_IR); 2396 vassert(am->Mam.IR.index < 0x10000); 2397 2398 rA = iregNo(am->Mam.IR.base, mode64); 2399 idx = am->Mam.IR.index; 2400 2401 if (rSD == 33 || rSD == 34) 2402 r_dst = 24; 2403 else 2404 r_dst = rSD; 2405 2406 if (opc1 < 40) { 2407 //load 2408 if (rSD == 33) 2409 /* mfhi */ 2410 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); 2411 else if (rSD == 34) 2412 /* mflo */ 2413 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18); 2414 } 2415 2416 p = mkFormI(p, opc1, rA, r_dst, idx); 2417 2418 if (opc1 >= 40) { 2419 //store 
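      /* rSD values 33 and 34 denote the synthetic HI and LO registers,
         which cannot be named in a load/store encoding; in that case
         r_dst was set to $24 above, and the mthi/mtlo below moves $24
         into HI or LO. */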
2420 if (rSD == 33) 2421 /* mthi */ 2422 p = mkFormR(p, 0, r_dst, 0, 0, 0, 17); 2423 else if (rSD == 34) 2424 /* mtlo */ 2425 p = mkFormR(p, 0, r_dst, 0, 0, 0, 19); 2426 } 2427 2428 return p; 2429} 2430 2431static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am, 2432 Bool mode64) 2433{ 2434 UInt rA, rB, r_dst; 2435 vassert(am->tag == Mam_RR); 2436 2437 rA = iregNo(am->Mam.RR.base, mode64); 2438 rB = iregNo(am->Mam.RR.index, mode64); 2439 2440 if (rSD == 33 || rSD == 34) 2441 r_dst = 24; 2442 else 2443 r_dst = rSD; 2444 2445 if (opc1 < 40) { 2446 //load 2447 if (rSD == 33) 2448 /* mfhi */ 2449 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); 2450 else if (rSD == 34) 2451 /* mflo */ 2452 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18); 2453 } 2454 /* addiu sp, sp, -4 2455 * sw rA, 0(sp) 2456 * addu rA, rA, rB 2457 * sw/lw r_dst, 0(rA) 2458 * lw rA, 0(sp) 2459 * addiu sp, sp, 4 */ 2460 if (mode64) { 2461 p = mkFormI(p, 25, 29, 29, 0xFFFC); 2462 p = mkFormI(p, 63, 29, rA, 0); 2463 p = mkFormR(p, 0, rA, rB, rA, 0, 45); 2464 p = mkFormI(p, opc1, rA, r_dst, 0); 2465 p = mkFormI(p, 55, 29, rA, 0); 2466 p = mkFormI(p, 25, 29, 29, 4); 2467 } else { 2468 p = mkFormI(p, 9, 29, 29, 0xFFFC); 2469 p = mkFormI(p, 43, 29, rA, 0); 2470 p = mkFormR(p, 0, rA, rB, rA, 0, 33); 2471 p = mkFormI(p, opc1, rA, r_dst, 0); 2472 p = mkFormI(p, 35, 29, rA, 0); 2473 p = mkFormI(p, 9, 29, 29, 4); 2474 } 2475 if (opc1 >= 40) { 2476 //store 2477 if (rSD == 33) 2478 /* mthi */ 2479 p = mkFormR(p, 0, r_dst, 0, 0, 0, 17); 2480 else if (rSD == 34) 2481 /* mtlo */ 2482 p = mkFormR(p, 0, r_dst, 0, 0, 0, 19); 2483 } 2484 2485 return p; 2486} 2487 2488/* Load imm to r_dst */ 2489static UChar *mkLoadImm(UChar * p, UInt r_dst, ULong imm, Bool mode64) 2490{ 2491 if (!mode64) { 2492 vassert(r_dst < 0x20); 2493 UInt u32 = (UInt) imm; 2494 Int s32 = (Int) u32; 2495 Long s64 = (Long) s32; 2496 imm = (ULong) s64; 2497 } 2498 2499 if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) { 2500 // sign-extendable from 16 bits 2501 // addiu r_dst,0,imm => li r_dst,imm 2502 p = mkFormI(p, 9, 0, r_dst, imm & 0xFFFF); 2503 } else { 2504 if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) { 2505 // sign-extendable from 32 bits 2506 // addiu r_dst,r0,(imm>>16) => lis r_dst, (imm>>16) 2507 // lui r_dst, (imm>>16) 2508 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF); 2509 // ori r_dst, r_dst, (imm & 0xFFFF) 2510 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); 2511 } else { 2512 vassert(mode64); 2513 // lui load in upper half of low word 2514 p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF); 2515 // ori 2516 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF); 2517 //shift 2518 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); 2519 // ori 2520 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF); 2521 //shift 2522 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); 2523 // ori 2524 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); 2525 } 2526 } 2527 return p; 2528} 2529 2530/* A simplified version of mkLoadImm that always generates 2 or 5 2531 instructions (32 or 64 bits respectively) even if it could generate 2532 fewer. This is needed for generating fixed sized patchable 2533 sequences. */ 2534static UChar* mkLoadImm_EXACTLY2or5 ( UChar* p, 2535 UInt r_dst, ULong imm, Bool mode64 ) 2536{ 2537 vassert(r_dst < 0x20); 2538 2539 if (!mode64) { 2540 /* In 32-bit mode, make sure the top 32 bits of imm are a sign 2541 extension of the bottom 32 bits. (Probably unnecessary.) 
*/ 2542 UInt u32 = (UInt)imm; 2543 Int s32 = (Int)u32; 2544 Long s64 = (Long)s32; 2545 imm = (ULong)s64; 2546 } 2547 2548 if (!mode64) { 2549 // sign-extendable from 32 bits 2550 // addiu r_dst,r0,(imm>>16) => lis r_dst, (imm>>16) 2551 // lui r_dst, (imm>>16) 2552 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF); 2553 // ori r_dst, r_dst, (imm & 0xFFFF) 2554 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); 2555 } else { 2556 vassert(0); 2557 } 2558 return p; 2559} 2560 2561/* Checks whether the sequence of bytes at p was indeed created 2562 by mkLoadImm_EXACTLY2or5 with the given parameters. */ 2563static Bool isLoadImm_EXACTLY2or5 ( UChar* p_to_check, 2564 UInt r_dst, ULong imm, Bool mode64 ) 2565{ 2566 vassert(r_dst < 0x20); 2567 Bool ret; 2568 if (!mode64) { 2569 /* In 32-bit mode, make sure the top 32 bits of imm are a sign 2570 extension of the bottom 32 bits. (Probably unnecessary.) */ 2571 UInt u32 = (UInt)imm; 2572 Int s32 = (Int)u32; 2573 Long s64 = (Long)s32; 2574 imm = (ULong)s64; 2575 } 2576 2577 if (!mode64) { 2578 UInt expect[2] = { 0, 0 }; 2579 UChar* p = (UChar*)&expect[0]; 2580 // lui r_dst, (imm>>16) 2581 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF); 2582 // ori r_dst, r_dst, (imm & 0xFFFF) 2583 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); 2584 vassert(p == (UChar*)&expect[2]); 2585 2586 ret = fetch32(p_to_check + 0) == expect[0] 2587 && fetch32(p_to_check + 4) == expect[1]; 2588 2589 } else { 2590 vassert(0); 2591 } 2592 return ret; 2593} 2594 2595/* Generate a machine-word sized load or store. Simplified version of 2596 the Min_Load and Min_Store cases below. */ 2597static UChar* do_load_or_store_machine_word ( 2598 UChar* p, Bool isLoad, 2599 UInt reg, MIPSAMode* am, Bool mode64 ) 2600{ 2601 if (isLoad) { /* load */ 2602 UInt opc1, sz = mode64 ? 8 : 4; 2603 switch (am->tag) { 2604 case Mam_IR: 2605 if (mode64) { 2606 vassert(0 == (am->Mam.IR.index & 3)); 2607 } 2608 switch (sz) { 2609 case 1: 2610 opc1 = 32; 2611 break; 2612 case 2: 2613 opc1 = 33; 2614 break; 2615 case 4: 2616 opc1 = 35; 2617 break; 2618 case 8: 2619 opc1 = 55; 2620 vassert(mode64); 2621 break; 2622 default: 2623 vassert(0); 2624 break; 2625 } 2626 p = doAMode_IR(p, opc1, reg, am, mode64); 2627 break; 2628 case Mam_RR: 2629 /* we could handle this case, but we don't expect to ever 2630 need to. */ 2631 vassert(0); 2632 break; 2633 default: 2634 vassert(0); 2635 break; 2636 } 2637 } else /* store */ { 2638 UInt opc1, sz = mode64 ? 8 : 4; 2639 switch (am->tag) { 2640 case Mam_IR: 2641 if (mode64) { 2642 vassert(0 == (am->Mam.IR.index & 3)); 2643 } 2644 switch (sz) { 2645 case 1: 2646 opc1 = 40; 2647 break; 2648 case 2: 2649 opc1 = 41; 2650 break; 2651 case 4: 2652 opc1 = 43; 2653 break; 2654 case 8: 2655 vassert(mode64); 2656 opc1 = 63; 2657 break; 2658 default: 2659 vassert(0); 2660 break; 2661 } 2662 p = doAMode_IR(p, opc1, reg, am, mode64); 2663 break; 2664 case Mam_RR: 2665 /* we could handle this case, but we don't expect to ever 2666 need to. */ 2667 vassert(0); 2668 break; 2669 default: 2670 vassert(0); 2671 break; 2672 } 2673 } 2674 return p; 2675} 2676 2677/* Move r_dst to r_src */ 2678static UChar *mkMoveReg(UChar * p, UInt r_dst, UInt r_src) 2679{ 2680 vassert(r_dst < 0x20); 2681 vassert(r_src < 0x20); 2682 2683 if (r_dst != r_src) { 2684 /* or r_dst, r_src, r_src */ 2685 p = mkFormR(p, 0, r_src, r_src, r_dst, 0, 37); 2686 } 2687 return p; 2688} 2689 2690/* Emit an instruction into buf and return the number of bytes used. 
2691 Note that buf is not the insn's final place, and therefore it is 2692 imperative to emit position-independent code. If the emitted 2693 instruction was a profiler inc, set *is_profInc to True, else 2694 leave it unchanged. */ 2695Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, 2696 UChar* buf, Int nbuf, MIPSInstr* i, 2697 Bool mode64, 2698 void* disp_cp_chain_me_to_slowEP, 2699 void* disp_cp_chain_me_to_fastEP, 2700 void* disp_cp_xindir, 2701 void* disp_cp_xassisted ) 2702{ 2703 UChar *p = &buf[0]; 2704 UChar *ptmp = p; 2705 vassert(nbuf >= 32); 2706 2707 switch (i->tag) { 2708 case Min_MovCond: { 2709 MIPSRH *srcR = i->Min.MovCond.srcR; 2710 UInt condR = iregNo(i->Min.MovCond.condR, mode64); 2711 UInt dst = iregNo(i->Min.MovCond.dst, mode64); 2712 2713 UInt srcL = iregNo(i->Min.MovCond.srcL, mode64); 2714 2715 p = mkMoveReg(p, dst, srcL); 2716 if (i->Min.MovCond.cond == MIPScc_MI) { 2717 p = mkFormI(p, 7, condR, 0, 2); //bgtz cond,2 2718 } 2719 2720 p = mkFormR(p, 0, 0, 0, 0, 0, 0); //nop 2721 2722 if (srcR->tag == Mrh_Reg) { 2723 //or dst,src,src 2724 p = mkMoveReg(p, dst, iregNo(srcR->Mrh.Reg.reg, mode64)); 2725 /*p = mkFormR(p, 0, dst, iregNo(src->Mrh.Reg.reg, mode64), 2726 iregNo(src->Mrh.Reg.reg, mode64), 0, 37);*/ 2727 } else { 2728 p = mkLoadImm(p, dst, srcR->Mrh.Imm.imm16, mode64); 2729 } 2730 } 2731 goto done; 2732 2733 case Min_LI: 2734 p = mkLoadImm(p, iregNo(i->Min.LI.dst, mode64), i->Min.LI.imm, mode64); 2735 goto done; 2736 2737 case Min_Alu: { 2738 MIPSRH *srcR = i->Min.Alu.srcR; 2739 Bool immR = toBool(srcR->tag == Mrh_Imm); 2740 UInt r_dst = iregNo(i->Min.Alu.dst, mode64); 2741 UInt r_srcL = iregNo(i->Min.Alu.srcL, mode64); 2742 UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg, mode64); 2743 2744 switch (i->Min.Alu.op) { 2745 /*Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR */ 2746 case Malu_ADD: 2747 if (immR) { 2748 vassert(srcR->Mrh.Imm.imm16 != 0x8000); 2749 if (srcR->Mrh.Imm.syned) 2750 /* addi */ 2751 p = mkFormI(p, 9, r_srcL, r_dst, srcR->Mrh.Imm.imm16); 2752 else 2753 /* addiu */ 2754 p = mkFormI(p, 9, r_srcL, r_dst, srcR->Mrh.Imm.imm16); 2755 } else { 2756 /* addu */ 2757 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 33); 2758 } 2759 break; 2760 case Malu_SUB: 2761 if (immR) { 2762 /* addi , but with negated imm */ 2763 vassert(srcR->Mrh.Imm.syned); 2764 vassert(srcR->Mrh.Imm.imm16 != 0x8000); 2765 p = mkFormI(p, 8, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16)); 2766 } else { 2767 /* subu */ 2768 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 35); 2769 } 2770 break; 2771 case Malu_AND: 2772 if (immR) { 2773 /* andi */ 2774 vassert(!srcR->Mrh.Imm.syned); 2775 p = mkFormI(p, 12, r_srcL, r_dst, srcR->Mrh.Imm.imm16); 2776 } else { 2777 /* and */ 2778 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 36); 2779 } 2780 break; 2781 case Malu_OR: 2782 if (immR) { 2783 /* ori */ 2784 vassert(!srcR->Mrh.Imm.syned); 2785 p = mkFormI(p, 13, r_srcL, r_dst, srcR->Mrh.Imm.imm16); 2786 } else { 2787 /* or */ 2788 if (r_srcL == 33) 2789 //MFHI 2790 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); 2791 else if (r_srcL == 34) 2792 //MFLO 2793 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18); 2794 else if (r_dst == 33) 2795 //MTHI 2796 p = mkFormR(p, 0, r_srcL, 0, 0, 0, 17); 2797 else if (r_dst == 34) 2798 //MTLO 2799 p = mkFormR(p, 0, r_srcL, 0, 0, 0, 19); 2800 else 2801 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 37); 2802 } 2803 break; 2804 case Malu_NOR: 2805 /* nor */ 2806 vassert(!immR); 2807 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 39); 2808 break; 2809 case Malu_XOR: 2810 if 
(immR) { 2811 /* xori */ 2812 vassert(!srcR->Mrh.Imm.syned); 2813 p = mkFormI(p, 14, r_srcL, r_dst, srcR->Mrh.Imm.imm16); 2814 } else { 2815 /* xor */ 2816 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38); 2817 } 2818 break; 2819 2820 default: 2821 goto bad; 2822 } 2823 goto done; 2824 } 2825 2826 case Min_Shft: { 2827 MIPSRH *srcR = i->Min.Shft.srcR; 2828 Bool sz32 = i->Min.Shft.sz32; 2829 Bool immR = toBool(srcR->tag == Mrh_Imm); 2830 UInt r_dst = iregNo(i->Min.Shft.dst, mode64); 2831 UInt r_srcL = iregNo(i->Min.Shft.srcL, mode64); 2832 UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg, 2833 mode64); 2834 if (!mode64) 2835 vassert(sz32); 2836 switch (i->Min.Shft.op) { 2837 case Mshft_SLL: 2838 if (sz32) { 2839 if (immR) { 2840 UInt n = srcR->Mrh.Imm.imm16; 2841 vassert(n >= 0 && n < 32); 2842 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 0); 2843 } else { 2844 /* shift variable */ 2845 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 4); 2846 } 2847 } else { 2848 if (immR) { 2849 UInt n = srcR->Mrh.Imm.imm16; 2850 vassert((n >= 0 && n < 32) || (n > 31 && n < 64)); 2851 if (n >= 0 && n < 32) { 2852 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 56); 2853 } else { 2854 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 60); 2855 } 2856 } else { 2857 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 20); 2858 } 2859 } 2860 break; 2861 2862 case Mshft_SRL: 2863 if (sz32) { 2864 // SRL, SRLV 2865 if (immR) { 2866 UInt n = srcR->Mrh.Imm.imm16; 2867 vassert(n >= 0 && n < 32); 2868 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 2); 2869 } else { 2870 /* shift variable */ 2871 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 6); 2872 } 2873 } else { 2874 // DSRL, DSRL32, DSRLV 2875 if (immR) { 2876 UInt n = srcR->Mrh.Imm.imm16; 2877 vassert((n >= 0 && n < 32) || (n > 31 && n < 64)); 2878 if (n >= 0 && n < 32) { 2879 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 58); 2880 } else { 2881 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 62); 2882 } 2883 } else { 2884 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 22); 2885 } 2886 } 2887 break; 2888 2889 case Mshft_SRA: 2890 if (sz32) { 2891 // SRA, SRAV 2892 if (immR) { 2893 UInt n = srcR->Mrh.Imm.imm16; 2894 vassert(n >= 0 && n < 32); 2895 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 3); 2896 } else { 2897 /* shift variable */ 2898 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 7); 2899 } 2900 } else { 2901 // DSRA, DSRA32, DSRAV 2902 if (immR) { 2903 UInt n = srcR->Mrh.Imm.imm16; 2904 vassert((n >= 0 && n < 32) || (n > 31 && n < 64)); 2905 if (n >= 0 && n < 32) { 2906 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 59); 2907 } else { 2908 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 63); 2909 } 2910 } else { 2911 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 23); 2912 } 2913 } 2914 break; 2915 2916 default: 2917 goto bad; 2918 } 2919 2920 goto done; 2921 } 2922 2923 case Min_Unary: { 2924 UInt r_dst = iregNo(i->Min.Unary.dst, mode64); 2925 UInt r_src = iregNo(i->Min.Unary.src, mode64); 2926 2927 switch (i->Min.Unary.op) { 2928 /*Mun_CLO, Mun_CLZ, Mun_NOP */ 2929 case Mun_CLO: //clo 2930 p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 33); 2931 break; 2932 case Mun_CLZ: //clz 2933 p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 32); 2934 break; 2935 case Mun_NOP: //nop (sll r0,r0,0) 2936 p = mkFormR(p, 0, 0, 0, 0, 0, 0); 2937 break; 2938 } 2939 goto done; 2940 } 2941 2942 case Min_Cmp: { 2943 UInt r_srcL = iregNo(i->Min.Cmp.srcL, mode64); 2944 UInt r_srcR = iregNo(i->Min.Cmp.srcR, mode64); 2945 UInt r_dst = iregNo(i->Min.Cmp.dst, mode64); 2946 2947 switch (i->Min.Cmp.cond) { 2948 case MIPScc_EQ: 2949 /* 
addiu r_dst, r0, 1 2950 beq r_srcL, r_srcR, 2 2951 nop 2952 addiu r_dst, r0, 0 2953 */ 2954 p = mkFormI(p, 9, 0, r_dst, 1); 2955 p = mkFormI(p, 4, r_srcL, r_srcR, 2); 2956 p = mkFormR(p, 0, 0, 0, 0, 0, 0); 2957 p = mkFormI(p, 9, 0, r_dst, 0); 2958 break; 2959 case MIPScc_NE: 2960 /* addiu r_dst, r0, 1 2961 bne r_srcL, r_srcR, 2 2962 nop 2963 addiu r_dst, r0, 0 2964 */ 2965 p = mkFormI(p, 9, 0, r_dst, 1); 2966 p = mkFormI(p, 5, r_srcL, r_srcR, 2); 2967 p = mkFormR(p, 0, 0, 0, 0, 0, 0); 2968 p = mkFormI(p, 9, 0, r_dst, 0); 2969 break; 2970 case MIPScc_LT: 2971 /* slt r_dst, r_srcL, r_srcR */ 2972 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42); 2973 break; 2974 case MIPScc_LO: 2975 /* sltu r_dst, r_srcL, r_srcR */ 2976 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 43); 2977 break; 2978 case MIPScc_LE: 2979 /* addiu r_dst, r0, 1 2980 beq r_srcL, r_srcR, 2 2981 nop 2982 slt r_dst, r_srcL, r_srcR */ 2983 p = mkFormI(p, 9, 0, r_dst, 1); 2984 p = mkFormI(p, 4, r_srcL, r_srcR, 2); 2985 p = mkFormR(p, 0, 0, 0, 0, 0, 0); 2986 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42); 2987 break; 2988 case MIPScc_LS: 2989 /* addiu r_dst, r0, 1 2990 beq r_srcL, r_srcR, 2 2991 nop 2992 sltu r_dst, r_srcL, r_srcR */ 2993 p = mkFormI(p, 9, 0, r_dst, 1); 2994 p = mkFormI(p, 4, r_srcL, r_srcR, 2); 2995 p = mkFormR(p, 0, 0, 0, 0, 0, 0); 2996 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 43); 2997 break; 2998 default: 2999 goto bad; 3000 } 3001 goto done; 3002 } 3003 3004 case Min_Mul: { 3005 Bool syned = i->Min.Mul.syned; 3006 Bool widening = i->Min.Mul.widening; 3007 Bool sz32 = i->Min.Mul.sz32; 3008 UInt r_srcL = iregNo(i->Min.Mul.srcL, mode64); 3009 UInt r_srcR = iregNo(i->Min.Mul.srcR, mode64); 3010 UInt r_dst = iregNo(i->Min.Mul.dst, mode64); 3011 3012 if (widening) { 3013 if (sz32) { 3014 if (syned) 3015 /* mult */ 3016 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 24); 3017 else 3018 /* multu */ 3019 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 25); 3020 } else { 3021 if (syned) /* DMULT r_dst,r_srcL,r_srcR */ 3022 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 28); 3023 else /* DMULTU r_dst,r_srcL,r_srcR */ 3024 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 29); 3025 } 3026 } else { 3027 if (sz32) 3028 /* mul */ 3029 p = mkFormR(p, 28, r_srcL, r_srcR, r_dst, 0, 2); 3030 else if (mode64 && !sz32) 3031 p = mkFormR(p, 28, r_srcL, r_srcR, r_dst, 0, 2); 3032 else 3033 goto bad; 3034 } 3035 goto done; 3036 } 3037 3038 case Min_Macc: { 3039 Bool syned = i->Min.Macc.syned; 3040 UInt r_srcL = iregNo(i->Min.Macc.srcL, mode64); 3041 UInt r_srcR = iregNo(i->Min.Macc.srcR, mode64); 3042 3043 if (syned) { 3044 switch (i->Min.Macc.op) { 3045 case Macc_ADD: 3046 //madd 3047 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 0); 3048 break; 3049 case Macc_SUB: 3050 //msub 3051 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 3052 4); 3053 break; 3054 default: 3055 goto bad; 3056 } 3057 } else { 3058 switch (i->Min.Macc.op) { 3059 case Macc_ADD: 3060 //maddu 3061 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 3062 1); 3063 break; 3064 case Macc_SUB: 3065 //msubu 3066 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 3067 5); 3068 break; 3069 default: 3070 goto bad; 3071 } 3072 } 3073 3074 goto done; 3075 } 3076 3077 case Min_Div: { 3078 Bool syned = i->Min.Div.syned; 3079 Bool sz32 = i->Min.Div.sz32; 3080 UInt r_srcL = iregNo(i->Min.Div.srcL, mode64); 3081 UInt r_srcR = iregNo(i->Min.Div.srcR, mode64); 3082 if (sz32) { 3083 if (syned) { 3084 /* div */ 3085 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 26); 3086 } else 3087 /* divu */ 3088 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 27); 3089 goto 
done; 3090 } else { 3091 if (syned) { 3092 /* ddiv */ 3093 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 30); 3094 } else 3095 /* ddivu */ 3096 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 31); 3097 goto done; 3098 } 3099 } 3100 3101 case Min_Mthi: { 3102 UInt r_src = iregNo(i->Min.MtHL.src, mode64); 3103 p = mkFormR(p, 0, r_src, 0, 0, 0, 17); 3104 goto done; 3105 } 3106 3107 case Min_Mtlo: { 3108 UInt r_src = iregNo(i->Min.MtHL.src, mode64); 3109 p = mkFormR(p, 0, r_src, 0, 0, 0, 19); 3110 goto done; 3111 } 3112 3113 case Min_Mfhi: { 3114 UInt r_dst = iregNo(i->Min.MfHL.dst, mode64); 3115 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); 3116 goto done; 3117 } 3118 3119 case Min_Mflo: { 3120 UInt r_dst = iregNo(i->Min.MfHL.dst, mode64); 3121 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18); 3122 goto done; 3123 } 3124 3125 case Min_MtFCSR: { 3126 UInt r_src = iregNo(i->Min.MtFCSR.src, mode64); 3127 /* ctc1 */ 3128 p = mkFormR(p, 17, 6, r_src, 31, 0, 0); 3129 goto done; 3130 } 3131 3132 case Min_MfFCSR: { 3133 UInt r_dst = iregNo(i->Min.MfFCSR.dst, mode64); 3134 /* cfc1 */ 3135 p = mkFormR(p, 17, 2, r_dst, 31, 0, 0); 3136 goto done; 3137 } 3138 3139 case Min_Call: { 3140 MIPSCondCode cond = i->Min.Call.cond; 3141 UInt r_dst = 25; /* using %r25 as address temporary - 3142 see getRegUsage_MIPSInstr */ 3143 3144 /* jump over the following insns if condition does not hold */ 3145 if (cond != MIPScc_AL) { 3146 /* jmp fwds if !condition */ 3147 /* don't know how many bytes to jump over yet... 3148 make space for a jump instruction + nop!!! and fill in later. */ 3149 ptmp = p; /* fill in this bit later */ 3150 p += 8; // p += 8 3151 } 3152 3153 /* load target to r_dst */// p += 4|8 3154 p = mkLoadImm(p, r_dst, i->Min.Call.target, mode64); 3155 3156 /* jalr %r_dst */ 3157 p = mkFormR(p, 0, r_dst, 0, 31, 0, 9); // p += 4 3158 p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 3159 3160 /* Fix up the conditional jump, if there was one. */ 3161 if (cond != MIPScc_AL) { 3162 UInt r_src = iregNo(i->Min.Call.src, mode64); 3163 Int delta = p - ptmp; 3164 3165 vassert(delta >= 20 && delta <= 32); 3166 /* bc !ct,cf,delta/4 */ 3167 /* blez r_src, delta/4-1 */ 3168 vassert(cond == MIPScc_EQ); 3169 ptmp = mkFormI(ptmp, 6, r_src, 0, delta / 4 - 1); 3170 ptmp = mkFormR(ptmp, 0, 0, 0, 0, 0, 0); 3171 } 3172 goto done; 3173 } 3174 3175 case Min_XDirect: { 3176 /* NB: what goes on here has to be very closely coordinated 3177 with the chainXDirect_MIPS and unchainXDirect_MIPS below. */ 3178 /* We're generating chain-me requests here, so we need to be 3179 sure this is actually allowed -- no-redir translations 3180 can't use chain-me's. Hence: */ 3181 vassert(disp_cp_chain_me_to_slowEP != NULL); 3182 vassert(disp_cp_chain_me_to_fastEP != NULL); 3183 3184 /* Use ptmp for backpatching conditional jumps. */ 3185 ptmp = NULL; 3186 3187 /* First off, if this is conditional, create a conditional 3188 jump over the rest of it. Or at least, leave a space for 3189 it that we will shortly fill in. */ 3190 if (i->Min.XDirect.cond != MIPScc_AL) { 3191 vassert(i->Min.XDirect.cond != MIPScc_NV); 3192 ptmp = p; 3193 p += 12; 3194 } 3195 3196 /* Update the guest PC. 
*/ 3197 /* move r9, dstGA */ 3198 /* sw r9, amPC */ 3199 p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 3200 (ULong)i->Min.XDirect.dstGA, mode64); 3201 p = do_load_or_store_machine_word(p, False/*!isLoad*/, 3202 /*r*/9, i->Min.XDirect.amPC, mode64); 3203 3204 /* --- FIRST PATCHABLE BYTE follows --- */ 3205 /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're 3206 calling to) backs up the return address, so as to find the 3207 address of the first patchable byte. So: don't change the 3208 number of instructions (3) below. */ 3209 /* move r9, VG_(disp_cp_chain_me_to_{slowEP,fastEP}) */ 3210 /* jr r9 */ 3211 void* disp_cp_chain_me 3212 = i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP 3213 : disp_cp_chain_me_to_slowEP; 3214 p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 3215 Ptr_to_ULong(disp_cp_chain_me), mode64); 3216 /* jalr $9 */ 3217 /* nop */ 3218 p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4 3219 p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 3220 /* --- END of PATCHABLE BYTES --- */ 3221 3222 /* Fix up the conditional jump, if there was one. */ 3223 if (i->Min.XDirect.cond != MIPScc_AL) { 3224 Int delta = p - ptmp; 3225 delta = delta / 4 - 3; 3226 vassert(delta > 0 && delta < 40); 3227 /* lw $9, 316($10) // guest_COND 3228 beq $9, $0, 2 3229 nop*/ 3230 ptmp = mkFormI(ptmp, 35, 10, 9, 316); 3231 ptmp = mkFormI(ptmp, 4, 0, 9, (delta)); 3232 ptmp = mkFormR(ptmp, 0, 0, 0, 0, 0, 0); 3233 } 3234 goto done; 3235 } 3236 3237 case Min_XIndir: { 3238 /* We're generating transfers that could lead indirectly to a 3239 chain-me, so we need to be sure this is actually allowed -- 3240 no-redir translations are not allowed to reach normal 3241 translations without going through the scheduler. That means 3242 no XDirects or XIndirs out from no-redir translations. 3243 Hence: */ 3244 vassert(disp_cp_xindir != NULL); 3245 3246 /* Use ptmp for backpatching conditional jumps. */ 3247 ptmp = NULL; 3248 3249 /* First off, if this is conditional, create a conditional 3250 jump over the rest of it. */ 3251 if (i->Min.XIndir.cond != MIPScc_AL) { 3252 vassert(i->Min.XIndir.cond != MIPScc_NV); 3253 ptmp = p; 3254 p += 12; 3255 } 3256 3257 /* Update the guest PC. */ 3258 /* sw r-dstGA, amPC */ 3259 p = do_load_or_store_machine_word(p, False/*!isLoad*/, 3260 iregNo(i->Min.XIndir.dstGA, mode64), 3261 i->Min.XIndir.amPC, mode64); 3262 3263 /* move r9, VG_(disp_cp_xindir) */ 3264 /* jalr r9 */ 3265 /* nop */ 3266 p = mkLoadImm_EXACTLY2or5 ( p, /*r*/9, 3267 Ptr_to_ULong(disp_cp_xindir), mode64); 3268 p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4 3269 p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 3270 3271 /* Fix up the conditional jump, if there was one. */ 3272 if (i->Min.XIndir.cond != MIPScc_AL) { 3273 Int delta = p - ptmp; 3274 delta = delta / 4 - 3; 3275 vassert(delta > 0 && delta < 40); 3276 /* lw $9, 316($10) // guest_COND 3277 beq $9, $0, 2 3278 nop*/ 3279 ptmp = mkFormI(ptmp, 35, 10, 9, 316); 3280 ptmp = mkFormI(ptmp, 4, 0, 9, (delta)); 3281 ptmp = mkFormR(ptmp, 0, 0, 0, 0, 0, 0); 3282 } 3283 goto done; 3284 } 3285 3286 case Min_XAssisted: { 3287 /* First off, if this is conditional, create a conditional jump 3288 over the rest of it. Or at least, leave a space for it that 3289 we will shortly fill in. */ 3290 ptmp = NULL; 3291 if (i->Min.XAssisted.cond != MIPScc_AL) { 3292 vassert(i->Min.XAssisted.cond != MIPScc_NV); 3293 ptmp = p; 3294 p += 12; 3295 } 3296 3297 /* Update the guest PC. 
*/
      /* sw r-dstGA, amPC */
      p = do_load_or_store_machine_word(p, False/*!isLoad*/,
                                        iregNo(i->Min.XAssisted.dstGA, mode64),
                                        i->Min.XAssisted.amPC, mode64);

      /* imm32/64 r10, $magic_number */
      UInt trcval = 0;
      switch (i->Min.XAssisted.jk) {
      case Ijk_ClientReq:   trcval = VEX_TRC_JMP_CLIENTREQ;   break;
      case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
      //case Ijk_Sys_int128:  trcval = VEX_TRC_JMP_SYS_INT128;  break;
      //case Ijk_Yield:       trcval = VEX_TRC_JMP_YIELD;       break;
      case Ijk_EmWarn:      trcval = VEX_TRC_JMP_EMWARN;      break;
      case Ijk_EmFail:      trcval = VEX_TRC_JMP_EMFAIL;      break;
      //case Ijk_MapFail:     trcval = VEX_TRC_JMP_MAPFAIL;     break;
      case Ijk_NoDecode:    trcval = VEX_TRC_JMP_NODECODE;    break;
      case Ijk_TInval:      trcval = VEX_TRC_JMP_TINVAL;      break;
      case Ijk_NoRedir:     trcval = VEX_TRC_JMP_NOREDIR;     break;
      case Ijk_SigTRAP:     trcval = VEX_TRC_JMP_SIGTRAP;     break;
      //case Ijk_SigSEGV:     trcval = VEX_TRC_JMP_SIGSEGV;     break;
      case Ijk_SigBUS:      trcval = VEX_TRC_JMP_SIGBUS;      break;
      case Ijk_Boring:      trcval = VEX_TRC_JMP_BORING;      break;
      /* We don't expect to see the following being assisted. */
      //case Ijk_Ret:
      //case Ijk_Call:
      /* fallthrough */
      default:
         ppIRJumpKind(i->Min.XAssisted.jk);
         vpanic("emit_MIPSInstr.Min_XAssisted: unexpected jump kind");
      }
      vassert(trcval != 0);
      p = mkLoadImm_EXACTLY2or5(p, /*r*/10, trcval, mode64);

      /* move r9, VG_(disp_cp_xassisted) */
      p = mkLoadImm_EXACTLY2or5(p, /*r*/9,
                                (ULong)Ptr_to_ULong(disp_cp_xassisted), mode64);
      /* jalr $9
         nop */
      p = mkFormR(p, 0, 9, 0, 31, 0, 9);  // p += 4
      p = mkFormR(p, 0, 0, 0, 0, 0, 0);   // p += 4

      /* Fix up the conditional jump, if there was one.
*/ 3340 if (i->Min.XAssisted.cond != MIPScc_AL) { 3341 Int delta = p - ptmp; 3342 delta = delta / 4 - 3; 3343 vassert(delta > 0 && delta < 40); 3344 /* lw $9, 316($10) // guest_COND 3345 beq $9, $0, 2 3346 nop*/ 3347 ptmp = mkFormI(ptmp, 35, 10, 9, 316); 3348 ptmp = mkFormI(ptmp, 4, 0, 9, (delta)); 3349 ptmp = mkFormR(ptmp, 0, 0, 0, 0, 0, 0); 3350 } 3351 goto done; 3352 } 3353 3354 case Min_Load: { 3355 MIPSAMode *am_addr = i->Min.Load.src; 3356 if (am_addr->tag == Mam_IR) { 3357 UInt r_dst = iregNo(i->Min.Load.dst, mode64); 3358 UInt opc, sz = i->Min.Load.sz; 3359 if (mode64 && (sz == 4 || sz == 8)) { 3360 /* should be guaranteed to us by iselWordExpr_AMode */ 3361 vassert(0 == (am_addr->Mam.IR.index & 3)); 3362 } 3363 switch (sz) { 3364 case 1: 3365 opc = 32; 3366 break; 3367 case 2: 3368 opc = 33; 3369 break; 3370 case 4: 3371 opc = 35; 3372 break; 3373 case 8: 3374 opc = 55; 3375 vassert(mode64); 3376 break; 3377 default: 3378 goto bad; 3379 } 3380 3381 p = doAMode_IR(p, opc, r_dst, am_addr, mode64); 3382 goto done; 3383 } else if (am_addr->tag == Mam_RR) { 3384 UInt r_dst = iregNo(i->Min.Load.dst, mode64); 3385 UInt opc, sz = i->Min.Load.sz; 3386 3387 switch (sz) { 3388 case 1: 3389 opc = 32; 3390 break; 3391 case 2: 3392 opc = 33; 3393 break; 3394 case 4: 3395 opc = 35; 3396 break; 3397 case 8: 3398 opc = 55; 3399 vassert(mode64); 3400 break; 3401 default: 3402 goto bad; 3403 } 3404 3405 p = doAMode_RR(p, opc, r_dst, am_addr, mode64); 3406 goto done; 3407 } 3408 break; 3409 } 3410 3411 case Min_Store: { 3412 MIPSAMode *am_addr = i->Min.Store.dst; 3413 if (am_addr->tag == Mam_IR) { 3414 UInt r_src = iregNo(i->Min.Store.src, mode64); 3415 UInt opc, sz = i->Min.Store.sz; 3416 if (mode64 && (sz == 4 || sz == 8)) { 3417 /* should be guaranteed to us by iselWordExpr_AMode */ 3418 vassert(0 == (am_addr->Mam.IR.index & 3)); 3419 } 3420 switch (sz) { 3421 case 1: 3422 opc = 40; 3423 break; 3424 case 2: 3425 opc = 41; 3426 break; 3427 case 4: 3428 opc = 43; 3429 break; 3430 case 8: 3431 vassert(mode64); 3432 opc = 63; 3433 break; 3434 default: 3435 goto bad; 3436 } 3437 3438 p = doAMode_IR(p, opc, r_src, am_addr, mode64); 3439 goto done; 3440 } else if (am_addr->tag == Mam_RR) { 3441 UInt r_src = iregNo(i->Min.Store.src, mode64); 3442 UInt opc, sz = i->Min.Store.sz; 3443 3444 switch (sz) { 3445 case 1: 3446 opc = 40; 3447 break; 3448 case 2: 3449 opc = 41; 3450 break; 3451 case 4: 3452 opc = 43; 3453 break; 3454 case 8: 3455 vassert(mode64); 3456 opc = 63; 3457 break; 3458 default: 3459 goto bad; 3460 } 3461 3462 p = doAMode_RR(p, opc, r_src, am_addr, mode64); 3463 goto done; 3464 } 3465 break; 3466 } 3467 case Min_LoadL: { 3468 MIPSAMode *am_addr = i->Min.LoadL.src; 3469 UInt r_src = iregNo(am_addr->Mam.IR.base, mode64); 3470 UInt idx = am_addr->Mam.IR.index; 3471 UInt r_dst = iregNo(i->Min.LoadL.dst, mode64); 3472 3473 p = mkFormI(p, 0x30, r_src, r_dst, idx); 3474 goto done; 3475 } 3476 case Min_StoreC: { 3477 MIPSAMode *am_addr = i->Min.StoreC.dst; 3478 UInt r_src = iregNo(i->Min.StoreC.src, mode64); 3479 UInt idx = am_addr->Mam.IR.index; 3480 UInt r_dst = iregNo(am_addr->Mam.IR.base, mode64); 3481 3482 p = mkFormI(p, 0x38, r_dst, r_src, idx); 3483 goto done; 3484 } 3485 case Min_RdWrLR: { 3486 UInt reg = iregNo(i->Min.RdWrLR.gpr, mode64); 3487 Bool wrLR = i->Min.RdWrLR.wrLR; 3488 if (wrLR) 3489 p = mkMoveReg(p, 31, reg); 3490 else 3491 p = mkMoveReg(p, reg, 31); 3492 goto done; 3493 } 3494 3495 // Floating point 3496 3497 case Min_FpLdSt: { 3498 MIPSAMode *am_addr = i->Min.FpLdSt.addr; 
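      /* Note on the 64-bit cases below: in 64-bit mode a doubleword FP
         access is a single ldc1/sdc1 (opcodes 0x35/0x3d); in 32-bit mode
         it is split into two lwc1/swc1 accesses (opcodes 0x31/0x39), the
         second one addressing the other half via nextMIPSAModeFloat(). */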
      UChar sz = i->Min.FpLdSt.sz;
      vassert(sz == 4 || sz == 8);
      if (sz == 4) {
         UInt f_reg = fregNo(i->Min.FpLdSt.reg, mode64);
         if (i->Min.FpLdSt.isLoad) {
            if (am_addr->tag == Mam_IR)
               p = doAMode_IR(p, 0x31, f_reg, am_addr, mode64);
            else if (am_addr->tag == Mam_RR)
               p = doAMode_RR(p, 0x31, f_reg, am_addr, mode64);
         } else {
            if (am_addr->tag == Mam_IR)
               p = doAMode_IR(p, 0x39, f_reg, am_addr, mode64);
            else if (am_addr->tag == Mam_RR)
               p = doAMode_RR(p, 0x39, f_reg, am_addr, mode64);
         }
      } else if (sz == 8) {
         UInt f_reg = dregNo(i->Min.FpLdSt.reg);
         if (i->Min.FpLdSt.isLoad) {
            if (am_addr->tag == Mam_IR) {
               if (mode64) {
                  p = doAMode_IR(p, 0x35, f_reg, am_addr, mode64);
               } else {
                  p = doAMode_IR(p, 0x31, f_reg, am_addr, mode64);
                  p = doAMode_IR(p, 0x31, f_reg + 1,
                                 nextMIPSAModeFloat(am_addr), mode64);
               }
            } else if (am_addr->tag == Mam_RR) {
               if (mode64) {
                  p = doAMode_RR(p, 0x35, f_reg, am_addr, mode64);
               } else {
                  p = doAMode_RR(p, 0x31, f_reg, am_addr, mode64);
                  p = doAMode_RR(p, 0x31, f_reg + 1,
                                 nextMIPSAModeFloat(am_addr), mode64);
               }
            }
         } else {
            if (am_addr->tag == Mam_IR) {
               if (mode64) {
                  p = doAMode_IR(p, 0x3d, f_reg, am_addr, mode64);
               } else {
                  p = doAMode_IR(p, 0x39, f_reg, am_addr, mode64);
                  p = doAMode_IR(p, 0x39, f_reg + 1,
                                 nextMIPSAModeFloat(am_addr), mode64);
               }
            } else if (am_addr->tag == Mam_RR) {
               if (mode64) {
                  p = doAMode_RR(p, 0x3d, f_reg, am_addr, mode64);
               } else {
                  p = doAMode_RR(p, 0x39, f_reg, am_addr, mode64);
                  p = doAMode_RR(p, 0x39, f_reg + 1,
                                 nextMIPSAModeFloat(am_addr), mode64);
               }
            }
         }
      }
      goto done;
   }

   case Min_FpUnary: {
      switch (i->Min.FpUnary.op) {
      case Mfp_MOVS: {  // FP move
         UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
         UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x6);
         break;
      }
      case Mfp_MOVD: {  // FP move
         UInt fr_dst = dregNo(i->Min.FpUnary.dst);
         UInt fr_src = dregNo(i->Min.FpUnary.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x6);
         break;
      }
      case Mfp_ABSS: {  // ABS.S
         UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
         UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x5);
         break;
      }
      case Mfp_ABSD: {  // ABS.D
         UInt fr_dst = dregNo(i->Min.FpUnary.dst);
         UInt fr_src = dregNo(i->Min.FpUnary.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x5);
         break;
      }
      case Mfp_NEGS: {  // NEG.S
         UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
         UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x7);
         break;
      }
      case Mfp_NEGD: {  // NEG.D
         UInt fr_dst = dregNo(i->Min.FpUnary.dst);
         UInt fr_src = dregNo(i->Min.FpUnary.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x7);
         break;
      }
      case Mfp_CVTD: {  // CVT.D.S
         UInt fr_dst = dregNo(i->Min.FpUnary.dst);
         UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x21);
         break;
      }
      case Mfp_SQRTS: {  // SQRT.S
         UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
         UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x04);
         break;
      }
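      /* In these COP1 encodings the rs slot of mkFormR carries the fmt
         field: 0x10 = S (single), 0x11 = D (double); the convert cases
         further down also use 0x14 = W (word). */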
case Mfp_SQRTD: { //SQRT.D 3608 UInt fr_dst = dregNo(i->Min.FpUnary.dst); 3609 UInt fr_src = dregNo(i->Min.FpUnary.src); 3610 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x04); 3611 break; 3612 } 3613 case Mfp_RSQRTS: { //RSQRT.S 3614 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); 3615 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); 3616 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x16); 3617 break; 3618 } 3619 case Mfp_RSQRTD: { //RSQRT.D 3620 UInt fr_dst = dregNo(i->Min.FpUnary.dst); 3621 UInt fr_src = dregNo(i->Min.FpUnary.src); 3622 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x16); 3623 break; 3624 } 3625 case Mfp_RECIPS: { //RECIP.S 3626 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); 3627 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); 3628 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x15); 3629 break; 3630 } 3631 case Mfp_RECIPD: { //RECIP.D 3632 UInt fr_dst = dregNo(i->Min.FpUnary.dst); 3633 UInt fr_src = dregNo(i->Min.FpUnary.src); 3634 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x15); 3635 break; 3636 } 3637 default: 3638 goto bad; 3639 } 3640 goto done; 3641 } 3642 3643 case Min_FpBinary: { 3644 switch (i->Min.FpBinary.op) { 3645 case Mfp_ADDS: { 3646 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64); 3647 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64); 3648 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64); 3649 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 0); 3650 break; 3651 } 3652 case Mfp_SUBS: { 3653 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64); 3654 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64); 3655 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64); 3656 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 1); 3657 break; 3658 } 3659 case Mfp_MULS: { 3660 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64); 3661 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64); 3662 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64); 3663 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 2); 3664 break; 3665 } 3666 case Mfp_DIVS: { 3667 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64); 3668 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64); 3669 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64); 3670 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 3); 3671 break; 3672 } 3673 case Mfp_ADDD: { 3674 UInt fr_dst = dregNo(i->Min.FpBinary.dst); 3675 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL); 3676 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR); 3677 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 0); 3678 break; 3679 } 3680 case Mfp_SUBD: { 3681 UInt fr_dst = dregNo(i->Min.FpBinary.dst); 3682 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL); 3683 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR); 3684 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 1); 3685 break; 3686 } 3687 case Mfp_MULD: { 3688 UInt fr_dst = dregNo(i->Min.FpBinary.dst); 3689 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL); 3690 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR); 3691 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 2); 3692 break; 3693 } 3694 case Mfp_DIVD: { 3695 UInt fr_dst = dregNo(i->Min.FpBinary.dst); 3696 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL); 3697 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR); 3698 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 3); 3699 break; 3700 } 3701 default: 3702 goto bad; 3703 } 3704 goto done; 3705 } 3706 3707 case Min_FpConvert: { 3708 switch (i->Min.FpConvert.op) { 3709 UInt fr_dst, fr_src; 3710 case Mfp_CVTSD: 3711 fr_dst = fregNo(i->Min.FpConvert.dst, mode64); 3712 fr_src = dregNo(i->Min.FpConvert.src); 3713 
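            /* cvt.s.d fr_dst, fr_src (fmt = D, function 0x20) */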
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x20);
         break;
      case Mfp_CVTSW:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x20);
         break;
      case Mfp_CVTWD:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x24);
         break;
      case Mfp_CVTWS:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x24);
         break;
      case Mfp_CVTDW:
         fr_dst = dregNo(i->Min.FpConvert.dst);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x21);
         break;
      case Mfp_TRUWS:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0D);
         break;
      case Mfp_TRUWD:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0D);
         break;
      case Mfp_TRULS:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x09);
         break;
      case Mfp_TRULD:
         fr_dst = dregNo(i->Min.FpConvert.dst);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x09);
         break;
      case Mfp_CEILWS:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0E);
         break;
      case Mfp_CEILWD:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0E);
         break;
      case Mfp_CEILLS:
         fr_dst = dregNo(i->Min.FpConvert.dst);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0A);
         break;
      case Mfp_CEILLD:
         fr_dst = dregNo(i->Min.FpConvert.dst);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0A);
         break;
      case Mfp_ROUNDWS:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0C);
         break;
      case Mfp_ROUNDWD:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0C);
         break;
      case Mfp_FLOORWS:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = fregNo(i->Min.FpConvert.src, mode64);
         p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0F);
         break;
      case Mfp_FLOORWD:
         fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
         fr_src = dregNo(i->Min.FpConvert.src);
         p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0F);
         break;

      default:
         goto bad;
      }
      goto done;
   }

   case Min_FpCompare: {
      UInt r_dst = iregNo(i->Min.FpCompare.dst, mode64);
      UInt fr_srcL = dregNo(i->Min.FpCompare.srcL);
      UInt fr_srcR = dregNo(i->Min.FpCompare.srcR);

      switch (i->Min.FpCompare.op) {
      case Mfp_CMP:
         p = mkFormR(p, 0x11, 0x11, fr_srcL, fr_srcR, 0,
                     (i->Min.FpCompare.cond1 + 48));
         p = mkFormR(p, 0x11, 0x2, r_dst, 31, 0, 0);
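         /* The first insn is a c.cond.d compare (function field 0x30 +
            cond1), which sets the FP condition bit in the FCSR; the cfc1
            just emitted then copies the FCSR ($31) into r_dst so the
            caller can test that bit. */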
3812 break; 3813 default: 3814 goto bad; 3815 } 3816 goto done; 3817 } 3818 case Min_EvCheck: { 3819 /* This requires a 32-bit dec/test in 32 mode. */ 3820 /* We generate: 3821 lw r9, amCounter 3822 addiu r9, r9, -1 3823 sw r9, amCounter 3824 bgez r9, nofail 3825 lw r9, amFailAddr 3826 jalr r9 3827 nop 3828 nofail: 3829 */ 3830 UChar* p0 = p; 3831 /* lw r9, amCounter */ 3832 p = do_load_or_store_machine_word(p, True/*isLoad*/, /*r*/9, 3833 i->Min.EvCheck.amCounter, mode64); 3834 /* addiu r9,r9,-1 */ 3835 p = mkFormI(p, 9, 9, 9, 0xFFFF); 3836 /* sw r30, amCounter */ 3837 p = do_load_or_store_machine_word(p, False/*!isLoad*/, /*r*/9, 3838 i->Min.EvCheck.amCounter, mode64); 3839 /* bgez t9, nofail */ 3840 p = mkFormI(p, 1, 9, 1, 3); 3841 /* lw r9, amFailAddr */ 3842 p = do_load_or_store_machine_word(p, True/*isLoad*/, /*r*/9, 3843 i->Min.EvCheck.amFailAddr, mode64); 3844 /* jalr $9 */ 3845 p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4 3846 p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 3847 /* nofail: */ 3848 3849 /* Crosscheck */ 3850 vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0); 3851 goto done; 3852 } 3853 3854 case Min_ProfInc: { 3855 /* Generate a code template to increment a memory location whose 3856 address will be known later as an immediate value. This code 3857 template will be patched once the memory location is known. 3858 For now we do this with address == 0x65556555. 3859 32-bit: 3860 3861 move r9, 0x65556555 3862 lw r8, 0(r9) 3863 addiu r8, r8, 1 # add least significant word 3864 sw r8, 0(r9) 3865 sltiu r1, r8, 1 # set carry-in bit 3866 lw r8, 4(r9) 3867 addu r8, r8, r1 3868 sw r8, 4(r9) */ 3869 3870 if (mode64) { 3871 vassert(0); 3872 } else { 3873 // move r9, 0x65556555 3874 p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 0x65556555ULL, 3875 False/*!mode64*/); 3876 // lw r8, 0(r9) 3877 p = mkFormI(p, 35, 9, 8, 0); 3878 3879 // addiu r8, r8, 1 # add least significant word 3880 p = mkFormI(p, 9, 8, 8, 1); 3881 3882 // sw r8, 0(r9) 3883 p = mkFormI(p, 43, 9, 8, 0); 3884 3885 // sltiu r1, r8, 1 # set carry-in bit 3886 p = mkFormI(p, 11, 8, 1, 1); 3887 3888 // lw r8, 4(r9) 3889 p = mkFormI(p, 35, 9, 8, 4); 3890 3891 // addu r8, r8, r1 3892 p = mkFormR(p, 0, 8, 1, 8, 0, 33); 3893 3894 // sw r8, 4(r9) 3895 p = mkFormI(p, 43, 9, 8, 4); 3896 3897 } 3898 /* Tell the caller .. */ 3899 vassert(!(*is_profInc)); 3900 *is_profInc = True; 3901 goto done; 3902 } 3903 3904 default: 3905 goto bad; 3906 3907 } 3908 3909 bad: 3910 vex_printf("\n=> "); 3911 ppMIPSInstr(i, mode64); 3912 vpanic("emit_MIPSInstr"); 3913 /*NOTREACHED*/ done: 3914 //vassert(p - &buf[0] <= 32); 3915 return p - &buf[0]; 3916} 3917 3918/* How big is an event check? See case for Min_EvCheck in 3919 emit_MIPSInstr just above. That crosschecks what this returns, so 3920 we can tell if we're inconsistent. */ 3921Int evCheckSzB_MIPS ( void ) 3922{ 3923 UInt kInstrSize = 4; 3924 return 7*kInstrSize; 3925} 3926 3927/* NB: what goes on here has to be very closely coordinated with the 3928 emitInstr case for XDirect, above. 
*/ 3929VexInvalRange chainXDirect_MIPS ( void* place_to_chain, 3930 void* disp_cp_chain_me_EXPECTED, 3931 void* place_to_jump_to, 3932 Bool mode64 ) 3933{ 3934 /* What we're expecting to see is: 3935 move r9, disp_cp_chain_me_to_EXPECTED 3936 jalr r9 3937 nop 3938 viz 3939 <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5> 3940 0x120F809 // jalr r9 3941 0x00000000 // nop 3942 */ 3943 UChar* p = (UChar*)place_to_chain; 3944 vassert(0 == (3 & (HWord)p)); 3945 vassert(isLoadImm_EXACTLY2or5(p, /*r*/9, 3946 (UInt)Ptr_to_ULong(disp_cp_chain_me_EXPECTED), 3947 mode64)); 3948 vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x120F809); 3949 vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x00000000); 3950 /* And what we want to change it to is either: 3951 move r9, place_to_jump_to 3952 jalr r9 3953 nop 3954 viz 3955 <8 bytes generated by mkLoadImm_EXACTLY2or5> 3956 0x120F809 // jalr r9 3957 0x00000000 // nop 3958 3959 The replacement has the same length as the original. 3960 */ 3961 3962 p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 3963 Ptr_to_ULong(place_to_jump_to), mode64); 3964 p = emit32(p, 0x120F809); 3965 p = emit32(p, 0x00000000); 3966 3967 Int len = p - (UChar*)place_to_chain; 3968 vassert(len == (mode64 ? 28 : 16)); /* stay sane */ 3969 VexInvalRange vir = {(HWord)place_to_chain, len}; 3970 return vir; 3971} 3972 3973/* NB: what goes on here has to be very closely coordinated with the 3974 emitInstr case for XDirect, above. */ 3975VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain, 3976 void* place_to_jump_to_EXPECTED, 3977 void* disp_cp_chain_me, 3978 Bool mode64 ) 3979{ 3980 /* What we're expecting to see is: 3981 move r9, place_to_jump_to_EXPECTED 3982 jalr r9 3983 nop 3984 viz 3985 <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5> 3986 0x120F809 // jalr r9 3987 0x00000000 // nop 3988 */ 3989 UChar* p = (UChar*)place_to_unchain; 3990 vassert(0 == (3 & (HWord)p)); 3991 vassert(isLoadImm_EXACTLY2or5(p, /*r*/9, 3992 Ptr_to_ULong(place_to_jump_to_EXPECTED), 3993 mode64)); 3994 vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x120F809); 3995 vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x00000000); 3996 /* And what we want to change it to is: 3997 move r9, disp_cp_chain_me 3998 jalr r9 3999 nop 4000 viz 4001 <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5> 4002 0x120F809 // jalr r9 4003 0x00000000 // nop 4004 The replacement has the same length as the original. 4005 */ 4006 p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 4007 Ptr_to_ULong(disp_cp_chain_me), mode64); 4008 p = emit32(p, 0x120F809); 4009 p = emit32(p, 0x00000000); 4010 4011 Int len = p - (UChar*)place_to_unchain; 4012 vassert(len == (mode64 ? 28 : 16)); /* stay sane */ 4013 VexInvalRange vir = {(HWord)place_to_unchain, len}; 4014 return vir; 4015} 4016 4017/* Patch the counter address into a profile inc point, as previously 4018 created by the Min_ProfInc case for emit_MIPSInstr. */ 4019VexInvalRange patchProfInc_MIPS ( void* place_to_patch, 4020 ULong* location_of_counter, Bool mode64 ) 4021{ 4022 vassert(sizeof(ULong*) == 4); 4023 UChar* p = (UChar*)place_to_patch; 4024 vassert(0 == (3 & (HWord)p)); 4025 vassert(isLoadImm_EXACTLY2or5((UChar *)p, /*r*/9, 0x65556555, mode64)); 4026 4027 vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x8D280000); 4028 vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x25080001); 4029 vassert(fetch32(p + (mode64 ? 20 : 8) + 8) == 0xAD280000); 4030 vassert(fetch32(p + (mode64 ? 20 : 8) + 12) == 0x2d010001); 4031 vassert(fetch32(p + (mode64 ? 20 : 8) + 16) == 0x8d280004); 4032 vassert(fetch32(p + (mode64 ? 
20 : 8) + 20) == 0x01014021); 4033 vassert(fetch32(p + (mode64 ? 20 : 8) + 24) == 0xad280004); 4034 4035 p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 4036 Ptr_to_ULong(location_of_counter), mode64); 4037 4038 VexInvalRange vir = {(HWord)p, 8}; 4039 return vir; 4040} 4041 4042 4043/*---------------------------------------------------------------*/ 4044/*--- end host_mips_defs.c ---*/ 4045/*---------------------------------------------------------------*/ 4046