Lines Matching defs:ld

107 * we use the ld->m (bit [36:36]) field to determine whether or not we have
149 * we use the ld->m (bit [36:36]) field to determine whether or not we have
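
The ld->m bit called out in these two comments is one field of the decoded load/store instruction that the rest of the matches operate on. For orientation, here is a minimal sketch of what that load_store_t decode could look like, reconstructed from the field names visible in this listing (qp, r1, imm, r3, x, hint, x6_sz, x6_op, m, op) and the standard 41-bit IA-64 M-unit encoding; the exact widths and bit positions are an assumption, not taken from the file itself.

/*
 * Hypothetical layout of the decoded instruction; field names match the
 * accesses below, bit ranges assume the architected integer load/store
 * format with the major opcode in the top bits.
 */
typedef struct {
        unsigned long qp:6;     /* [0:5]   qualifying predicate              */
        unsigned long r1:7;     /* [6:12]  target register                   */
        unsigned long imm:7;    /* [13:19] imm7b, or r2 for register update  */
        unsigned long r3:7;     /* [20:26] base register                     */
        unsigned long x:1;      /* [27:27] used as bit 7 of imm9 (line 699)  */
        unsigned long hint:2;   /* [28:29] locality hint                     */
        unsigned long x6_sz:2;  /* [30:31] access size (len = 1 << x6_sz)    */
        unsigned long x6_op:4;  /* [32:35] opcode extension                  */
        unsigned long m:1;      /* [36:36] the bit discussed at 107/149      */
        unsigned long op:4;     /* [37:40] major opcode                      */
        unsigned long pad:23;   /* [41:63] unused                            */
} load_store_t;
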
326 DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
363 DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
399 DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
670 emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
678 if (ld.x6_op == 1 || ld.x6_op == 3) {
699 imm = ld.x << 7 | ld.imm;
704 if (ld.m) imm |= SIGN_EXT9;
712 setreg(ld.r3, ifa, 0, regs);
714 DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);
716 } else if (ld.m) {
736 getreg(ld.imm, &r2, &nat_r2, regs);
743 setreg(ld.r3, ifa, nat_r2, regs);
745 DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n",ld.imm, r2, ifa, nat_r2);
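
Lines 699-704 assemble the signed 9-bit post-increment immediate from ld.x, ld.imm and ld.m. A self-contained sketch of that arithmetic is below; the value of SIGN_EXT9 is an assumption (all bits above bit 7 set), since the macro's definition is not among the matched lines.

#include <stdio.h>

#define SIGN_EXT9 0xffffffffffffff00ul  /* assumed: extend the sign from bit 8 upward */

/* Rebuild the signed 9-bit immediate: imm7 supplies bits 0-6, x supplies
 * bit 7, and m acts as the sign bit, mirroring lines 699-704. */
static long imm9(unsigned int m, unsigned int x, unsigned int imm7)
{
        unsigned long imm = (unsigned long)x << 7 | (imm7 & 0x7f);

        if (m)
                imm |= SIGN_EXT9;
        return (long)imm;
}

int main(void)
{
        printf("%ld %ld\n", imm9(0, 0, 8), imm9(1, 1, 0x70));   /* prints "8 -16" */
        return 0;
}
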
751 emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
753 unsigned int len = 1 << ld.x6_sz;
770 DPRINT("unknown size: x6=%d\n", ld.x6_sz);
776 setreg(ld.r1, val, 0, regs);
781 if (ld.op == 0x5 || ld.m)
782 emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
808 * is the base address of the load & size. To do that, a ld.a must be executed,
838 * We will always convert ld.a into a normal load with ALAT invalidated. This
839 * will enable the compiler to do optimizations where certain code paths after ld.a
840 * are not required to have ld.c/chk.a, e.g., a code path with no intervening stores.
842 * If there is a store after the advanced load, one must either do a ld.c.* or
846 * - ld.c.*, if the entry is not present, a normal load is executed
851 * ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
859 if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
865 if (ld.x6_op == 0x2)
866 invala_gr(ld.r1);
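
Lines 859-866 are the post-load fixups described in the comment block above: an ordering fence for the .acq forms and an ALAT invalidation for an advanced load. A condensed, self-contained sketch of that decision is below; the mnemonics attached to the x6_op values (ld.acq, ld.c.clr.acq, ld.a) follow the usual IA-64 encoding and should be treated as an assumption, since the decode table itself is not part of this listing.

enum post_load_fixup { FIXUP_NONE, FIXUP_FENCE, FIXUP_INVALA };

/* Mirror of the checks at lines 859-866: the caller issues mb() for
 * FIXUP_FENCE and invalidates the ALAT entry of r1 for FIXUP_INVALA. */
static enum post_load_fixup classify_load(unsigned int x6_op)
{
        if (x6_op == 0x5 || x6_op == 0xa)       /* ld.acq / ld.c.clr.acq (assumed mnemonics) */
                return FIXUP_FENCE;
        if (x6_op == 0x2)                       /* ld.a: advanced load */
                return FIXUP_INVALA;
        return FIXUP_NONE;
}

This is why the comment block above can promise that a later chk.a or ld.c never picks up a stale entry: the advanced-load case always ends in an ALAT invalidation for the target register.
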
872 emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
875 unsigned int len = 1 << ld.x6_sz;
883 getreg(ld.imm, &r2, NULL, regs);
896 DPRINT("unknown size: x6=%d\n", ld.x6_sz);
908 * ld.r3 can never be r0, because r0 would not generate an
911 if (ld.op == 0x5) {
917 imm = ld.x << 7 | ld.r1;
921 if (ld.m) imm |= SIGN_EXT9;
929 setreg(ld.r3, ifa, 0, regs);
940 if (ld.x6_op == 0xd)
1021 emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
1025 unsigned long len = float_fsz[ld.x6_sz];
1045 if (ld.x6_op != 0x2) {
1054 DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
1060 switch( ld.x6_sz ) {
1086 setfpreg(ld.r1, &fpr_final[0], regs);
1087 setfpreg(ld.imm, &fpr_final[1], regs);
1094 if (ld.m) {
1107 if (ld.x6_op == 1 || ld.x6_op == 3)
1111 setreg(ld.r3, ifa, 0, regs);
1117 if (ld.x6_op == 0x2) {
1118 invala_fr(ld.r1);
1119 invala_fr(ld.imm);
1126 emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
1130 unsigned long len = float_fsz[ld.x6_sz];
1151 if (ld.x6_op != 0x2) {
1155 DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
1160 switch( ld.x6_sz ) {
1182 setfpreg(ld.r1, &fpr_final, regs);
1188 if (ld.op == 0x7 || ld.m)
1189 emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
1194 if (ld.x6_op == 0x2)
1195 invala_fr(ld.r1);
1202 emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
1206 unsigned long len = float_fsz[ld.x6_sz];
1220 getfpreg(ld.imm, &fpr_init, regs);
1226 switch( ld.x6_sz ) {
1240 DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
1251 * ld.r3 can never be r0, because r0 would not generate an
1254 if (ld.op == 0x7) {
1260 imm = ld.x << 7 | ld.r1;
1264 if (ld.m)
1273 setreg(ld.r3, ifa, 0, regs);
1384 DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
1385 "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,