/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mips_lir.h"

namespace art {

/* This file contains codegen for the MIPS32 ISA. */
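// Copy between FP and/or core registers, choosing fmov.d, fmov.s, mtc1 or mfc1
// based on the register classes of the operands (see the cases below).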
LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kMipsFmovd;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kMipsFmovs;
      } else {
        /* note the operands are swapped for the mtc1 instr */
        RegStorage t_opnd = r_src;
        r_src = r_dest;
        r_dest = t_opnd;
        opcode = kMipsMtc1;
      }
    } else {
      DCHECK(r_src.IsSingle());
      opcode = kMipsMfc1;
    }
  }
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

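// A constant is "inexpensive" if it can be materialized in a single instruction:
// zero, an unsigned 16-bit value (ori), or a negative value that fits in a
// signed 16-bit immediate (addiu).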
bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
  return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
}

bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;  // TUNING
}

bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
  return false;  // TUNING
}

bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
  return false;  // TUNING
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR *res;

  RegStorage r_dest_save = r_dest;
  bool is_fp_reg = r_dest.IsFloat();
  if (is_fp_reg) {
    DCHECK(r_dest.IsSingle());
    r_dest = AllocTemp();
  }

  /* See if the value can be constructed cheaply */
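  // For example: 0x1234 is emitted as a single ori; -42 as a single addiu;
  // 0x12345678 needs lui 0x1234 followed by ori 0x5678.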
  if (value == 0) {
    res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
  } else if ((value > 0) && (value <= 65535)) {
    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
  } else if ((value < 0) && (value >= -32768)) {
    res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
  } else {
    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
    if (value & 0xffff)
      NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
  }

  if (is_fp_reg) {
    NewLIR2(kMipsMtc1, r_dest.GetReg(), r_dest_save.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  MipsOpCode opcode = kMipsNop;
  switch (op) {
    case kOpBlx:
      opcode = kMipsJalr;
      break;
    case kOpBx:
      return NewLIR1(kMipsJr, r_dest_src.GetReg());
      break;
    default:
      LOG(FATAL) << "Bad case in OpReg";
  }
  return NewLIR2(opcode, rRA, r_dest_src.GetReg());
}

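// Two-operand register/immediate ops. kOpAdd and kOpSub are delegated to
// OpRegRegImm; any other op currently hits the fatal default case.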
LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  LIR *res;
  bool neg = (value < 0);
  int abs_value = (neg) ? -value : value;
  bool short_form = (abs_value & 0xff) == abs_value;
  MipsOpCode opcode = kMipsNop;
  switch (op) {
    case kOpAdd:
      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
      break;
    case kOpSub:
      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegImm";
      break;
  }
  if (short_form) {
    res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
  } else {
    RegStorage r_scratch = AllocTemp();
    res = LoadConstant(r_scratch, value);
    if (op == kOpCmp)
      NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
    else
      NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
  }
  return res;
}

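// Three-operand ALU ops: map the generic OpKind onto the corresponding MIPS
// R-type instruction. There is no carry flag on MIPS, so adc/sbc are fatal.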
LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  MipsOpCode opcode = kMipsNop;
  switch (op) {
    case kOpAdd:
      opcode = kMipsAddu;
      break;
    case kOpSub:
      opcode = kMipsSubu;
      break;
    case kOpAnd:
      opcode = kMipsAnd;
      break;
    case kOpMul:
      opcode = kMipsMul;
      break;
    case kOpOr:
      opcode = kMipsOr;
      break;
    case kOpXor:
      opcode = kMipsXor;
      break;
    case kOpLsl:
      opcode = kMipsSllv;
      break;
    case kOpLsr:
      opcode = kMipsSrlv;
      break;
    case kOpAsr:
      opcode = kMipsSrav;
      break;
    case kOpAdc:
    case kOpSbc:
      LOG(FATAL) << "No carry bit on MIPS";
      break;
    default:
      LOG(FATAL) << "bad case in OpRegRegReg";
      break;
  }
  return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}

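// Register/register/immediate ops. When the constant fits the instruction's
// immediate field the short form is used; otherwise the constant is
// materialized into a register (r_dest or a temp) and the R-type form is used.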
LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  LIR *res;
  MipsOpCode opcode = kMipsNop;
  bool short_form = true;

  switch (op) {
    case kOpAdd:
      if (IS_SIMM16(value)) {
        opcode = kMipsAddiu;
      } else {
        short_form = false;
        opcode = kMipsAddu;
      }
      break;
    case kOpSub:
      if (IS_SIMM16((-value))) {
        value = -value;
        opcode = kMipsAddiu;
      } else {
        short_form = false;
        opcode = kMipsSubu;
      }
      break;
    case kOpLsl:
      DCHECK(value >= 0 && value <= 31);
      opcode = kMipsSll;
      break;
    case kOpLsr:
      DCHECK(value >= 0 && value <= 31);
      opcode = kMipsSrl;
      break;
    case kOpAsr:
      DCHECK(value >= 0 && value <= 31);
      opcode = kMipsSra;
      break;
    case kOpAnd:
      if (IS_UIMM16((value))) {
        opcode = kMipsAndi;
      } else {
        short_form = false;
        opcode = kMipsAnd;
      }
      break;
    case kOpOr:
      if (IS_UIMM16((value))) {
        opcode = kMipsOri;
      } else {
        short_form = false;
        opcode = kMipsOr;
      }
      break;
    case kOpXor:
      if (IS_UIMM16((value))) {
        opcode = kMipsXori;
      } else {
        short_form = false;
        opcode = kMipsXor;
      }
      break;
    case kOpMul:
      short_form = false;
      opcode = kMipsMul;
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegRegImm";
      break;
  }

  if (short_form) {
    res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
  } else {
    if (r_dest != r_src1) {
      res = LoadConstant(r_dest, value);
      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
    } else {
      RegStorage r_scratch = AllocTemp();
      res = LoadConstant(r_scratch, value);
      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    }
  }
  return res;
}

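// Two-operand ops. Complement and negate have dedicated encodings (nor/subu
// against rZERO); the arithmetic/logical ops reuse OpRegRegReg; sign-extension
// uses seb/seh on MIPS32R2+ and a shift pair otherwise.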
LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  MipsOpCode opcode = kMipsNop;
  LIR *res;
  switch (op) {
    case kOpMov:
      opcode = kMipsMove;
      break;
    case kOpMvn:
      return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
    case kOpNeg:
      return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
    case kOpAdd:
    case kOpAnd:
    case kOpMul:
    case kOpOr:
    case kOpSub:
    case kOpXor:
      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
    case kOp2Byte:
#if __mips_isa_rev >= 2
      res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
#else
      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
#endif
      return res;
    case kOp2Short:
#if __mips_isa_rev >= 2
      res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
#else
      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
#endif
      return res;
    case kOp2Char:
      return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
    default:
      LOG(FATAL) << "Bad case in OpRegReg";
      break;
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                              MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
  return nullptr;
}

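// A 64-bit constant is loaded as two independent 32-bit halves into the
// low/high registers of the destination pair.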
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  LIR *res;
  if (!r_dest.IsPair()) {
    // Form 64-bit pair
    r_dest = Solo64ToPair64(r_dest);
  }
  res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
  LoadConstantNoClobber(r_dest.GetHigh(), High32Bits(value));
  return res;
}

/* Load value from base + scaled index. */
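// For example, with scale == 2 this expands to roughly:
//   sll  t, index, 2
//   addu t, base, t
//   lw   dest, 0(t)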
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) {
  LIR *first = nullptr;
  LIR *res;
  MipsOpCode opcode = kMipsNop;
  RegStorage t_reg = AllocTemp();

  if (r_dest.IsFloat()) {
    DCHECK(r_dest.IsSingle());
    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
    size = kSingle;
  } else {
    if (size == kSingle)
      size = k32;
  }

  if (!scale) {
    first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
    NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
  }

  switch (size) {
    case kSingle:
      opcode = kMipsFlwc1;
      break;
    case k32:
    case kReference:
      opcode = kMipsLw;
      break;
    case kUnsignedHalf:
      opcode = kMipsLhu;
      break;
    case kSignedHalf:
      opcode = kMipsLh;
      break;
    case kUnsignedByte:
      opcode = kMipsLbu;
      break;
    case kSignedByte:
      opcode = kMipsLb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexed";
  }

  res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
  FreeTemp(t_reg);
  return (first) ? first : res;
}

/* Store value at base + scaled index. */
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
  LIR *first = nullptr;
  MipsOpCode opcode = kMipsNop;
  RegStorage t_reg = AllocTemp();

  if (r_src.IsFloat()) {
    DCHECK(r_src.IsSingle());
    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
    size = kSingle;
  } else {
    if (size == kSingle)
      size = k32;
  }

  if (!scale) {
    first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
    NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
  }

  switch (size) {
    case kSingle:
      opcode = kMipsFswc1;
      break;
    case k32:
    case kReference:
      opcode = kMipsSw;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kMipsSh;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kMipsSb;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexed";
  }
  NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
  // Release the address temp, mirroring LoadBaseIndexed.
  FreeTemp(t_reg);
  return first;
}

// FIXME: don't split r_dest into 2 containers.
LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                   OpSize size) {
/*
 * Load value from base + displacement.  Optionally perform null check
 * on base (which must have an associated s_reg and MIR).  If not
 * performing null check, incoming MIR can be null. IMPORTANT: this
 * code must not allocate any new temps.  If a new register is needed
 * and base and dest are the same, spill some other register to
 * rlp and then restore.
 */
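// For example, a wide (k64/kDouble) load with an in-range displacement expands
// to two lw/lwc1 instructions at displacement + LOWORD_OFFSET and
// displacement + HIWORD_OFFSET.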
  LIR *res;
  LIR *load = nullptr;
  LIR *load2 = nullptr;
  MipsOpCode opcode = kMipsNop;
  bool short_form = IS_SIMM16(displacement);
  bool pair = r_dest.IsPair();

  switch (size) {
    case k64:
    case kDouble:
      if (!pair) {
        // Form 64-bit pair
        r_dest = Solo64ToPair64(r_dest);
        pair = true;
      }
      if (r_dest.IsFloat()) {
        DCHECK_EQ(r_dest.GetLowReg(), r_dest.GetHighReg() - 1);
        opcode = kMipsFlwc1;
      } else {
        opcode = kMipsLw;
      }
      short_form = IS_SIMM16_2WORD(displacement);
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMipsLw;
      if (r_dest.IsFloat()) {
        opcode = kMipsFlwc1;
        DCHECK(r_dest.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = kMipsLhu;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = kMipsLh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = kMipsLbu;
      break;
    case kSignedByte:
      opcode = kMipsLb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseDispBody";
  }

  if (short_form) {
    if (!pair) {
      load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
    } else {
      load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET,
                           r_base.GetReg());
      load2 = NewLIR3(opcode, r_dest.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
    }
  } else {
    if (pair) {
      RegStorage r_tmp = AllocTemp();
      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
      load = NewLIR3(opcode, r_dest.GetLowReg(), LOWORD_OFFSET, r_tmp.GetReg());
      load2 = NewLIR3(opcode, r_dest.GetHighReg(), HIWORD_OFFSET, r_tmp.GetReg());
      FreeTemp(r_tmp);
    } else {
      RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
      load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
      if (r_tmp != r_dest)
        FreeTemp(r_tmp);
    }
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rMIPS_SP);
    AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                            true /* is_load */, pair /* is64bit */);
    if (pair) {
      AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                              true /* is_load */, pair /* is64bit */);
    }
  }
  return load;
}

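// Volatile wide loads are routed through GenAtomic64Load; all other volatile
// loads are followed by a kLoadAny barrier.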
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                               OpSize size, VolatileKind is_volatile) {
  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
    // Do atomic 64-bit load.
    return GenAtomic64Load(r_base, displacement, r_dest);
  }

  // TODO: base this on target.
  if (size == kWord) {
    size = k32;
  }
  LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kLoadAny);
  }

  return load;
}

// FIXME: don't split r_src into 2 containers.
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
                                    RegStorage r_src, OpSize size) {
  LIR *res;
  LIR *store = nullptr;
  LIR *store2 = nullptr;
  MipsOpCode opcode = kMipsNop;
  bool short_form = IS_SIMM16(displacement);
  bool pair = r_src.IsPair();

  switch (size) {
    case k64:
    case kDouble:
      if (!pair) {
        // Form 64-bit pair
        r_src = Solo64ToPair64(r_src);
        pair = true;
      }
      if (r_src.IsFloat()) {
        DCHECK_EQ(r_src.GetLowReg(), r_src.GetHighReg() - 1);
        opcode = kMipsFswc1;
      } else {
        opcode = kMipsSw;
      }
      short_form = IS_SIMM16_2WORD(displacement);
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMipsSw;
      if (r_src.IsFloat()) {
        opcode = kMipsFswc1;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kMipsSh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kMipsSb;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseDispBody";
  }

  if (short_form) {
    if (!pair) {
      store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
    } else {
      store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET,
                            r_base.GetReg());
      store2 = NewLIR3(opcode, r_src.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
    }
  } else {
    RegStorage r_scratch = AllocTemp();
    res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
    if (!pair) {
      store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
    } else {
      store = NewLIR3(opcode, r_src.GetLowReg(), LOWORD_OFFSET, r_scratch.GetReg());
      store2 = NewLIR3(opcode, r_src.GetHighReg(), HIWORD_OFFSET, r_scratch.GetReg());
    }
    FreeTemp(r_scratch);
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rMIPS_SP);
    AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                            false /* is_load */, pair /* is64bit */);
    if (pair) {
      AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                              false /* is_load */, pair /* is64bit */);
    }
  }

  return res;
}

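// Volatile stores are bracketed with barriers: kAnyStore before the store and
// kAnyAny after it (a conservative substitute for StoreLoad). Volatile wide
// stores are routed through GenAtomic64Store.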
LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                OpSize size, VolatileKind is_volatile) {
  if (is_volatile == kVolatile) {
    // Ensure that prior accesses become visible to other threads first.
    GenMemBarrier(kAnyStore);
  }

  LIR* store;
  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
    // Do atomic 64-bit store.
    store = GenAtomic64Store(r_base, displacement, r_src);
  } else {
    // TODO: base this on target.
    if (size == kWord) {
      size = k32;
    }
    store = StoreBaseDispBody(r_base, displacement, r_src, size);
  }

  if (UNLIKELY(is_volatile == kVolatile)) {
    // Preserve order with respect to any subsequent volatile loads.
    // We need StoreLoad, but that generally requires the most expensive barrier.
    GenMemBarrier(kAnyAny);
  }

  return store;
}

LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for MIPS";
  return nullptr;
}

LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
  return nullptr;
}

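// r_tgt is expected to already hold the trampoline address, so invoking it is
// just a jalr (kOpBlx) or jr (kOpBx) emitted through OpReg.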
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
  return OpReg(op, r_tgt);
}

}  // namespace art