ARMAddressingModes.h revision 7ce057983ea7b8ad42d5cca1bb5d3f6941662269
//===- ARMAddressingModes.h - ARM Addressing Modes --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM addressing mode implementation stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_ARM_ARMADDRESSINGMODES_H
#define LLVM_TARGET_ARM_ARMADDRESSINGMODES_H

#include "llvm/Support/MathExtras.h"
#include <cassert>

namespace llvm {

/// ARM_AM - ARM Addressing Mode Stuff
namespace ARM_AM {
  enum ShiftOpc {
    no_shift = 0,
    asr,
    lsl,
    lsr,
    ror,
    rrx
  };

  enum AddrOpc {
    sub = 0,
    add
  };

  static inline const char *getAddrOpcStr(AddrOpc Op) {
    return Op == sub ? "-" : "";
  }

  static inline const char *getShiftOpcStr(ShiftOpc Op) {
    switch (Op) {
    default: assert(0 && "Unknown shift opc!");
    case ARM_AM::asr: return "asr";
    case ARM_AM::lsl: return "lsl";
    case ARM_AM::lsr: return "lsr";
    case ARM_AM::ror: return "ror";
    case ARM_AM::rrx: return "rrx";
    }
  }

  static inline unsigned getShiftOpcEncoding(ShiftOpc Op) {
    switch (Op) {
    default: assert(0 && "Unknown shift opc!");
    case ARM_AM::asr: return 2;
    case ARM_AM::lsl: return 0;
    case ARM_AM::lsr: return 1;
    case ARM_AM::ror: return 3;
    }
  }

  enum AMSubMode {
    bad_am_submode = 0,
    ia,
    ib,
    da,
    db
  };

  static inline const char *getAMSubModeStr(AMSubMode Mode) {
    switch (Mode) {
    default: assert(0 && "Unknown addressing sub-mode!");
    case ARM_AM::ia: return "ia";
    case ARM_AM::ib: return "ib";
    case ARM_AM::da: return "da";
    case ARM_AM::db: return "db";
    }
  }

  /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits.
  ///
  static inline unsigned rotr32(unsigned Val, unsigned Amt) {
    assert(Amt < 32 && "Invalid rotate amount");
    return (Val >> Amt) | (Val << ((32-Amt)&31));
  }

  /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
  ///
  static inline unsigned rotl32(unsigned Val, unsigned Amt) {
    assert(Amt < 32 && "Invalid rotate amount");
    return (Val << Amt) | (Val >> ((32-Amt)&31));
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #1: shift_operand with registers
  //===--------------------------------------------------------------------===//
  //
  // This 'addressing mode' is used for arithmetic instructions.  It can
  // represent things like:
  //   reg
  //   reg [asr|lsl|lsr|ror|rrx] reg
  //   reg [asr|lsl|lsr|ror|rrx] imm
  //
  // This is stored as three operands [rega, regb, opc].  The first is the base
  // reg, the second is the shift-amount register (or reg0 if the shift amount
  // is an immediate or absent).  The third operand encodes the shift opcode
  // and the immediate shift amount when a register shift amount isn't present.
  //
  static inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
    return ShOp | (Imm << 3);
  }
  static inline unsigned getSORegOffset(unsigned Op) {
    return Op >> 3;
  }
  static inline ShiftOpc getSORegShOp(unsigned Op) {
    return (ShiftOpc)(Op & 7);
  }
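  // Illustrative sketch (not part of the original header): the function name
  // and sample values below are invented to show how the accessors above
  // round-trip a shifter operand such as "Rm, lsl #4".
  static inline bool exampleSORegEncoding() {
    unsigned Opc = getSORegOpc(lsl, 4);   // (4 << 3) | lsl == 0x22
    return getSORegOffset(Opc) == 4 && getSORegShOp(Opc) == lsl;
  }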

  /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return
  /// the 8-bit imm value.
  static inline unsigned getSOImmValImm(unsigned Imm) {
    return Imm & 0xFF;
  }
  /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return
  /// the rotate amount.
  static inline unsigned getSOImmValRot(unsigned Imm) {
    return (Imm >> 8) * 2;
  }
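  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show how the two fields of an so_imm encoding are read back.
  // The encoding 0x4FF holds imm=0xFF with a rotate field of 4, so the
  // materialized value is 0xFF rotated right by 8, i.e. 0xFF000000.
  static inline bool exampleSOImmFields() {
    unsigned Enc = 0x4FF;
    return getSOImmValImm(Enc) == 0xFF && getSOImmValRot(Enc) == 8 &&
           rotr32(getSOImmValImm(Enc), getSOImmValRot(Enc)) == 0xFF000000;
  }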

  /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
  /// computing the rotate amount to use.  If this immediate value cannot be
  /// handled with a single shifter-op, determine a good rotate amount that will
  /// take a maximal chunk of bits out of the immediate.
  static inline unsigned getSOImmValRotate(unsigned Imm) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Imm & ~255U) == 0) return 0;

    // Use CTZ to compute the rotate amount.
    unsigned TZ = CountTrailingZeros_32(Imm);

    // Rotate amount must be even.  Something like 0x200 must be rotated 8 bits,
    // not 9.
    unsigned RotAmt = TZ & ~1;

    // If we can handle this spread, return it.
    if ((rotr32(Imm, RotAmt) & ~255U) == 0)
      return (32-RotAmt)&31;  // HW rotates right, not left.

    // For values like 0xF000000F, we should ignore the low 6 bits, then
    // retry the hunt.
    if (Imm & 63U) {
      unsigned TZ2 = CountTrailingZeros_32(Imm & ~63U);
      unsigned RotAmt2 = TZ2 & ~1;
      if ((rotr32(Imm, RotAmt2) & ~255U) == 0)
        return (32-RotAmt2)&31;  // HW rotates right, not left.
    }

    // Otherwise, we have no way to cover this span of bits with a single
    // shifter_op immediate.  Return a chunk of bits that will be useful to
    // handle.
    return (32-RotAmt)&31;  // HW rotates right, not left.
  }
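  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show the rotate amounts chosen for two sample immediates,
  // including the wrap-around case handled by the second hunt above.
  static inline bool exampleSOImmValRotate() {
    // 0x00AB0000 is 0xAB rotated right by 16; 0xF000000F wraps around and is
    // 0xFF rotated right by 4.
    return getSOImmValRotate(0x00AB0000) == 16 &&
           getSOImmValRotate(0xF000000F) == 4 &&
           rotr32(0xFF, 4) == 0xF000000F;
  }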

  /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit
  /// into a shifter_operand immediate operand, return the 12-bit encoding for
  /// it.  If not, return -1.
  static inline int getSOImmVal(unsigned Arg) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Arg & ~255U) == 0) return Arg;

    unsigned RotAmt = getSOImmValRotate(Arg);

    // If this cannot be handled with a single shifter_op, bail out.
    if (rotr32(~255U, RotAmt) & Arg)
      return -1;

    // Encode this correctly.
    return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
  }
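  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show a value that fits in a single shifter_operand and one
  // that does not.
  static inline bool exampleSOImmVal() {
    // 0x00AB0000 encodes as imm=0xAB with rotate field 8 (rotate right by 16).
    // 0x00AB00CD needs more than one 8-bit chunk, so it is rejected.
    return getSOImmVal(0x00AB0000) == 0x8AB && getSOImmVal(0x00AB00CD) == -1;
  }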

  /// isSOImmTwoPartVal - Return true if the specified value can be obtained by
  /// or'ing together two SOImmVal's.
  static inline bool isSOImmTwoPartVal(unsigned V) {
    // If this can be handled with a single shifter_op, bail out.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;
    if (V == 0)
      return false;

    // If this can be handled with two shifter_op's, accept.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;
    return V == 0;
  }

  /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
  /// return the first chunk of it.
  static inline unsigned getSOImmTwoPartFirst(unsigned V) {
    return rotr32(255U, getSOImmValRotate(V)) & V;
  }

  /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
  /// return the second chunk of it.
  static inline unsigned getSOImmTwoPartSecond(unsigned V) {
    // Mask out the first hunk.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;

    // Take what's left.
    assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
    return V;
  }
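  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show how a value that needs two shifter_op immediates is
  // split by the functions above.
  static inline bool exampleSOImmTwoPart() {
    // 0xFF0000FF is not a single so_imm, but it is the OR of two of them.
    return isSOImmTwoPartVal(0xFF0000FF) &&
           getSOImmTwoPartFirst(0xFF0000FF) == 0x000000FF &&
           getSOImmTwoPartSecond(0xFF0000FF) == 0xFF000000;
  }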

  /// getThumbImmValShift - Try to handle Imm with an 8-bit immediate followed
  /// by a left shift. Returns the shift amount to use.
  static inline unsigned getThumbImmValShift(unsigned Imm) {
    // 8-bit (or less) immediates are trivially immediate operands with a shift
    // of zero.
    if ((Imm & ~255U) == 0) return 0;

    // Use CTZ to compute the shift amount.
    return CountTrailingZeros_32(Imm);
  }

  /// isThumbImmShiftedVal - Return true if the specified value can be obtained
  /// by left shifting an 8-bit immediate.
  static inline bool isThumbImmShiftedVal(unsigned V) {
    // If V is an 8-bit immediate shifted left by getThumbImmValShift(V), then
    // every set bit lies within that shifted byte and nothing survives the
    // mask below.
    V = (~255U << getThumbImmValShift(V)) & V;
    return V == 0;
  }

  /// getThumbImm16ValShift - Try to handle Imm with a 16-bit immediate followed
  /// by a left shift. Returns the shift amount to use.
  static inline unsigned getThumbImm16ValShift(unsigned Imm) {
    // 16-bit (or less) immediates are trivially immediate operands with a shift
    // of zero.
    if ((Imm & ~65535U) == 0) return 0;

    // Use CTZ to compute the shift amount.
    return CountTrailingZeros_32(Imm);
  }

  /// isThumbImm16ShiftedVal - Return true if the specified value can be
  /// obtained by left shifting a 16-bit immediate.
  static inline bool isThumbImm16ShiftedVal(unsigned V) {
    // If V is a 16-bit immediate shifted left by getThumbImm16ValShift(V),
    // then every set bit lies within that shifted halfword and nothing
    // survives the mask below.
    V = (~65535U << getThumbImm16ValShift(V)) & V;
    return V == 0;
  }

  /// getThumbImmNonShiftedVal - If V is a value that satisfies
  /// isThumbImmShiftedVal, return the non-shifted value.
  static inline unsigned getThumbImmNonShiftedVal(unsigned V) {
    return V >> getThumbImmValShift(V);
  }
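  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show a Thumb shifted immediate.  0x2A00 is 0x15 shifted left
  // by 9, so it is accepted and splits back into those two pieces.
  static inline bool exampleThumbImmShiftedVal() {
    return isThumbImmShiftedVal(0x2A00) &&
           getThumbImmValShift(0x2A00) == 9 &&
           getThumbImmNonShiftedVal(0x2A00) == 0x15;
  }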


  /// getT2SOImmValSplatVal - Return the 12-bit encoded representation
  /// if the specified value can be obtained by splatting the low 8 bits
  /// into every other byte or every byte of a 32-bit value. i.e.,
  ///     00000000 00000000 00000000 abcdefgh    control = 0
  ///     00000000 abcdefgh 00000000 abcdefgh    control = 1
  ///     abcdefgh 00000000 abcdefgh 00000000    control = 2
  ///     abcdefgh abcdefgh abcdefgh abcdefgh    control = 3
  /// Return -1 if none of the above apply.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmValSplatVal(unsigned V) {
    unsigned u, Vs, Imm;
    // control = 0
    if ((V & 0xffffff00) == 0)
      return V;

    // If the value is zeroes in the first byte, just shift those off
    Vs = ((V & 0xff) == 0) ? V >> 8 : V;
    // Any passing value only has 8 bits of payload, splatted across the word
    Imm = Vs & 0xff;
    // Likewise, any passing values have the payload splatted into the 3rd byte
    u = Imm | (Imm << 16);

    // control = 1 or 2
    if (Vs == u)
      return (((Vs == V) ? 1 : 2) << 8) | Imm;

    // control = 3
    if (Vs == (u | (u << 8)))
      return (3 << 8) | Imm;

    return -1;
  }
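  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show the encodings produced for the three replicated forms.
  static inline bool exampleT2SOImmSplat() {
    return getT2SOImmValSplatVal(0x00AB00AB) == 0x1AB &&  // control = 1
           getT2SOImmValSplatVal(0xAB00AB00) == 0x2AB &&  // control = 2
           getT2SOImmValSplatVal(0xABABABAB) == 0x3AB &&  // control = 3
           getT2SOImmValSplatVal(0x00AB00CD) == -1;       // not a splat
  }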

  /// getT2SOImmValRotateVal - Return the 12-bit encoded representation if the
  /// specified value is a rotated 8-bit value. Return -1 if no rotation
  /// encoding is possible.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmValRotateVal(unsigned V) {
    unsigned RotAmt = CountLeadingZeros_32(V);
    if (RotAmt >= 24)
      return -1;

    // If 'V' can be handled with a single shifter_op, return the value.
    if ((rotr32(0xff000000U, RotAmt) & V) == V)
      return (rotr32(V, 24 - RotAmt) & 0x7f) | ((RotAmt + 8) << 7);

    return -1;
  }
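  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show one rotated 8-bit value and one value with more than
  // eight significant bits, which is rejected.
  static inline bool exampleT2SOImmRotate() {
    // 0x00AB0000 is 0xAB rotated right by 16: the rotation amount (16) lands
    // in the upper five bits of the encoding and the low 7 bits of 0xAB in
    // bits 6-0.
    return getT2SOImmValRotateVal(0x00AB0000) == 0x82B &&
           getT2SOImmValRotateVal(0x00AB00CD) == -1;
  }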

  /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
  /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
  /// encoding for it.  If not, return -1.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmVal(unsigned Arg) {
    // If 'Arg' is an 8-bit splat, then get the encoded value.
    int Splat = getT2SOImmValSplatVal(Arg);
    if (Splat != -1)
      return Splat;

    // If 'Arg' can be handled with a single shifter_op return the value.
    int Rot = getT2SOImmValRotateVal(Arg);
    if (Rot != -1)
      return Rot;

    return -1;
  }

  static inline unsigned getT2SOImmValRotate(unsigned V) {
    if ((V & ~255U) == 0) return 0;
    // Use CTZ to compute the rotate amount.
    unsigned RotAmt = CountTrailingZeros_32(V);
    return (32 - RotAmt) & 31;
  }

  static inline bool isT2SOImmTwoPartVal (unsigned Imm) {
    unsigned V = Imm;
    // Passing values can be any combination of splat values and shifter
    // values. If this can be handled with a single shifter or splat, bail
    // out. Those should be handled directly, not with a two-part val.
    if (getT2SOImmValSplatVal(V) != -1)
      return false;
    V = rotr32 (~255U, getT2SOImmValRotate(V)) & V;
    if (V == 0)
      return false;

    // If this can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Likewise, try masking out a splat value first.
    V = Imm;
    if (getT2SOImmValSplatVal(V & 0xff00ff00U) != -1)
      V &= ~0xff00ff00U;
    else if (getT2SOImmValSplatVal(V & 0x00ff00ffU) != -1)
      V &= ~0x00ff00ffU;
    // If what's left can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Otherwise, do not accept.
    return false;
  }

  static inline unsigned getT2SOImmTwoPartFirst(unsigned Imm) {
    assert (isT2SOImmTwoPartVal(Imm) &&
            "Immediate cannot be encoded as two part immediate!");
    // Try a shifter operand as one part
    unsigned V = rotr32 (~255, getT2SOImmValRotate(Imm)) & Imm;
    // If the rest is encodable as an immediate, then return it.
    if (getT2SOImmVal(V) != -1) return V;

    // Try masking out a splat value first.
    if (getT2SOImmValSplatVal(Imm & 0xff00ff00U) != -1)
      return Imm & 0xff00ff00U;

    // The other splat is all that's left as an option.
    assert (getT2SOImmValSplatVal(Imm & 0x00ff00ffU) != -1);
    return Imm & 0x00ff00ffU;
  }

  static inline unsigned getT2SOImmTwoPartSecond(unsigned Imm) {
    // Mask out the first hunk
    Imm ^= getT2SOImmTwoPartFirst(Imm);
    // Return what's left
    assert (getT2SOImmVal(Imm) != -1 &&
            "Unable to encode second part of T2 two part SO immediate");
    return Imm;
  }
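  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show a value that needs two Thumb-2 immediates and how it is
  // split by the functions above.
  static inline bool exampleT2SOImmTwoPart() {
    // 0x0000ABCD is neither a splat nor a rotated 8-bit value, but it is the
    // OR of 0x0000AB00 and 0x000000CD, each of which is encodable on its own.
    return getT2SOImmVal(0xABCD) == -1 &&
           isT2SOImmTwoPartVal(0xABCD) &&
           getT2SOImmTwoPartFirst(0xABCD) == 0xAB00 &&
           getT2SOImmTwoPartSecond(0xABCD) == 0xCD;
  }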


  //===--------------------------------------------------------------------===//
  // Addressing Mode #2
  //===--------------------------------------------------------------------===//
  //
  // This is used for most simple load/store instructions.
  //
  // addrmode2 := reg +/- reg shop imm
  // addrmode2 := reg +/- imm12
  //
  // The first operand is always a Reg.  The second operand is a reg if in
  // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
  // in bit 12, the immediate in bits 0-11, the shift op in bits 13-15, and
  // the index mode in bits 16-17.
  //
  // If this addressing mode is a frame index (before prolog/epilog insertion
  // and code rewriting), this operand will have the form:  FI#, reg0, <offs>
  // with no shift amount for the frame offset.
  //
  static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO,
                                   unsigned IdxMode = 0) {
    assert(Imm12 < (1 << 12) && "Imm too large!");
    bool isSub = Opc == sub;
    return Imm12 | ((int)isSub << 12) | (SO << 13) | (IdxMode << 16);
  }
  static inline unsigned getAM2Offset(unsigned AM2Opc) {
    return AM2Opc & ((1 << 12)-1);
  }
  static inline AddrOpc getAM2Op(unsigned AM2Opc) {
    return ((AM2Opc >> 12) & 1) ? sub : add;
  }
  static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
    return (ShiftOpc)((AM2Opc >> 13) & 7);
  }
  static inline unsigned getAM2IdxMode(unsigned AM2Opc) {
    return (AM2Opc >> 16);
  }
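  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show how the fields pack for the reg/reg form "Rn, -Rm, lsl #4":
  // the shift immediate sits in bits 0-11, the sub flag in bit 12 and the
  // shift opcode in bits 13-15.
  static inline bool exampleAM2Opc() {
    unsigned Opc = getAM2Opc(sub, 4, lsl);
    return getAM2Offset(Opc) == 4 && getAM2Op(Opc) == sub &&
           getAM2ShiftOpc(Opc) == lsl && getAM2IdxMode(Opc) == 0;
  }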


  //===--------------------------------------------------------------------===//
  // Addressing Mode #3
  //===--------------------------------------------------------------------===//
  //
  // This is used for sign-extending loads, and load/store-pair instructions.
  //
  // addrmode3 := reg +/- reg
  // addrmode3 := reg +/- imm8
  //
  // The first operand is always a Reg.  The second operand is a reg if in
  // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
  // in bit 8, the immediate in bits 0-7, and the index mode in bits 9-10.

  /// getAM3Opc - This function encodes the addrmode3 opc field.
  static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset,
                                   unsigned IdxMode = 0) {
    bool isSub = Opc == sub;
    return ((int)isSub << 8) | Offset | (IdxMode << 9);
  }
  static inline unsigned char getAM3Offset(unsigned AM3Opc) {
    return AM3Opc & 0xFF;
  }
  static inline AddrOpc getAM3Op(unsigned AM3Opc) {
    return ((AM3Opc >> 8) & 1) ? sub : add;
  }
  static inline unsigned getAM3IdxMode(unsigned AM3Opc) {
    return (AM3Opc >> 9);
  }
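  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show an addrmode3 encoding for a -4 immediate offset with no
  // index mode.
  static inline bool exampleAM3Opc() {
    unsigned Opc = getAM3Opc(sub, 4);   // 4 | (1 << 8)
    return getAM3Offset(Opc) == 4 && getAM3Op(Opc) == sub &&
           getAM3IdxMode(Opc) == 0;
  }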

  //===--------------------------------------------------------------------===//
  // Addressing Mode #4
  //===--------------------------------------------------------------------===//
  //
  // This is used for load / store multiple instructions.
  //
  // addrmode4 := reg, <mode>
  //
  // The four modes are:
  //    IA - Increment after
  //    IB - Increment before
  //    DA - Decrement after
  //    DB - Decrement before
  // For VFP instructions, only the IA and DB modes are valid.

  static inline AMSubMode getAM4SubMode(unsigned Mode) {
    return (AMSubMode)(Mode & 0x7);
  }

  static inline unsigned getAM4ModeImm(AMSubMode SubMode) {
    return (int)SubMode;
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #5
  //===--------------------------------------------------------------------===//
  //
  // This is used for coprocessor instructions, such as FP load/stores.
  //
  // addrmode5 := reg +/- imm8*4
  //
  // The first operand is always a Reg.  The second operand encodes the
  // operation in bit 8 and the immediate in bits 0-7.

  /// getAM5Opc - This function encodes the addrmode5 opc field.
  static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
    bool isSub = Opc == sub;
    return ((int)isSub << 8) | Offset;
  }
  static inline unsigned char getAM5Offset(unsigned AM5Opc) {
    return AM5Opc & 0xFF;
  }
  static inline AddrOpc getAM5Op(unsigned AM5Opc) {
    return ((AM5Opc >> 8) & 1) ? sub : add;
  }
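  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show an addrmode5 encoding.  The 8-bit field counts 4-byte
  // words, so an offset of -12 bytes is stored as sub with Offset == 3.
  static inline bool exampleAM5Opc() {
    unsigned Opc = getAM5Opc(sub, 3);   // 3 | (1 << 8)
    return getAM5Offset(Opc) == 3 && getAM5Op(Opc) == sub;
  }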

  //===--------------------------------------------------------------------===//
  // Addressing Mode #6
  //===--------------------------------------------------------------------===//
  //
  // This is used for NEON load / store instructions.
  //
  // addrmode6 := reg with optional alignment
  //
  // This is stored in two operands [regaddr, align].  The first is the
  // address register.  The second operand is the value of the alignment
  // specifier in bytes or zero if no explicit alignment.
  // Valid alignments depend on the specific instruction.

  //===--------------------------------------------------------------------===//
  // NEON Modified Immediates
  //===--------------------------------------------------------------------===//
  //
  // Several NEON instructions (e.g., VMOV) take a "modified immediate"
  // vector operand, where a small immediate encoded in the instruction
  // specifies a full NEON vector value.  These modified immediates are
  // represented here as encoded integers.  The low 8 bits hold the immediate
  // value; bit 12 holds the "Op" field of the instruction, and bits 11-8 hold
  // the "Cmode" field of the instruction.  The interfaces below treat the
  // Op and Cmode values as a single 5-bit value.

  static inline unsigned createNEONModImm(unsigned OpCmode, unsigned Val) {
    return (OpCmode << 8) | Val;
  }
  static inline unsigned getNEONModImmOpCmode(unsigned ModImm) {
    return (ModImm >> 8) & 0x1f;
  }
  static inline unsigned getNEONModImmVal(unsigned ModImm) {
    return ModImm & 0xff;
  }

  /// decodeNEONModImm - Decode a NEON modified immediate value into the
  /// element value and the element size in bits.  (If the element size is
  /// smaller than the vector, it is splatted into all the elements.)
  static inline uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits) {
    unsigned OpCmode = getNEONModImmOpCmode(ModImm);
    unsigned Imm8 = getNEONModImmVal(ModImm);
    uint64_t Val = 0;

    if (OpCmode == 0xe) {
      // 8-bit vector elements
      Val = Imm8;
      EltBits = 8;
    } else if ((OpCmode & 0xc) == 0x8) {
      // 16-bit vector elements
      unsigned ByteNum = (OpCmode & 0x6) >> 1;
      Val = Imm8 << (8 * ByteNum);
      EltBits = 16;
    } else if ((OpCmode & 0x8) == 0) {
      // 32-bit vector elements, zero with one byte set
      unsigned ByteNum = (OpCmode & 0x6) >> 1;
      Val = Imm8 << (8 * ByteNum);
      EltBits = 32;
    } else if ((OpCmode & 0xe) == 0xc) {
      // 32-bit vector elements, one byte with low bits set
      unsigned ByteNum = 1 + (OpCmode & 0x1);
      Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum)));
      EltBits = 32;
    } else if (OpCmode == 0x1e) {
      // 64-bit vector elements
      for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
        if ((ModImm >> ByteNum) & 1)
          Val |= (uint64_t)0xff << (8 * ByteNum);
      }
      EltBits = 64;
    } else {
      assert(false && "Unsupported NEON immediate");
    }
    return Val;
  }
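  // Illustrative sketch (not part of the original header): the helper below is
  // invented to show two encode/decode round trips.  OpCmode 0xe selects
  // 8-bit elements, and OpCmode 0xa places the byte in bits 15-8 of a 16-bit
  // element.
  static inline bool exampleNEONModImm() {
    unsigned EltBits = 0;
    uint64_t V8 = decodeNEONModImm(createNEONModImm(0xe, 0xAB), EltBits);
    if (V8 != 0xAB || EltBits != 8)
      return false;
    uint64_t V16 = decodeNEONModImm(createNEONModImm(0xa, 0xAB), EltBits);
    return V16 == 0xAB00 && EltBits == 16;
  }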

  AMSubMode getLoadStoreMultipleSubMode(int Opcode);

} // end namespace ARM_AM
} // end namespace llvm

#endif