//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;


// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[Not64BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[Not64BitMode]>;
}

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}



// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset),
                               (implicit EFLAGS)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4KB in one go.  Touching the stack at 4KB increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in the correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to an ordinary call), such as changing the stack pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;
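
// A sketch of the expected expansion on 32-bit Windows (shown only for
// illustration; the authoritative sequence is produced by the X86WinAlloca
// lowering and differs between the MSVC and MinGW runtimes):
//     movl  $size, %eax      # allocation size in bytes
//     calll __chkstk         # probes each 4KB page; may also adjust ESP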

// When using segmented stacks, these are lowered into instructions which
// first check whether the current stacklet has enough free memory. If it
// does, memory is allocated by bumping the stack pointer. Otherwise, memory
// is allocated from the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[Not64BitMode]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
}

// The MSVC runtime contains an _ftol2 routine for converting floating-point
// to integer values. It has a strange calling convention: the input is
// popped from the x87 stack, and the return value is given in EDX:EAX. ECX is
// used as a temporary register. No other registers (aside from flags) are
// touched.
// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
// variant is unnecessary.

let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in {
  def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
                      "# win32 fptoui",
                      [(X86WinFTOL RFP32:$src)]>,
                    Requires<[Not64BitMode]>;

  def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
                      "# win32 fptoui",
                      [(X86WinFTOL RFP64:$src)]>,
                    Requires<[Not64BitMode]>;
}
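
// A rough sketch of what WIN_FTOL_* becomes, assuming the _ftol2 convention
// described above (the exact sequence is chosen during lowering):
//     fld  <src>       # push the input onto the x87 stack
//     call __ftol2     # pops ST(0); the result is returned in EDX:EAX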

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                     "ret\t#eh_return, addr: $addr",
                     [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;

}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}
} // SchedRW

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                          "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated at a higher level because the verifier
// would then see a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}
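
// A sketch of the lowering this pseudo is expected to receive in
// X86MCInstLower (shown here only for illustration):
//     retq                 # return past the __morestack call
//     movq %rax, %r10      # restore R10, which the thunk passed back in RAX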

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
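
// MOV32r0 is ultimately emitted as "xorl %reg, %reg", which is shorter than
// "movl $0, %reg" and is recognized as a zeroing idiom (no false dependency
// on the old register value) by modern x86 cores.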

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
  let AddedComplexity = 20;
}

// Materialize an i64 constant whose top 32 bits are zero. This could
// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
// zero-extension; however, that would make it more difficult to
// rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1, neverHasSideEffects = 1 in
def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
                     "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;

// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant, and for labels in the
// x86-64 small code model.
def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;

let AddedComplexity = 1 in
def : Pat<(i64 mov64imm32:$src),
          (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;
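
// A 32-bit "movl $imm, %reg" implicitly zeroes the upper 32 bits of the
// 64-bit register, so this encodes in 5 bytes (B8+rd id) instead of the
// 10 bytes that "movabsq $imm, %reg" (REX.W B8+rd io) would take.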

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                 [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                 [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU]
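
// These expand to "sbb reg, reg": subtracting a register from itself with
// borrow leaves 0 when CF is clear and all-ones (-1) when CF is set, which
// is exactly the X86setcc_c value.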


def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" in the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0): SETCC_CARRY is 0 or -1, and
// subtracting -1 is the same as adding the carry bit back in.
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                    [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[Not64BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[Not64BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                      [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_addr32",
                  [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_base_addr32",
                  [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                  Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                  [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_base_addr64",
                  [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                  Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack; on return, the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                "# TLSCall_32",
                [(X86TLSCall addr:$sym)]>,
                Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi; on return, the
// address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP, RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                  "# TLSCall_64",
                  [(X86TLSCall addr:$sym)]>,
                  Requires<[In64BitMode]>;


//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// X86 doesn't have 8-bit conditional moves. Use a custom inserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote;
// however, that requires promoting the operands and can induce additional
// i8 register pressure.
let usesCustomInserter = 1, Uses = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;
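
// The custom inserter expands CMOV_GR8 into a branch diamond; conceptually
// (a sketch only -- the real expansion builds basic blocks and a PHI):
//     jCC   .Lcopy1          # condition taken from EFLAGS
//     movb  %src2b, %dstb
//     jmp   .Ldone
//   .Lcopy1:
//     movb  %src1b, %dstb
//   .Ldone: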

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                    "#CMOV_GR32* PSEUDO!",
                    [(set GR32:$dst,
                      (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                    "#CMOV_GR16* PSEUDO!",
                    [(set GR16:$dst,
                      (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
} // Predicates = [NoCMov]

// fcmov doesn't handle all possible EFLAGS; provide a fallback if there is
// no SSE1.
let Predicates = [FPStackf32] in
def CMOV_RFP32 : I<0, Pseudo,
                    (outs RFP32:$dst),
                    (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                    "#CMOV_RFP32 PSEUDO!",
                    [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                                  EFLAGS))]>;
// fcmov doesn't handle all possible EFLAGS; provide a fallback if there is
// no SSE2.
let Predicates = [FPStackf64] in
def CMOV_RFP64 : I<0, Pseudo,
                    (outs RFP64:$dst),
                    (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                    "#CMOV_RFP64 PSEUDO!",
                    [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                    (outs RFP80:$dst),
                    (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                    "#CMOV_RFP80 PSEUDO!",
                    [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                                                  EFLAGS))]>;
} // usesCustomInserter = 1, Uses = [EFLAGS]


//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Pseudo atomic instructions

multiclass PSEUDO_ATOMIC_LOAD_BINOP<string mnemonic> {
  let usesCustomInserter = 1, mayLoad = 1, mayStore = 1 in {
    let Defs = [EFLAGS, AL] in
    def NAME#8  : I<0, Pseudo, (outs GR8:$dst),
                    (ins i8mem:$ptr, GR8:$val),
                    !strconcat(mnemonic, "8 PSEUDO!"), []>;
    let Defs = [EFLAGS, AX] in
    def NAME#16 : I<0, Pseudo,(outs GR16:$dst),
                    (ins i16mem:$ptr, GR16:$val),
                    !strconcat(mnemonic, "16 PSEUDO!"), []>;
    let Defs = [EFLAGS, EAX] in
    def NAME#32 : I<0, Pseudo, (outs GR32:$dst),
                    (ins i32mem:$ptr, GR32:$val),
                    !strconcat(mnemonic, "32 PSEUDO!"), []>;
    let Defs = [EFLAGS, RAX] in
    def NAME#64 : I<0, Pseudo, (outs GR64:$dst),
                    (ins i64mem:$ptr, GR64:$val),
                    !strconcat(mnemonic, "64 PSEUDO!"), []>;
  }
}

multiclass PSEUDO_ATOMIC_LOAD_BINOP_PATS<string name, string frag> {
  def : Pat<(!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val),
            (!cast<Instruction>(name # "8") addr:$ptr, GR8:$val)>;
  def : Pat<(!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val),
            (!cast<Instruction>(name # "16") addr:$ptr, GR16:$val)>;
  def : Pat<(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val),
            (!cast<Instruction>(name # "32") addr:$ptr, GR32:$val)>;
  def : Pat<(!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val),
            (!cast<Instruction>(name # "64") addr:$ptr, GR64:$val)>;
}

// Atomic exchange, and, or, xor
defm ATOMAND  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMAND">;
defm ATOMOR   : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMOR">;
defm ATOMXOR  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMXOR">;
defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMNAND">;
defm ATOMMAX  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMAX">;
defm ATOMMIN  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMIN">;
defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMAX">;
defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMIN">;

defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMAND",  "atomic_load_and">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMOR",   "atomic_load_or">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMXOR",  "atomic_load_xor">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMNAND", "atomic_load_nand">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMAX",  "atomic_load_max">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMIN",  "atomic_load_min">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMAX", "atomic_load_umax">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMIN", "atomic_load_umin">;

multiclass PSEUDO_ATOMIC_LOAD_BINOP6432<string mnemonic> {
  let usesCustomInserter = 1, Defs = [EFLAGS, EAX, EDX],
      mayLoad = 1, mayStore = 1, hasSideEffects = 0 in
    def NAME#6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                      (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                      !strconcat(mnemonic, "6432 PSEUDO!"), []>;
}

defm ATOMAND  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMAND">;
defm ATOMOR   : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMOR">;
defm ATOMXOR  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMXOR">;
defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMNAND">;
defm ATOMADD  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMADD">;
defm ATOMSUB  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSUB">;
defm ATOMMAX  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMAX">;
defm ATOMMIN  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMIN">;
defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMAX">;
defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMIN">;
defm ATOMSWAP : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSWAP">;

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
                    Sched<[WriteALULd, WriteRMW]>;
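
// This serves as a cheap full memory fence on targets without SSE2's MFENCE:
// a locked read-modify-write such as "lock orl %reg, (%esp)" serializes
// earlier loads and stores (a common idiom; $zero holds 0, so the memory
// value is unchanged).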

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                     "#MEMBARRIER",
                     [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                  MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                  !strconcat(mnemonic, "{b}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, LOCK;
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   !strconcat(mnemonic, "{w}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, OpSize16, LOCK;
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   !strconcat(mnemonic, "{l}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, OpSize32, LOCK;
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                    MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    !strconcat(mnemonic, "{q}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [], IIC_ALU_NONMEM>, LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                    ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                    !strconcat(mnemonic, "{b}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [], IIC_ALU_MEM>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                      !strconcat(mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                      !strconcat(mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                         ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [], IIC_ALU_MEM>, LOCK;

def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                      !strconcat(mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                      !strconcat(mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                       ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                       !strconcat(mnemonic, "{q}\t",
                                  "{$src2, $dst|$dst, $src2}"),
                       [], IIC_ALU_MEM>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;

// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
                 !strconcat(mnemonic, "{b}\t$dst"),
                 [], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
                 !strconcat(mnemonic, "{w}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
                 !strconcat(mnemonic, "{l}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
                  !strconcat(mnemonic, "{q}\t$dst"),
                  [], IIC_UNARY_MEM>, LOCK;
}
}

defm LOCK_INC    : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
defm LOCK_DEC    : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop,
                         InstrItinClass itin> {
let isCodeGenOnly = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)], itin>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag,
                          InstrItinClass itin8, InstrItinClass itin> {
let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
                                X86cas8, i64mem,
                                IIC_CMPX_LOCK_8B>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem,
                                 IIC_CMPX_LOCK_16B>, REX_W;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
                               X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag,
                             InstrItinClass itin8, InstrItinClass itin> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
                    itin8>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
                    itin>, OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
                    itin>, OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
                     itin>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
                               IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
             TB, LOCK;

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                      "#ACQUIRE_MOV PSEUDO!",
                      [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                      "#ACQUIRE_MOV PSEUDO!",
                      [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                      "#ACQUIRE_MOV PSEUDO!",
                      [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                      "#ACQUIRE_MOV PSEUDO!",
                      [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_8  addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//


// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                                  EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                                  EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V8F32 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V8F32 PSEUDO!",
                    [(set VR256:$dst,
                      (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V4F64 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V4F64 PSEUDO!",
                    [(set VR256:$dst,
                      (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V4I64 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V4I64 PSEUDO!",
                    [(set VR256:$dst,
                      (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V8I64 : I<0, Pseudo,
                    (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
                    "#CMOV_V8I64 PSEUDO!",
                    [(set VR512:$dst,
                      (v8i64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V8F64 : I<0, Pseudo,
                    (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
                    "#CMOV_V8F64 PSEUDO!",
                    [(set VR512:$dst,
                      (v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V16F32 : I<0, Pseudo,
                    (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
                    "#CMOV_V16F32 PSEUDO!",
                    [(set VR512:$dst,
                      (v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond,
                                          EFLAGS)))]>;
}


//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable addresses, when
// not in the small code model, should use 'movabs'.  FIXME: This is really a
// hack; the 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In the kernel code model, we can get the address of a label into a register
// with 'movq'.  FIXME: This is a hack; the 'imm' predicate of MOV64ri32
// should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// In the small code model with -static relocation mode, it is safe to store
// global addresses directly as immediates.  FIXME: This is really a hack; the
// 'imm' predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilogue, so
// they can never use callee-saved registers. That is the purpose of the
// GR64_TC register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11; every other volatile register can be in use, e.g. when calling a
// vararg function with 6 arguments.
//
// Match an X86tcret that uses fewer than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[Not64BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
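
// For example, "testl %eax, %eax" encodes in 2 bytes (85 C0), while
// "cmpl $0, %eax" needs at least 3 (83 F8 00); both set the same flags
// for a comparison against zero.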

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  let Predicates = [HasCMov] in {
    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
              (Inst16 GR16:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
              (Inst32 GR32:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
              (Inst64 GR64:$src2, addr:$src1)>;
  }
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs: the 32-bit load leaves the upper 32 bits
// of the 64-bit register in a defined (zero) state, so SUBREG_TO_REG is safe.
1130def : Pat<(extloadi64i1 addr:$src),
1131          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1132def : Pat<(extloadi64i8 addr:$src),
1133          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1134def : Pat<(extloadi64i16 addr:$src),
1135          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
1136def : Pat<(extloadi64i32 addr:$src),
1137          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
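// For example, (extloadi64i8 addr) becomes a plain "movzbl" from memory: the
// 32-bit zero-extending load also clears bits 63:32 of the full register, so
// the (i64 0) operand of SUBREG_TO_REG (the asserted value of the high bits)
// is honored and no partial-register write is left behind.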

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8  GR8 :$src)>;

// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;


// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register; the exceptions are TRUNCATE (which can be lowered to
// EXTRACT_SUBREG), CopyFromReg (which may be copying from a truncate), and
// x86's cmov (which writes nothing when its condition is false). Any other
// 32-bit operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
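// For example, in (i64 (zext (i32 (add GR32:$a, GR32:$b)))) the 32-bit add
// already zeroes bits 63:32 of its destination, so the zext selects to a
// SUBREG_TO_REG that emits no machine code at all.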

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// ultimately want to emit these instructions as an OR at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
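// Worked example: if the low four bits of the LHS are known to be zero (say
// it is a 16-byte-aligned pointer), then (or LHS, 7) cannot carry out of any
// bit position and computes exactly the same value as (add LHS, 7).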


// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
// Try this before selecting to OR.
let AddedComplexity = 5, SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order-specific; we want the ri8 forms to be listed first so
// that they are slightly preferred over the ri forms.
1217
1218def ADD16ri8_DB : I<0, Pseudo,
1219                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1220                    "", // orw/addw REG, imm8
1221                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
1222def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1223                    "", // orw/addw REG, imm
1224                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
1225
1226def ADD32ri8_DB : I<0, Pseudo,
1227                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1228                    "", // orl/addl REG, imm8
1229                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
1230def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1231                    "", // orl/addl REG, imm
1232                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
1233
1234
1235def ADD64ri8_DB : I<0, Pseudo,
1236                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
1237                    "", // orq/addq REG, imm8
1238                    [(set GR64:$dst, (or_is_add GR64:$src1,
1239                                                i64immSExt8:$src2))]>;
1240def ADD64ri32_DB : I<0, Pseudo,
1241                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
1242                      "", // orq/addq REG, imm
1243                      [(set GR64:$dst, (or_is_add GR64:$src1,
1244                                                  i64immSExt32:$src2))]>;
1245}
1246} // AddedComplexity, SchedRW
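// The payoff: unlike OR, a two-address ADD can later be converted to a
// three-address LEA (e.g. "leal 7(%eax), %ecx" computes %ecx = %eax + 7
// without clobbering %eax and without writing EFLAGS).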


//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
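// Encoding payoff, e.g. for 32 bits: "addl $128, %eax" needs a full 32-bit
// immediate (at least 5 bytes), while the equivalent "subl $-128, %eax" fits
// the sign-extended 8-bit immediate form (83 E8 80, 3 bytes).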

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
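// For example, (and GR64:$src, 0xF0F0F0F0) cannot use AND64ri32 (the imm32
// would be sign-extended to 0xFFFFFFFFF0F0F0F0), but "andl $0xF0F0F0F0, %eax"
// computes it directly: the 32-bit operation implicitly zeroes bits 63:32,
// which is exactly what the zero-extended mask requires.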


// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
           (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
             sub_16bit)>,
      Requires<[Not64BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                      (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                      sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
           (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
           (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;
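// MOVZX wins on both size and flags: "movzwl %ax, %eax" is 3 bytes (0F B7 C0)
// vs. 5 bytes for "andl $0xffff, %eax" (25 FF FF 00 00), and unlike AND it
// leaves EFLAGS untouched.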


// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
           (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
             sub_16bit)>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
           (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def : Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def : Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
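// (With a REX prefix present, the encodings for AH/BH/CH/DH instead address
// SPL/BPL/SIL/DIL, so an instruction that reads an h-register can never carry
// REX and must not have R8B-R15B or SPL/BPL/SIL/DIL as its other operands.)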

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                             sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;


// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
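// Besides being no more expensive than the shift, the ADDrr form is
// commutable and can be three-addressified, e.g. "leal (%rax,%rax), %ecx"
// doubles %eax into %ecx without clobbering the source.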

// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
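// The hardware masks the shift count to its low 5 bits (6 for 64-bit
// operands), so any mask whose low 5 (resp. 6) bits are all ones (e.g. 31,
// 63, or 0xff) cannot change the bits the shifter actually reads, and the
// AND can be dropped.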

// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
defm : MaskedShiftAmountPats<rotl, "ROL">;
defm : MaskedShiftAmountPats<rotr, "ROR">;

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
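// SETB_C*r expands to "sbb reg, reg", which materializes 0 or all-ones from
// the carry flag across the full-width register; since every bit of the
// result is already defined, widening it with anyext is free.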


//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;

// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r    GR16:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r    GR32:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r    GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r    GR16:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r    GR32:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r    GR64:$src)>;

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
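// BSF leaves its destination undefined when the source is zero, which is
// precisely the contract of cttz_zero_undef; a plain cttz would still have
// to define that case (e.g. with a branch or TZCNT).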

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
}
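// Rotating a 16-bit value left by 8 swaps its two bytes (e.g. 0x1234 becomes
// 0x3412), which is exactly a 16-bit bswap.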