/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
public:

    enum Condition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE,
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
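
    // Illustrative sketch of the ordered/unordered distinction (the 'masm',
    // 'fpReg1' and 'fpReg2' names below are assumptions for the example, not
    // part of this interface):
    //
    //     Jump eq = masm.branchDouble(DoubleEqual, fpReg1, fpReg2);
    //     // 'eq' is not taken if either operand is NaN.
    //     Jump neq = masm.branchDouble(DoubleNotEqualOrUnordered, fpReg1, fpReg2);
    //     // 'neq' is taken if either operand is NaN.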

    static const RegisterID stackPointerRegister = X86Registers::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be an Imm32, and the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
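    //
    // An illustrative sketch of typical use (the 'masm' instance and the
    // register choices are assumptions for the example, not part of this
    // interface):
    //
    //     masm.add32(Imm32(1), X86Registers::eax);                      // eax += 1
    //     masm.and32(X86Registers::edx, Address(X86Registers::esp, 4)); // [esp + 4] &= edx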

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(Imm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(Imm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shll_CLr(X86Registers::ecx);
            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.shll_CLr(shift_amount);
            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shll_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.shll_CLr(dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(Imm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.sarl_CLr(X86Registers::ecx);
            // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.sarl_CLr(shift_amount);
            // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.sarl_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.sarl_CLr(dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(Imm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }


    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(Imm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address).  The source for a store may be an Imm32.  Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
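    //
    // An illustrative sketch (the register choices are assumptions for the
    // example only):
    //
    //     masm.load32(Address(X86Registers::ebp, 8), X86Registers::eax); // eax = [ebp + 8]
    //     masm.store32(Imm32(0), Address(X86Registers::ebp, 8));         // [ebp + 8] = 0
    //     masm.store32(X86Registers::eax, X86Registers::ebp);            // [ebp] = eax, via an implicitly constructed address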

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
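    //
    // An illustrative sketch (the register choices are assumptions for the
    // example; callers are expected to have established SSE2 availability):
    //
    //     masm.loadDouble(Address(X86Registers::esp, 0), X86Registers::xmm0);  // xmm0 = [esp]
    //     masm.addDouble(X86Registers::xmm1, X86Registers::xmm0);              // xmm0 += xmm1
    //     masm.storeDouble(X86Registers::xmm0, Address(X86Registers::esp, 0)); // [esp] = xmm0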

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(Equal, dest, Imm32(0x80000000));
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, a branch is
    // appended to 'failureCases'.  The conversion may also fail for some
    // values that are representable in 32 bits (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    void zeroDouble(FPRegisterID srcDest)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(srcDest, srcDest);
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data.  Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack.  Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
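    //
    // An illustrative sketch (the register choice is an assumption for the
    // example only):
    //
    //     masm.push(X86Registers::eax); // decrements the stack pointer, then stores eax
    //     masm.pop(X86Registers::eax);  // reloads eax, then increments the stack pointer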

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.
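    //
    // On X86_64 'move' copies a full 64-bit register, while on 32-bit x86 it
    // is a plain 32-bit move; the conditional implementations below make this
    // concrete. An illustrative sketch (register choices are assumptions for
    // the example only):
    //
    //     masm.move(Imm32(0), X86Registers::eax);          // emits xorl %eax, %eax
    //     masm.swap(X86Registers::eax, X86Registers::edx); // exchanges the two registers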

    void move(Imm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the Imm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as LessThan,
    // GreaterThan, LessThanOrEqual, and GreaterThanOrEqual respectively;
    // for unsigned comparisons the conditions Below, Above, BelowOrEqual,
    // and AboveOrEqual are used.
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value
    // held in reg1, when treated as a signed 32-bit value, is less than or
    // equal to 5.
    //
    // The Zero and NonZero conditions test whether the first operand is
    // equal to zero; branchTest32 takes an optional second operand of a
    // mask under which to perform the test.
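    //
    // An illustrative sketch of creating and linking a branch (the names
    // below are assumptions for the example only):
    //
    //     Jump lessThan = masm.branch32(LessThan, X86Registers::eax, Imm32(5));
    //     // ... code emitted here executes when eax >= 5 ...
    //     lessThan.link(&masm); // code emitted after this point is the branch target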

public:
    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation.  The operation
    // is performed as normal, storing the result.
    //
    // * The Zero and NonZero conditions branch on whether the
    //   result is zero.
    // * The Overflow condition branches if the (signed) arithmetic
    //   operation caused an overflow to occur.
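    //
    // An illustrative overflow-checked addition (register choices are
    // assumptions for the example only):
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, X86Registers::edx, X86Registers::eax);
    //     // eax now holds eax + edx; 'overflowed' is taken on signed overflow.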

    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_mr(left.offset, left.base, right);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

protected:
    X86Assembler::Condition x86Condition(Condition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds we add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h