1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JITStubCall.h"
34#include "JITStubs.h"
35#include "JSArray.h"
36#include "JSFunction.h"
37#include "Interpreter.h"
38#include "ResultType.h"
39#include "SamplingTool.h"
40
41#ifndef NDEBUG
42#include <stdio.h>
43#endif
44
45using namespace std;
46
47namespace JSC {
48
49#if USE(JSVALUE32_64)
50
51void JIT::emit_op_negate(Instruction* currentInstruction)
52{
53    unsigned dst = currentInstruction[1].u.operand;
54    unsigned src = currentInstruction[2].u.operand;
55
56    emitLoad(src, regT1, regT0);
57
58    Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
59    addSlowCase(branch32(Equal, regT0, Imm32(0)));
60
61    neg32(regT0);
62    emitStoreInt32(dst, regT0, (dst == src));
63
64    Jump end = jump();
65
66    srcNotInt.link(this);
67    addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
68
69    xor32(Imm32(1 << 31), regT1);
70    store32(regT1, tagFor(dst));
71    if (dst != src)
72        store32(regT0, payloadFor(dst));
73
74    end.link(this);
75}
76
77void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
78{
79    unsigned dst = currentInstruction[1].u.operand;
80
81    linkSlowCase(iter); // 0 check
82    linkSlowCase(iter); // double check
83
84    JITStubCall stubCall(this, cti_op_negate);
85    stubCall.addArgument(regT1, regT0);
86    stubCall.call(dst);
87}
88
89void JIT::emit_op_jnless(Instruction* currentInstruction)
90{
91    unsigned op1 = currentInstruction[1].u.operand;
92    unsigned op2 = currentInstruction[2].u.operand;
93    unsigned target = currentInstruction[3].u.operand;
94
95    JumpList notInt32Op1;
96    JumpList notInt32Op2;
97
98    // Int32 less.
99    if (isOperandConstantImmediateInt(op1)) {
100        emitLoad(op2, regT3, regT2);
101        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
102        addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
103    } else if (isOperandConstantImmediateInt(op2)) {
104        emitLoad(op1, regT1, regT0);
105        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
106        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
107    } else {
108        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
109        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
110        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
111        addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
112    }
113
114    if (!supportsFloatingPoint()) {
115        addSlowCase(notInt32Op1);
116        addSlowCase(notInt32Op2);
117        return;
118    }
119    Jump end = jump();
120
121    // Double less.
122    emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
123    end.link(this);
124}
125
126void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
127{
128    unsigned op1 = currentInstruction[1].u.operand;
129    unsigned op2 = currentInstruction[2].u.operand;
130    unsigned target = currentInstruction[3].u.operand;
131
132    if (!supportsFloatingPoint()) {
133        if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
134            linkSlowCase(iter); // int32 check
135        linkSlowCase(iter); // int32 check
136    } else {
137        if (!isOperandConstantImmediateInt(op1)) {
138            linkSlowCase(iter); // double check
139            linkSlowCase(iter); // int32 check
140        }
141        if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
142            linkSlowCase(iter); // double check
143    }
144
145    JITStubCall stubCall(this, cti_op_jless);
146    stubCall.addArgument(op1);
147    stubCall.addArgument(op2);
148    stubCall.call();
149    emitJumpSlowToHot(branchTest32(Zero, regT0), target);
150}
151
152void JIT::emit_op_jless(Instruction* currentInstruction)
153{
154    unsigned op1 = currentInstruction[1].u.operand;
155    unsigned op2 = currentInstruction[2].u.operand;
156    unsigned target = currentInstruction[3].u.operand;
157
158    JumpList notInt32Op1;
159    JumpList notInt32Op2;
160
161    // Int32 less.
162    if (isOperandConstantImmediateInt(op1)) {
163        emitLoad(op2, regT3, regT2);
164        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
165        addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
166    } else if (isOperandConstantImmediateInt(op2)) {
167        emitLoad(op1, regT1, regT0);
168        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
169        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
170    } else {
171        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
172        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
173        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
174        addJump(branch32(LessThan, regT0, regT2), target);
175    }
176
177    if (!supportsFloatingPoint()) {
178        addSlowCase(notInt32Op1);
179        addSlowCase(notInt32Op2);
180        return;
181    }
182    Jump end = jump();
183
184    // Double less.
185    emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
186    end.link(this);
187}
188
189void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
190{
191    unsigned op1 = currentInstruction[1].u.operand;
192    unsigned op2 = currentInstruction[2].u.operand;
193    unsigned target = currentInstruction[3].u.operand;
194
195    if (!supportsFloatingPoint()) {
196        if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
197            linkSlowCase(iter); // int32 check
198        linkSlowCase(iter); // int32 check
199    } else {
200        if (!isOperandConstantImmediateInt(op1)) {
201            linkSlowCase(iter); // double check
202            linkSlowCase(iter); // int32 check
203        }
204        if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
205            linkSlowCase(iter); // double check
206    }
207
208    JITStubCall stubCall(this, cti_op_jless);
209    stubCall.addArgument(op1);
210    stubCall.addArgument(op2);
211    stubCall.call();
212    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
213}
214
215void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
216{
217    unsigned op1 = currentInstruction[1].u.operand;
218    unsigned op2 = currentInstruction[2].u.operand;
219    unsigned target = currentInstruction[3].u.operand;
220
221    JumpList notInt32Op1;
222    JumpList notInt32Op2;
223
224    // Int32 less.
225    if (isOperandConstantImmediateInt(op1)) {
226        emitLoad(op2, regT3, regT2);
227        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
228        addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
229    } else if (isOperandConstantImmediateInt(op2)) {
230        emitLoad(op1, regT1, regT0);
231        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
232        addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
233    } else {
234        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
235        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
236        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
237        addJump(branch32(GreaterThan, regT0, regT2), target);
238    }
239
240    if (!supportsFloatingPoint()) {
241        addSlowCase(notInt32Op1);
242        addSlowCase(notInt32Op2);
243        return;
244    }
245    Jump end = jump();
246
247    // Double less.
248    emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
249    end.link(this);
250}
251
252void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
253{
254    unsigned op1 = currentInstruction[1].u.operand;
255    unsigned op2 = currentInstruction[2].u.operand;
256    unsigned target = currentInstruction[3].u.operand;
257
258    if (!supportsFloatingPoint()) {
259        if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
260            linkSlowCase(iter); // int32 check
261        linkSlowCase(iter); // int32 check
262    } else {
263        if (!isOperandConstantImmediateInt(op1)) {
264            linkSlowCase(iter); // double check
265            linkSlowCase(iter); // int32 check
266        }
267        if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
268            linkSlowCase(iter); // double check
269    }
270
271    JITStubCall stubCall(this, cti_op_jlesseq);
272    stubCall.addArgument(op1);
273    stubCall.addArgument(op2);
274    stubCall.call();
275    emitJumpSlowToHot(branchTest32(Zero, regT0), target);
276}
277
278// LeftShift (<<)
279
280void JIT::emit_op_lshift(Instruction* currentInstruction)
281{
282    unsigned dst = currentInstruction[1].u.operand;
283    unsigned op1 = currentInstruction[2].u.operand;
284    unsigned op2 = currentInstruction[3].u.operand;
285
286    if (isOperandConstantImmediateInt(op2)) {
287        emitLoad(op1, regT1, regT0);
288        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
289        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
290        emitStoreInt32(dst, regT0, dst == op1);
291        return;
292    }
293
294    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
295    if (!isOperandConstantImmediateInt(op1))
296        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
297    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
298    lshift32(regT2, regT0);
299    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
300}
301
302void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
303{
304    unsigned dst = currentInstruction[1].u.operand;
305    unsigned op1 = currentInstruction[2].u.operand;
306    unsigned op2 = currentInstruction[3].u.operand;
307
308    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
309        linkSlowCase(iter); // int32 check
310    linkSlowCase(iter); // int32 check
311
312    JITStubCall stubCall(this, cti_op_lshift);
313    stubCall.addArgument(op1);
314    stubCall.addArgument(op2);
315    stubCall.call(dst);
316}
317
318// RightShift (>>)
319
320void JIT::emit_op_rshift(Instruction* currentInstruction)
321{
322    unsigned dst = currentInstruction[1].u.operand;
323    unsigned op1 = currentInstruction[2].u.operand;
324    unsigned op2 = currentInstruction[3].u.operand;
325
326    if (isOperandConstantImmediateInt(op2)) {
327        emitLoad(op1, regT1, regT0);
328        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
329        rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
330        emitStoreInt32(dst, regT0, dst == op1);
331        return;
332    }
333
334    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
335    if (!isOperandConstantImmediateInt(op1))
336        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
337    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
338    rshift32(regT2, regT0);
339    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
340}
341
342void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
343{
344    unsigned dst = currentInstruction[1].u.operand;
345    unsigned op1 = currentInstruction[2].u.operand;
346    unsigned op2 = currentInstruction[3].u.operand;
347
348    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
349        linkSlowCase(iter); // int32 check
350    linkSlowCase(iter); // int32 check
351
352    JITStubCall stubCall(this, cti_op_rshift);
353    stubCall.addArgument(op1);
354    stubCall.addArgument(op2);
355    stubCall.call(dst);
356}
357
358// BitAnd (&)
359
360void JIT::emit_op_bitand(Instruction* currentInstruction)
361{
362    unsigned dst = currentInstruction[1].u.operand;
363    unsigned op1 = currentInstruction[2].u.operand;
364    unsigned op2 = currentInstruction[3].u.operand;
365
366    unsigned op;
367    int32_t constant;
368    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
369        emitLoad(op, regT1, regT0);
370        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
371        and32(Imm32(constant), regT0);
372        emitStoreInt32(dst, regT0, (op == dst));
373        return;
374    }
375
376    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
377    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
378    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
379    and32(regT2, regT0);
380    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
381}
382
383void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
384{
385    unsigned dst = currentInstruction[1].u.operand;
386    unsigned op1 = currentInstruction[2].u.operand;
387    unsigned op2 = currentInstruction[3].u.operand;
388
389    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
390        linkSlowCase(iter); // int32 check
391    linkSlowCase(iter); // int32 check
392
393    JITStubCall stubCall(this, cti_op_bitand);
394    stubCall.addArgument(op1);
395    stubCall.addArgument(op2);
396    stubCall.call(dst);
397}
398
399// BitOr (|)
400
401void JIT::emit_op_bitor(Instruction* currentInstruction)
402{
403    unsigned dst = currentInstruction[1].u.operand;
404    unsigned op1 = currentInstruction[2].u.operand;
405    unsigned op2 = currentInstruction[3].u.operand;
406
407    unsigned op;
408    int32_t constant;
409    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
410        emitLoad(op, regT1, regT0);
411        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
412        or32(Imm32(constant), regT0);
413        emitStoreInt32(dst, regT0, (op == dst));
414        return;
415    }
416
417    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
418    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
419    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
420    or32(regT2, regT0);
421    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
422}
423
424void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
425{
426    unsigned dst = currentInstruction[1].u.operand;
427    unsigned op1 = currentInstruction[2].u.operand;
428    unsigned op2 = currentInstruction[3].u.operand;
429
430    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
431        linkSlowCase(iter); // int32 check
432    linkSlowCase(iter); // int32 check
433
434    JITStubCall stubCall(this, cti_op_bitor);
435    stubCall.addArgument(op1);
436    stubCall.addArgument(op2);
437    stubCall.call(dst);
438}
439
440// BitXor (^)
441
442void JIT::emit_op_bitxor(Instruction* currentInstruction)
443{
444    unsigned dst = currentInstruction[1].u.operand;
445    unsigned op1 = currentInstruction[2].u.operand;
446    unsigned op2 = currentInstruction[3].u.operand;
447
448    unsigned op;
449    int32_t constant;
450    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
451        emitLoad(op, regT1, regT0);
452        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
453        xor32(Imm32(constant), regT0);
454        emitStoreInt32(dst, regT0, (op == dst));
455        return;
456    }
457
458    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
459    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
460    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
461    xor32(regT2, regT0);
462    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
463}
464
465void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
466{
467    unsigned dst = currentInstruction[1].u.operand;
468    unsigned op1 = currentInstruction[2].u.operand;
469    unsigned op2 = currentInstruction[3].u.operand;
470
471    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
472        linkSlowCase(iter); // int32 check
473    linkSlowCase(iter); // int32 check
474
475    JITStubCall stubCall(this, cti_op_bitxor);
476    stubCall.addArgument(op1);
477    stubCall.addArgument(op2);
478    stubCall.call(dst);
479}
480
481// BitNot (~)
482
483void JIT::emit_op_bitnot(Instruction* currentInstruction)
484{
485    unsigned dst = currentInstruction[1].u.operand;
486    unsigned src = currentInstruction[2].u.operand;
487
488    emitLoad(src, regT1, regT0);
489    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
490
491    not32(regT0);
492    emitStoreInt32(dst, regT0, (dst == src));
493}
494
495void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
496{
497    unsigned dst = currentInstruction[1].u.operand;
498
499    linkSlowCase(iter); // int32 check
500
501    JITStubCall stubCall(this, cti_op_bitnot);
502    stubCall.addArgument(regT1, regT0);
503    stubCall.call(dst);
504}
505
506// PostInc (i++)
507
508void JIT::emit_op_post_inc(Instruction* currentInstruction)
509{
510    unsigned dst = currentInstruction[1].u.operand;
511    unsigned srcDst = currentInstruction[2].u.operand;
512
513    emitLoad(srcDst, regT1, regT0);
514    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
515
516    if (dst == srcDst) // x = x++ is a noop for ints.
517        return;
518
519    emitStoreInt32(dst, regT0);
520
521    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
522    emitStoreInt32(srcDst, regT0, true);
523}
524
525void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
526{
527    unsigned dst = currentInstruction[1].u.operand;
528    unsigned srcDst = currentInstruction[2].u.operand;
529
530    linkSlowCase(iter); // int32 check
531    if (dst != srcDst)
532        linkSlowCase(iter); // overflow check
533
534    JITStubCall stubCall(this, cti_op_post_inc);
535    stubCall.addArgument(srcDst);
536    stubCall.addArgument(Imm32(srcDst));
537    stubCall.call(dst);
538}
539
540// PostDec (i--)
541
542void JIT::emit_op_post_dec(Instruction* currentInstruction)
543{
544    unsigned dst = currentInstruction[1].u.operand;
545    unsigned srcDst = currentInstruction[2].u.operand;
546
547    emitLoad(srcDst, regT1, regT0);
548    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
549
550    if (dst == srcDst) // x = x-- is a noop for ints.
551        return;
552
553    emitStoreInt32(dst, regT0);
554
555    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
556    emitStoreInt32(srcDst, regT0, true);
557}
558
559void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
560{
561    unsigned dst = currentInstruction[1].u.operand;
562    unsigned srcDst = currentInstruction[2].u.operand;
563
564    linkSlowCase(iter); // int32 check
565    if (dst != srcDst)
566        linkSlowCase(iter); // overflow check
567
568    JITStubCall stubCall(this, cti_op_post_dec);
569    stubCall.addArgument(srcDst);
570    stubCall.addArgument(Imm32(srcDst));
571    stubCall.call(dst);
572}
573
574// PreInc (++i)
575
576void JIT::emit_op_pre_inc(Instruction* currentInstruction)
577{
578    unsigned srcDst = currentInstruction[1].u.operand;
579
580    emitLoad(srcDst, regT1, regT0);
581
582    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
583    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
584    emitStoreInt32(srcDst, regT0, true);
585}
586
587void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
588{
589    unsigned srcDst = currentInstruction[1].u.operand;
590
591    linkSlowCase(iter); // int32 check
592    linkSlowCase(iter); // overflow check
593
594    JITStubCall stubCall(this, cti_op_pre_inc);
595    stubCall.addArgument(srcDst);
596    stubCall.call(srcDst);
597}
598
599// PreDec (--i)
600
601void JIT::emit_op_pre_dec(Instruction* currentInstruction)
602{
603    unsigned srcDst = currentInstruction[1].u.operand;
604
605    emitLoad(srcDst, regT1, regT0);
606
607    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
608    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
609    emitStoreInt32(srcDst, regT0, true);
610}
611
612void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
613{
614    unsigned srcDst = currentInstruction[1].u.operand;
615
616    linkSlowCase(iter); // int32 check
617    linkSlowCase(iter); // overflow check
618
619    JITStubCall stubCall(this, cti_op_pre_dec);
620    stubCall.addArgument(srcDst);
621    stubCall.call(srcDst);
622}
623
624// Addition (+)
625
626void JIT::emit_op_add(Instruction* currentInstruction)
627{
628    unsigned dst = currentInstruction[1].u.operand;
629    unsigned op1 = currentInstruction[2].u.operand;
630    unsigned op2 = currentInstruction[3].u.operand;
631    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
632
633    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
634        JITStubCall stubCall(this, cti_op_add);
635        stubCall.addArgument(op1);
636        stubCall.addArgument(op2);
637        stubCall.call(dst);
638        return;
639    }
640
641    JumpList notInt32Op1;
642    JumpList notInt32Op2;
643
644    unsigned op;
645    int32_t constant;
646    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
647        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
648        return;
649    }
650
651    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
652    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
653    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
654
655    // Int32 case.
656    addSlowCase(branchAdd32(Overflow, regT2, regT0));
657    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
658
659    if (!supportsFloatingPoint()) {
660        addSlowCase(notInt32Op1);
661        addSlowCase(notInt32Op2);
662        return;
663    }
664    Jump end = jump();
665
666    // Double case.
667    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
668    end.link(this);
669}
670
671void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
672{
673    // Int32 case.
674    emitLoad(op, regT1, regT0);
675    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
676    addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
677    emitStoreInt32(dst, regT0, (op == dst));
678
679    // Double case.
680    if (!supportsFloatingPoint()) {
681        addSlowCase(notInt32);
682        return;
683    }
684    Jump end = jump();
685
686    notInt32.link(this);
687    if (!opType.definitelyIsNumber())
688        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
689    move(Imm32(constant), regT2);
690    convertInt32ToDouble(regT2, fpRegT0);
691    emitLoadDouble(op, fpRegT1);
692    addDouble(fpRegT1, fpRegT0);
693    emitStoreDouble(dst, fpRegT0);
694
695    end.link(this);
696}
697
698void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
699{
700    unsigned dst = currentInstruction[1].u.operand;
701    unsigned op1 = currentInstruction[2].u.operand;
702    unsigned op2 = currentInstruction[3].u.operand;
703    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
704
705    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
706        return;
707
708    unsigned op;
709    int32_t constant;
710    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
711        linkSlowCase(iter); // overflow check
712
713        if (!supportsFloatingPoint())
714            linkSlowCase(iter); // non-sse case
715        else {
716            ResultType opType = op == op1 ? types.first() : types.second();
717            if (!opType.definitelyIsNumber())
718                linkSlowCase(iter); // double check
719        }
720    } else {
721        linkSlowCase(iter); // overflow check
722
723        if (!supportsFloatingPoint()) {
724            linkSlowCase(iter); // int32 check
725            linkSlowCase(iter); // int32 check
726        } else {
727            if (!types.first().definitelyIsNumber())
728                linkSlowCase(iter); // double check
729
730            if (!types.second().definitelyIsNumber()) {
731                linkSlowCase(iter); // int32 check
732                linkSlowCase(iter); // double check
733            }
734        }
735    }
736
737    JITStubCall stubCall(this, cti_op_add);
738    stubCall.addArgument(op1);
739    stubCall.addArgument(op2);
740    stubCall.call(dst);
741}
742
743// Subtraction (-)
744
745void JIT::emit_op_sub(Instruction* currentInstruction)
746{
747    unsigned dst = currentInstruction[1].u.operand;
748    unsigned op1 = currentInstruction[2].u.operand;
749    unsigned op2 = currentInstruction[3].u.operand;
750    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
751
752    JumpList notInt32Op1;
753    JumpList notInt32Op2;
754
755    if (isOperandConstantImmediateInt(op2)) {
756        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
757        return;
758    }
759
760    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
761    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
762    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
763
764    // Int32 case.
765    addSlowCase(branchSub32(Overflow, regT2, regT0));
766    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
767
768    if (!supportsFloatingPoint()) {
769        addSlowCase(notInt32Op1);
770        addSlowCase(notInt32Op2);
771        return;
772    }
773    Jump end = jump();
774
775    // Double case.
776    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
777    end.link(this);
778}
779
780void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
781{
782    // Int32 case.
783    emitLoad(op, regT1, regT0);
784    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
785    addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
786    emitStoreInt32(dst, regT0, (op == dst));
787
788    // Double case.
789    if (!supportsFloatingPoint()) {
790        addSlowCase(notInt32);
791        return;
792    }
793    Jump end = jump();
794
795    notInt32.link(this);
796    if (!opType.definitelyIsNumber())
797        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
798    move(Imm32(constant), regT2);
799    convertInt32ToDouble(regT2, fpRegT0);
800    emitLoadDouble(op, fpRegT1);
801    subDouble(fpRegT0, fpRegT1);
802    emitStoreDouble(dst, fpRegT1);
803
804    end.link(this);
805}
806
807void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
808{
809    unsigned dst = currentInstruction[1].u.operand;
810    unsigned op1 = currentInstruction[2].u.operand;
811    unsigned op2 = currentInstruction[3].u.operand;
812    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
813
814    if (isOperandConstantImmediateInt(op2)) {
815        linkSlowCase(iter); // overflow check
816
817        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
818            linkSlowCase(iter); // int32 or double check
819    } else {
820        linkSlowCase(iter); // overflow check
821
822        if (!supportsFloatingPoint()) {
823            linkSlowCase(iter); // int32 check
824            linkSlowCase(iter); // int32 check
825        } else {
826            if (!types.first().definitelyIsNumber())
827                linkSlowCase(iter); // double check
828
829            if (!types.second().definitelyIsNumber()) {
830                linkSlowCase(iter); // int32 check
831                linkSlowCase(iter); // double check
832            }
833        }
834    }
835
836    JITStubCall stubCall(this, cti_op_sub);
837    stubCall.addArgument(op1);
838    stubCall.addArgument(op2);
839    stubCall.call(dst);
840}
841
842void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
843{
844    JumpList end;
845
846    if (!notInt32Op1.empty()) {
847        // Double case 1: Op1 is not int32; Op2 is unknown.
848        notInt32Op1.link(this);
849
850        ASSERT(op1IsInRegisters);
851
852        // Verify Op1 is double.
853        if (!types.first().definitelyIsNumber())
854            addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
855
856        if (!op2IsInRegisters)
857            emitLoad(op2, regT3, regT2);
858
859        Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
860
861        if (!types.second().definitelyIsNumber())
862            addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
863
864        convertInt32ToDouble(regT2, fpRegT0);
865        Jump doTheMath = jump();
866
867        // Load Op2 as double into double register.
868        doubleOp2.link(this);
869        emitLoadDouble(op2, fpRegT0);
870
871        // Do the math.
872        doTheMath.link(this);
873        switch (opcodeID) {
874            case op_mul:
875                emitLoadDouble(op1, fpRegT2);
876                mulDouble(fpRegT2, fpRegT0);
877                emitStoreDouble(dst, fpRegT0);
878                break;
879            case op_add:
880                emitLoadDouble(op1, fpRegT2);
881                addDouble(fpRegT2, fpRegT0);
882                emitStoreDouble(dst, fpRegT0);
883                break;
884            case op_sub:
885                emitLoadDouble(op1, fpRegT1);
886                subDouble(fpRegT0, fpRegT1);
887                emitStoreDouble(dst, fpRegT1);
888                break;
889            case op_div:
890                emitLoadDouble(op1, fpRegT1);
891                divDouble(fpRegT0, fpRegT1);
892                emitStoreDouble(dst, fpRegT1);
893                break;
894            case op_jnless:
895                emitLoadDouble(op1, fpRegT2);
896                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
897                break;
898            case op_jless:
899                emitLoadDouble(op1, fpRegT2);
900                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
901                break;
902            case op_jnlesseq:
903                emitLoadDouble(op1, fpRegT2);
904                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
905                break;
906            default:
907                ASSERT_NOT_REACHED();
908        }
909
910        if (!notInt32Op2.empty())
911            end.append(jump());
912    }
913
914    if (!notInt32Op2.empty()) {
915        // Double case 2: Op1 is int32; Op2 is not int32.
916        notInt32Op2.link(this);
917
918        ASSERT(op2IsInRegisters);
919
920        if (!op1IsInRegisters)
921            emitLoadPayload(op1, regT0);
922
923        convertInt32ToDouble(regT0, fpRegT0);
924
925        // Verify op2 is double.
926        if (!types.second().definitelyIsNumber())
927            addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
928
929        // Do the math.
930        switch (opcodeID) {
931            case op_mul:
932                emitLoadDouble(op2, fpRegT2);
933                mulDouble(fpRegT2, fpRegT0);
934                emitStoreDouble(dst, fpRegT0);
935                break;
936            case op_add:
937                emitLoadDouble(op2, fpRegT2);
938                addDouble(fpRegT2, fpRegT0);
939                emitStoreDouble(dst, fpRegT0);
940                break;
941            case op_sub:
942                emitLoadDouble(op2, fpRegT2);
943                subDouble(fpRegT2, fpRegT0);
944                emitStoreDouble(dst, fpRegT0);
945                break;
946            case op_div:
947                emitLoadDouble(op2, fpRegT2);
948                divDouble(fpRegT2, fpRegT0);
949                emitStoreDouble(dst, fpRegT0);
950                break;
951            case op_jnless:
952                emitLoadDouble(op2, fpRegT1);
953                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
954                break;
955            case op_jless:
956                emitLoadDouble(op2, fpRegT1);
957                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
958                break;
959            case op_jnlesseq:
960                emitLoadDouble(op2, fpRegT1);
961                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
962                break;
963            default:
964                ASSERT_NOT_REACHED();
965        }
966    }
967
968    end.link(this);
969}
970
971// Multiplication (*)
972
973void JIT::emit_op_mul(Instruction* currentInstruction)
974{
975    unsigned dst = currentInstruction[1].u.operand;
976    unsigned op1 = currentInstruction[2].u.operand;
977    unsigned op2 = currentInstruction[3].u.operand;
978    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
979
980    JumpList notInt32Op1;
981    JumpList notInt32Op2;
982
983    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
984    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
985    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
986
987    // Int32 case.
988    move(regT0, regT3);
989    addSlowCase(branchMul32(Overflow, regT2, regT0));
990    addSlowCase(branchTest32(Zero, regT0));
991    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
992
993    if (!supportsFloatingPoint()) {
994        addSlowCase(notInt32Op1);
995        addSlowCase(notInt32Op2);
996        return;
997    }
998    Jump end = jump();
999
1000    // Double case.
1001    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
1002    end.link(this);
1003}
1004
1005void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1006{
1007    unsigned dst = currentInstruction[1].u.operand;
1008    unsigned op1 = currentInstruction[2].u.operand;
1009    unsigned op2 = currentInstruction[3].u.operand;
1010    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1011
1012    Jump overflow = getSlowCase(iter); // overflow check
1013    linkSlowCase(iter); // zero result check
1014
1015    Jump negZero = branchOr32(Signed, regT2, regT3);
1016    emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
1017
1018    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
1019
1020    negZero.link(this);
1021    overflow.link(this);
1022
1023    if (!supportsFloatingPoint()) {
1024        linkSlowCase(iter); // int32 check
1025        linkSlowCase(iter); // int32 check
1026    }
1027
1028    if (supportsFloatingPoint()) {
1029        if (!types.first().definitelyIsNumber())
1030            linkSlowCase(iter); // double check
1031
1032        if (!types.second().definitelyIsNumber()) {
1033            linkSlowCase(iter); // int32 check
1034            linkSlowCase(iter); // double check
1035        }
1036    }
1037
1038    Label jitStubCall(this);
1039    JITStubCall stubCall(this, cti_op_mul);
1040    stubCall.addArgument(op1);
1041    stubCall.addArgument(op2);
1042    stubCall.call(dst);
1043}
1044
1045// Division (/)
1046
1047void JIT::emit_op_div(Instruction* currentInstruction)
1048{
1049    unsigned dst = currentInstruction[1].u.operand;
1050    unsigned op1 = currentInstruction[2].u.operand;
1051    unsigned op2 = currentInstruction[3].u.operand;
1052    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1053
1054    if (!supportsFloatingPoint()) {
1055        addSlowCase(jump());
1056        return;
1057    }
1058
1059    // Int32 divide.
1060    JumpList notInt32Op1;
1061    JumpList notInt32Op2;
1062
1063    JumpList end;
1064
1065    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
1066
1067    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
1068    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
1069
1070    convertInt32ToDouble(regT0, fpRegT0);
1071    convertInt32ToDouble(regT2, fpRegT1);
1072    divDouble(fpRegT1, fpRegT0);
1073
1074    JumpList doubleResult;
1075    branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
1076
1077    // Int32 result.
1078    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
1079    end.append(jump());
1080
1081    // Double result.
1082    doubleResult.link(this);
1083    emitStoreDouble(dst, fpRegT0);
1084    end.append(jump());
1085
1086    // Double divide.
1087    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
1088    end.link(this);
1089}
1090
1091void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1092{
1093    unsigned dst = currentInstruction[1].u.operand;
1094    unsigned op1 = currentInstruction[2].u.operand;
1095    unsigned op2 = currentInstruction[3].u.operand;
1096    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1097
1098    if (!supportsFloatingPoint())
1099        linkSlowCase(iter);
1100    else {
1101        if (!types.first().definitelyIsNumber())
1102            linkSlowCase(iter); // double check
1103
1104        if (!types.second().definitelyIsNumber()) {
1105            linkSlowCase(iter); // int32 check
1106            linkSlowCase(iter); // double check
1107        }
1108    }
1109
1110    JITStubCall stubCall(this, cti_op_div);
1111    stubCall.addArgument(op1);
1112    stubCall.addArgument(op2);
1113    stubCall.call(dst);
1114}
1115
1116// Mod (%)
1117
1118/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1119
1120#if CPU(X86) || CPU(X86_64)
1121
1122void JIT::emit_op_mod(Instruction* currentInstruction)
1123{
1124    unsigned dst = currentInstruction[1].u.operand;
1125    unsigned op1 = currentInstruction[2].u.operand;
1126    unsigned op2 = currentInstruction[3].u.operand;
1127
1128    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
1129        emitLoad(op1, X86Registers::edx, X86Registers::eax);
1130        move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
1131        addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
1132        if (getConstantOperand(op2).asInt32() == -1)
1133            addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
1134    } else {
1135        emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
1136        addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
1137        addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));
1138
1139        addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
1140        addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
1141    }
1142
1143    move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, in case of 0.
1144    m_assembler.cdq();
1145    m_assembler.idivl_r(X86Registers::ecx);
1146
1147    // If the remainder is zero and the dividend is negative, the result is -0.
1148    Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
1149    Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
1150    emitStore(dst, jsNumber(m_globalData, -0.0));
1151    Jump end = jump();
1152
1153    storeResult1.link(this);
1154    storeResult2.link(this);
1155    emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
1156    end.link(this);
1157}
1158
1159void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1160{
1161    unsigned dst = currentInstruction[1].u.operand;
1162    unsigned op1 = currentInstruction[2].u.operand;
1163    unsigned op2 = currentInstruction[3].u.operand;
1164
1165    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
1166        linkSlowCase(iter); // int32 check
1167        if (getConstantOperand(op2).asInt32() == -1)
1168            linkSlowCase(iter); // 0x80000000 check
1169    } else {
1170        linkSlowCase(iter); // int32 check
1171        linkSlowCase(iter); // int32 check
1172        linkSlowCase(iter); // 0 check
1173        linkSlowCase(iter); // 0x80000000 check
1174    }
1175
1176    JITStubCall stubCall(this, cti_op_mod);
1177    stubCall.addArgument(op1);
1178    stubCall.addArgument(op2);
1179    stubCall.call(dst);
1180}
1181
1182#else // CPU(X86) || CPU(X86_64)
1183
1184void JIT::emit_op_mod(Instruction* currentInstruction)
1185{
1186    unsigned dst = currentInstruction[1].u.operand;
1187    unsigned op1 = currentInstruction[2].u.operand;
1188    unsigned op2 = currentInstruction[3].u.operand;
1189
1190#if ENABLE(JIT_OPTIMIZE_MOD)
1191    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
1192    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
1193    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
1194
1195    addSlowCase(branch32(Equal, regT2, Imm32(0)));
1196
1197    emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
1198
1199    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
1200#else
1201    JITStubCall stubCall(this, cti_op_mod);
1202    stubCall.addArgument(op1);
1203    stubCall.addArgument(op2);
1204    stubCall.call(dst);
1205#endif
1206}
1207
1208void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1209{
1210#if ENABLE(JIT_OPTIMIZE_MOD)
1211    unsigned result = currentInstruction[1].u.operand;
1212    unsigned op1 = currentInstruction[2].u.operand;
1213    unsigned op2 = currentInstruction[3].u.operand;
1214    linkSlowCase(iter);
1215    linkSlowCase(iter);
1216    linkSlowCase(iter);
1217    JITStubCall stubCall(this, cti_op_mod);
1218    stubCall.addArgument(op1);
1219    stubCall.addArgument(op2);
1220    stubCall.call(result);
1221#else
1222    ASSERT_NOT_REACHED();
1223#endif
1224}
1225
1226#endif // CPU(X86) || CPU(X86_64)
1227
1228/* ------------------------------ END: OP_MOD ------------------------------ */
1229
1230#else // USE(JSVALUE32_64)
1231
1232void JIT::emit_op_lshift(Instruction* currentInstruction)
1233{
1234    unsigned result = currentInstruction[1].u.operand;
1235    unsigned op1 = currentInstruction[2].u.operand;
1236    unsigned op2 = currentInstruction[3].u.operand;
1237
1238    emitGetVirtualRegisters(op1, regT0, op2, regT2);
1239    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
1240    emitJumpSlowCaseIfNotImmediateInteger(regT0);
1241    emitJumpSlowCaseIfNotImmediateInteger(regT2);
1242    emitFastArithImmToInt(regT0);
1243    emitFastArithImmToInt(regT2);
1244    lshift32(regT2, regT0);
1245#if USE(JSVALUE32)
1246    addSlowCase(branchAdd32(Overflow, regT0, regT0));
1247    signExtend32ToPtr(regT0, regT0);
1248#endif
1249    emitFastArithReTagImmediate(regT0, regT0);
1250    emitPutVirtualRegister(result);
1251}
1252
1253void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1254{
1255    unsigned result = currentInstruction[1].u.operand;
1256    unsigned op1 = currentInstruction[2].u.operand;
1257    unsigned op2 = currentInstruction[3].u.operand;
1258
1259#if USE(JSVALUE64)
1260    UNUSED_PARAM(op1);
1261    UNUSED_PARAM(op2);
1262    linkSlowCase(iter);
1263    linkSlowCase(iter);
1264#else
1265    // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
1266    Jump notImm1 = getSlowCase(iter);
1267    Jump notImm2 = getSlowCase(iter);
1268    linkSlowCase(iter);
1269    emitGetVirtualRegisters(op1, regT0, op2, regT2);
1270    notImm1.link(this);
1271    notImm2.link(this);
1272#endif
1273    JITStubCall stubCall(this, cti_op_lshift);
1274    stubCall.addArgument(regT0);
1275    stubCall.addArgument(regT2);
1276    stubCall.call(result);
1277}
1278
1279void JIT::emit_op_rshift(Instruction* currentInstruction)
1280{
1281    unsigned result = currentInstruction[1].u.operand;
1282    unsigned op1 = currentInstruction[2].u.operand;
1283    unsigned op2 = currentInstruction[3].u.operand;
1284
1285    if (isOperandConstantImmediateInt(op2)) {
1286        // isOperandConstantImmediateInt(op2) => 1 SlowCase
1287        emitGetVirtualRegister(op1, regT0);
1288        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1289        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
1290        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
1291    } else {
1292        emitGetVirtualRegisters(op1, regT0, op2, regT2);
1293        if (supportsFloatingPointTruncate()) {
1294            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
1295#if USE(JSVALUE64)
1296            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
1297            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
1298            addPtr(tagTypeNumberRegister, regT0);
1299            movePtrToDouble(regT0, fpRegT0);
1300            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
1301#else
1302            // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
1303            emitJumpSlowCaseIfNotJSCell(regT0, op1);
1304            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
1305            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1306            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
1307            addSlowCase(branchAdd32(Overflow, regT0, regT0));
1308#endif
1309            lhsIsInt.link(this);
1310            emitJumpSlowCaseIfNotImmediateInteger(regT2);
1311        } else {
1312            // !supportsFloatingPoint() => 2 SlowCases
1313            emitJumpSlowCaseIfNotImmediateInteger(regT0);
1314            emitJumpSlowCaseIfNotImmediateInteger(regT2);
1315        }
1316        emitFastArithImmToInt(regT2);
1317        rshift32(regT2, regT0);
1318#if USE(JSVALUE32)
1319        signExtend32ToPtr(regT0, regT0);
1320#endif
1321    }
1322#if USE(JSVALUE64)
1323    emitFastArithIntToImmNoCheck(regT0, regT0);
1324#else
1325    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
1326#endif
1327    emitPutVirtualRegister(result);
1328}
1329
1330void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1331{
1332    unsigned result = currentInstruction[1].u.operand;
1333    unsigned op1 = currentInstruction[2].u.operand;
1334    unsigned op2 = currentInstruction[3].u.operand;
1335
1336    JITStubCall stubCall(this, cti_op_rshift);
1337
1338    if (isOperandConstantImmediateInt(op2)) {
1339        linkSlowCase(iter);
1340        stubCall.addArgument(regT0);
1341        stubCall.addArgument(op2, regT2);
1342    } else {
1343        if (supportsFloatingPointTruncate()) {
1344#if USE(JSVALUE64)
1345            linkSlowCase(iter);
1346            linkSlowCase(iter);
1347            linkSlowCase(iter);
1348#else
1349            linkSlowCaseIfNotJSCell(iter, op1);
1350            linkSlowCase(iter);
1351            linkSlowCase(iter);
1352            linkSlowCase(iter);
1353            linkSlowCase(iter);
1354#endif
1355            // We're reloading op1 to regT0 as we can no longer guarantee that
1356            // we have not munged the operand.  It may have already been shifted
1357            // correctly, but it still will not have been tagged.
1358            stubCall.addArgument(op1, regT0);
1359            stubCall.addArgument(regT2);
1360        } else {
1361            linkSlowCase(iter);
1362            linkSlowCase(iter);
1363            stubCall.addArgument(regT0);
1364            stubCall.addArgument(regT2);
1365        }
1366    }
1367
1368    stubCall.call(result);
1369}
1370
1371void JIT::emit_op_jnless(Instruction* currentInstruction)
1372{
1373    unsigned op1 = currentInstruction[1].u.operand;
1374    unsigned op2 = currentInstruction[2].u.operand;
1375    unsigned target = currentInstruction[3].u.operand;
1376
1377    // We generate inline code for the following cases in the fast path:
1378    // - int immediate to constant int immediate
1379    // - constant int immediate to int immediate
1380    // - int immediate to int immediate
1381
1382    if (isOperandConstantImmediateInt(op2)) {
1383        emitGetVirtualRegister(op1, regT0);
1384        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1385#if USE(JSVALUE64)
1386        int32_t op2imm = getConstantOperandImmediateInt(op2);
1387#else
1388        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
1389#endif
1390        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
1391    } else if (isOperandConstantImmediateInt(op1)) {
1392        emitGetVirtualRegister(op2, regT1);
1393        emitJumpSlowCaseIfNotImmediateInteger(regT1);
1394#if USE(JSVALUE64)
1395        int32_t op1imm = getConstantOperandImmediateInt(op1);
1396#else
1397        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
1398#endif
1399        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
1400    } else {
1401        emitGetVirtualRegisters(op1, regT0, op2, regT1);
1402        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1403        emitJumpSlowCaseIfNotImmediateInteger(regT1);
1404
1405        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
1406    }
1407}
1408
1409void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1410{
1411    unsigned op1 = currentInstruction[1].u.operand;
1412    unsigned op2 = currentInstruction[2].u.operand;
1413    unsigned target = currentInstruction[3].u.operand;
1414
1415    // We generate inline code for the following cases in the slow path:
1416    // - floating-point number to constant int immediate
1417    // - constant int immediate to floating-point number
1418    // - floating-point number to floating-point number.
1419
1420    if (isOperandConstantImmediateInt(op2)) {
1421        linkSlowCase(iter);
1422
1423        if (supportsFloatingPoint()) {
1424#if USE(JSVALUE64)
1425            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1426            addPtr(tagTypeNumberRegister, regT0);
1427            movePtrToDouble(regT0, fpRegT0);
1428#else
1429            Jump fail1;
1430            if (!m_codeBlock->isKnownNotImmediate(op1))
1431                fail1 = emitJumpIfNotJSCell(regT0);
1432
1433            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1434            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1435#endif
1436
1437            int32_t op2imm = getConstantOperand(op2).asInt32();;
1438
1439            move(Imm32(op2imm), regT1);
1440            convertInt32ToDouble(regT1, fpRegT1);
1441
1442            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
1443
1444            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1445
1446#if USE(JSVALUE64)
1447            fail1.link(this);
1448#else
1449            if (!m_codeBlock->isKnownNotImmediate(op1))
1450                fail1.link(this);
1451            fail2.link(this);
1452#endif
1453        }
1454
1455        JITStubCall stubCall(this, cti_op_jless);
1456        stubCall.addArgument(regT0);
1457        stubCall.addArgument(op2, regT2);
1458        stubCall.call();
1459        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1460
1461    } else if (isOperandConstantImmediateInt(op1)) {
1462        linkSlowCase(iter);
1463
1464        if (supportsFloatingPoint()) {
1465#if USE(JSVALUE64)
1466            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1467            addPtr(tagTypeNumberRegister, regT1);
1468            movePtrToDouble(regT1, fpRegT1);
1469#else
1470            Jump fail1;
1471            if (!m_codeBlock->isKnownNotImmediate(op2))
1472                fail1 = emitJumpIfNotJSCell(regT1);
1473
1474            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1475            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1476#endif
1477
1478            int32_t op1imm = getConstantOperand(op1).asInt32();;
1479
1480            move(Imm32(op1imm), regT0);
1481            convertInt32ToDouble(regT0, fpRegT0);
1482
1483            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
1484
1485            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1486
1487#if USE(JSVALUE64)
1488            fail1.link(this);
1489#else
1490            if (!m_codeBlock->isKnownNotImmediate(op2))
1491                fail1.link(this);
1492            fail2.link(this);
1493#endif
1494        }
1495
1496        JITStubCall stubCall(this, cti_op_jless);
1497        stubCall.addArgument(op1, regT2);
1498        stubCall.addArgument(regT1);
1499        stubCall.call();
1500        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1501
1502    } else {
1503        linkSlowCase(iter);
1504
1505        if (supportsFloatingPoint()) {
1506#if USE(JSVALUE64)
1507            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1508            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1509            Jump fail3 = emitJumpIfImmediateInteger(regT1);
1510            addPtr(tagTypeNumberRegister, regT0);
1511            addPtr(tagTypeNumberRegister, regT1);
1512            movePtrToDouble(regT0, fpRegT0);
1513            movePtrToDouble(regT1, fpRegT1);
1514#else
1515            Jump fail1;
1516            if (!m_codeBlock->isKnownNotImmediate(op1))
1517                fail1 = emitJumpIfNotJSCell(regT0);
1518
1519            Jump fail2;
1520            if (!m_codeBlock->isKnownNotImmediate(op2))
1521                fail2 = emitJumpIfNotJSCell(regT1);
1522
1523            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1524            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1525            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1526            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1527#endif
1528
1529            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
1530
1531            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1532
1533#if USE(JSVALUE64)
1534            fail1.link(this);
1535            fail2.link(this);
1536            fail3.link(this);
1537#else
1538            if (!m_codeBlock->isKnownNotImmediate(op1))
1539                fail1.link(this);
1540            if (!m_codeBlock->isKnownNotImmediate(op2))
1541                fail2.link(this);
1542            fail3.link(this);
1543            fail4.link(this);
1544#endif
1545        }
1546
1547        linkSlowCase(iter);
1548        JITStubCall stubCall(this, cti_op_jless);
1549        stubCall.addArgument(regT0);
1550        stubCall.addArgument(regT1);
1551        stubCall.call();
1552        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1553    }
1554}
1555
1556void JIT::emit_op_jless(Instruction* currentInstruction)
1557{
1558    unsigned op1 = currentInstruction[1].u.operand;
1559    unsigned op2 = currentInstruction[2].u.operand;
1560    unsigned target = currentInstruction[3].u.operand;
1561
1562    // We generate inline code for the following cases in the fast path:
1563    // - int immediate to constant int immediate
1564    // - constant int immediate to int immediate
1565    // - int immediate to int immediate
1566
1567    if (isOperandConstantImmediateInt(op2)) {
1568        emitGetVirtualRegister(op1, regT0);
1569        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1570#if USE(JSVALUE64)
1571        int32_t op2imm = getConstantOperandImmediateInt(op2);
1572#else
1573        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
1574#endif
1575        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
1576    } else if (isOperandConstantImmediateInt(op1)) {
1577        emitGetVirtualRegister(op2, regT1);
1578        emitJumpSlowCaseIfNotImmediateInteger(regT1);
1579#if USE(JSVALUE64)
1580        int32_t op1imm = getConstantOperandImmediateInt(op1);
1581#else
1582        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
1583#endif
1584        addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
1585    } else {
1586        emitGetVirtualRegisters(op1, regT0, op2, regT1);
1587        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1588        emitJumpSlowCaseIfNotImmediateInteger(regT1);
1589
1590        addJump(branch32(LessThan, regT0, regT1), target);
1591    }
1592}
1593
1594void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1595{
1596    unsigned op1 = currentInstruction[1].u.operand;
1597    unsigned op2 = currentInstruction[2].u.operand;
1598    unsigned target = currentInstruction[3].u.operand;
1599
1600    // We generate inline code for the following cases in the slow path:
1601    // - floating-point number to constant int immediate
1602    // - constant int immediate to floating-point number
1603    // - floating-point number to floating-point number.
1604
1605    if (isOperandConstantImmediateInt(op2)) {
1606        linkSlowCase(iter);
1607
1608        if (supportsFloatingPoint()) {
1609#if USE(JSVALUE64)
1610            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1611            addPtr(tagTypeNumberRegister, regT0);
1612            movePtrToDouble(regT0, fpRegT0);
1613#else
1614            Jump fail1;
1615            if (!m_codeBlock->isKnownNotImmediate(op1))
1616                fail1 = emitJumpIfNotJSCell(regT0);
1617
1618            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1619            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1620#endif
1621
1622            int32_t op2imm = getConstantOperand(op2).asInt32();
1623
1624            move(Imm32(op2imm), regT1);
1625            convertInt32ToDouble(regT1, fpRegT1);
1626
1627            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
1628
1629            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1630
1631#if USE(JSVALUE64)
1632            fail1.link(this);
1633#else
1634            if (!m_codeBlock->isKnownNotImmediate(op1))
1635                fail1.link(this);
1636            fail2.link(this);
1637#endif
1638        }
1639
1640        JITStubCall stubCall(this, cti_op_jless);
1641        stubCall.addArgument(regT0);
1642        stubCall.addArgument(op2, regT2);
1643        stubCall.call();
1644        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
1645
1646    } else if (isOperandConstantImmediateInt(op1)) {
1647        linkSlowCase(iter);
1648
1649        if (supportsFloatingPoint()) {
1650#if USE(JSVALUE64)
1651            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1652            addPtr(tagTypeNumberRegister, regT1);
1653            movePtrToDouble(regT1, fpRegT1);
1654#else
1655            Jump fail1;
1656            if (!m_codeBlock->isKnownNotImmediate(op2))
1657                fail1 = emitJumpIfNotJSCell(regT1);
1658
1659            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1660            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1661#endif
1662
1663            int32_t op1imm = getConstantOperand(op1).asInt32();
1664
1665            move(Imm32(op1imm), regT0);
1666            convertInt32ToDouble(regT0, fpRegT0);
1667
1668            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
1669
1670            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1671
1672#if USE(JSVALUE64)
1673            fail1.link(this);
1674#else
1675            if (!m_codeBlock->isKnownNotImmediate(op2))
1676                fail1.link(this);
1677            fail2.link(this);
1678#endif
1679        }
1680
1681        JITStubCall stubCall(this, cti_op_jless);
1682        stubCall.addArgument(op1, regT2);
1683        stubCall.addArgument(regT1);
1684        stubCall.call();
1685        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
1686
1687    } else {
1688        linkSlowCase(iter);
1689
1690        if (supportsFloatingPoint()) {
1691#if USE(JSVALUE64)
1692            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1693            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1694            Jump fail3 = emitJumpIfImmediateInteger(regT1);
1695            addPtr(tagTypeNumberRegister, regT0);
1696            addPtr(tagTypeNumberRegister, regT1);
1697            movePtrToDouble(regT0, fpRegT0);
1698            movePtrToDouble(regT1, fpRegT1);
1699#else
1700            Jump fail1;
1701            if (!m_codeBlock->isKnownNotImmediate(op1))
1702                fail1 = emitJumpIfNotJSCell(regT0);
1703
1704            Jump fail2;
1705            if (!m_codeBlock->isKnownNotImmediate(op2))
1706                fail2 = emitJumpIfNotJSCell(regT1);
1707
1708            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1709            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1710            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1711            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1712#endif
1713
1714            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
1715
1716            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1717
1718#if USE(JSVALUE64)
1719            fail1.link(this);
1720            fail2.link(this);
1721            fail3.link(this);
1722#else
1723            if (!m_codeBlock->isKnownNotImmediate(op1))
1724                fail1.link(this);
1725            if (!m_codeBlock->isKnownNotImmediate(op2))
1726                fail2.link(this);
1727            fail3.link(this);
1728            fail4.link(this);
1729#endif
1730        }
1731
1732        linkSlowCase(iter);
1733        JITStubCall stubCall(this, cti_op_jless);
1734        stubCall.addArgument(regT0);
1735        stubCall.addArgument(regT1);
1736        stubCall.call();
1737        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
1738    }
1739}
1740
1741void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
1742{
1743    unsigned op1 = currentInstruction[1].u.operand;
1744    unsigned op2 = currentInstruction[2].u.operand;
1745    unsigned target = currentInstruction[3].u.operand;
1746
1747    // We generate inline code for the following cases in the fast path:
1748    // - int immediate to constant int immediate
1749    // - constant int immediate to int immediate
1750    // - int immediate to int immediate
1751
1752    if (isOperandConstantImmediateInt(op2)) {
1753        emitGetVirtualRegister(op1, regT0);
1754        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1755#if USE(JSVALUE64)
1756        int32_t op2imm = getConstantOperandImmediateInt(op2);
1757#else
1758        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
1759#endif
1760        addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target);
1761    } else if (isOperandConstantImmediateInt(op1)) {
1762        emitGetVirtualRegister(op2, regT1);
1763        emitJumpSlowCaseIfNotImmediateInteger(regT1);
1764#if USE(JSVALUE64)
1765        int32_t op1imm = getConstantOperandImmediateInt(op1);
1766#else
1767        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
1768#endif
1769        addJump(branch32(LessThan, regT1, Imm32(op1imm)), target);
1770    } else {
1771        emitGetVirtualRegisters(op1, regT0, op2, regT1);
1772        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1773        emitJumpSlowCaseIfNotImmediateInteger(regT1);
1774
1775        addJump(branch32(GreaterThan, regT0, regT1), target);
1776    }
1777}
1778
1779void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1780{
1781    unsigned op1 = currentInstruction[1].u.operand;
1782    unsigned op2 = currentInstruction[2].u.operand;
1783    unsigned target = currentInstruction[3].u.operand;
1784
1785    // We generate inline code for the following cases in the slow path:
1786    // - floating-point number to constant int immediate
1787    // - constant int immediate to floating-point number
1788    // - floating-point number to floating-point number.
1789
1790    if (isOperandConstantImmediateInt(op2)) {
1791        linkSlowCase(iter);
1792
1793        if (supportsFloatingPoint()) {
1794#if USE(JSVALUE64)
1795            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1796            addPtr(tagTypeNumberRegister, regT0);
1797            movePtrToDouble(regT0, fpRegT0);
1798#else
1799            Jump fail1;
1800            if (!m_codeBlock->isKnownNotImmediate(op1))
1801                fail1 = emitJumpIfNotJSCell(regT0);
1802
1803            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1804            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1805#endif
1806
1807            int32_t op2imm = getConstantOperand(op2).asInt32();;
1808
1809            move(Imm32(op2imm), regT1);
1810            convertInt32ToDouble(regT1, fpRegT1);
1811
1812            emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
1813
1814            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1815
1816#if USE(JSVALUE64)
1817            fail1.link(this);
1818#else
1819            if (!m_codeBlock->isKnownNotImmediate(op1))
1820                fail1.link(this);
1821            fail2.link(this);
1822#endif
1823        }
1824
1825        JITStubCall stubCall(this, cti_op_jlesseq);
1826        stubCall.addArgument(regT0);
1827        stubCall.addArgument(op2, regT2);
1828        stubCall.call();
1829        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1830
1831    } else if (isOperandConstantImmediateInt(op1)) {
1832        linkSlowCase(iter);
1833
1834        if (supportsFloatingPoint()) {
1835#if USE(JSVALUE64)
1836            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1837            addPtr(tagTypeNumberRegister, regT1);
1838            movePtrToDouble(regT1, fpRegT1);
1839#else
1840            Jump fail1;
1841            if (!m_codeBlock->isKnownNotImmediate(op2))
1842                fail1 = emitJumpIfNotJSCell(regT1);
1843
1844            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1845            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1846#endif
1847
1848            int32_t op1imm = getConstantOperand(op1).asInt32();;
1849
1850            move(Imm32(op1imm), regT0);
1851            convertInt32ToDouble(regT0, fpRegT0);
1852
1853            emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
1854
1855            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1856
1857#if USE(JSVALUE64)
1858            fail1.link(this);
1859#else
1860            if (!m_codeBlock->isKnownNotImmediate(op2))
1861                fail1.link(this);
1862            fail2.link(this);
1863#endif
1864        }
1865
1866        JITStubCall stubCall(this, cti_op_jlesseq);
1867        stubCall.addArgument(op1, regT2);
1868        stubCall.addArgument(regT1);
1869        stubCall.call();
1870        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1871
1872    } else {
1873        linkSlowCase(iter);
1874
1875        if (supportsFloatingPoint()) {
1876#if USE(JSVALUE64)
1877            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1878            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1879            Jump fail3 = emitJumpIfImmediateInteger(regT1);
1880            addPtr(tagTypeNumberRegister, regT0);
1881            addPtr(tagTypeNumberRegister, regT1);
1882            movePtrToDouble(regT0, fpRegT0);
1883            movePtrToDouble(regT1, fpRegT1);
1884#else
1885            Jump fail1;
1886            if (!m_codeBlock->isKnownNotImmediate(op1))
1887                fail1 = emitJumpIfNotJSCell(regT0);
1888
1889            Jump fail2;
1890            if (!m_codeBlock->isKnownNotImmediate(op2))
1891                fail2 = emitJumpIfNotJSCell(regT1);
1892
1893            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1894            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1895            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1896            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1897#endif
1898
1899            emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
1900
1901            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1902
1903#if USE(JSVALUE64)
1904            fail1.link(this);
1905            fail2.link(this);
1906            fail3.link(this);
1907#else
1908            if (!m_codeBlock->isKnownNotImmediate(op1))
1909                fail1.link(this);
1910            if (!m_codeBlock->isKnownNotImmediate(op2))
1911                fail2.link(this);
1912            fail3.link(this);
1913            fail4.link(this);
1914#endif
1915        }
1916
1917        linkSlowCase(iter);
1918        JITStubCall stubCall(this, cti_op_jlesseq);
1919        stubCall.addArgument(regT0);
1920        stubCall.addArgument(regT1);
1921        stubCall.call();
1922        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1923    }
1924}
1925
1926void JIT::emit_op_bitand(Instruction* currentInstruction)
1927{
1928    unsigned result = currentInstruction[1].u.operand;
1929    unsigned op1 = currentInstruction[2].u.operand;
1930    unsigned op2 = currentInstruction[3].u.operand;
1931
1932    if (isOperandConstantImmediateInt(op1)) {
1933        emitGetVirtualRegister(op2, regT0);
1934        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1935#if USE(JSVALUE64)
1936        int32_t imm = getConstantOperandImmediateInt(op1);
1937        andPtr(Imm32(imm), regT0);
1938        if (imm >= 0)
1939            emitFastArithIntToImmNoCheck(regT0, regT0);
1940#else
1941        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
1942#endif
1943    } else if (isOperandConstantImmediateInt(op2)) {
1944        emitGetVirtualRegister(op1, regT0);
1945        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1946#if USE(JSVALUE64)
1947        int32_t imm = getConstantOperandImmediateInt(op2);
1948        andPtr(Imm32(imm), regT0);
1949        if (imm >= 0)
1950            emitFastArithIntToImmNoCheck(regT0, regT0);
1951#else
1952        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
1953#endif
1954    } else {
1955        emitGetVirtualRegisters(op1, regT0, op2, regT1);
1956        andPtr(regT1, regT0);
1957        emitJumpSlowCaseIfNotImmediateInteger(regT0);
1958    }
1959    emitPutVirtualRegister(result);
1960}
1961
1962void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1963{
1964    unsigned result = currentInstruction[1].u.operand;
1965    unsigned op1 = currentInstruction[2].u.operand;
1966    unsigned op2 = currentInstruction[3].u.operand;
1967
1968    linkSlowCase(iter);
1969    if (isOperandConstantImmediateInt(op1)) {
1970        JITStubCall stubCall(this, cti_op_bitand);
1971        stubCall.addArgument(op1, regT2);
1972        stubCall.addArgument(regT0);
1973        stubCall.call(result);
1974    } else if (isOperandConstantImmediateInt(op2)) {
1975        JITStubCall stubCall(this, cti_op_bitand);
1976        stubCall.addArgument(regT0);
1977        stubCall.addArgument(op2, regT2);
1978        stubCall.call(result);
1979    } else {
1980        JITStubCall stubCall(this, cti_op_bitand);
1981        stubCall.addArgument(op1, regT2);
1982        stubCall.addArgument(regT1);
1983        stubCall.call(result);
1984    }
1985}
1986
1987void JIT::emit_op_post_inc(Instruction* currentInstruction)
1988{
1989    unsigned result = currentInstruction[1].u.operand;
1990    unsigned srcDst = currentInstruction[2].u.operand;
1991
1992    emitGetVirtualRegister(srcDst, regT0);
1993    move(regT0, regT1);
1994    emitJumpSlowCaseIfNotImmediateInteger(regT0);
1995#if USE(JSVALUE64)
1996    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
1997    emitFastArithIntToImmNoCheck(regT1, regT1);
1998#else
1999    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
2000    signExtend32ToPtr(regT1, regT1);
2001#endif
2002    emitPutVirtualRegister(srcDst, regT1);
2003    emitPutVirtualRegister(result);
2004}
2005
2006void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2007{
2008    unsigned result = currentInstruction[1].u.operand;
2009    unsigned srcDst = currentInstruction[2].u.operand;
2010
2011    linkSlowCase(iter);
2012    linkSlowCase(iter);
2013    JITStubCall stubCall(this, cti_op_post_inc);
2014    stubCall.addArgument(regT0);
2015    stubCall.addArgument(Imm32(srcDst));
2016    stubCall.call(result);
2017}
2018
2019void JIT::emit_op_post_dec(Instruction* currentInstruction)
2020{
2021    unsigned result = currentInstruction[1].u.operand;
2022    unsigned srcDst = currentInstruction[2].u.operand;
2023
2024    emitGetVirtualRegister(srcDst, regT0);
2025    move(regT0, regT1);
2026    emitJumpSlowCaseIfNotImmediateInteger(regT0);
2027#if USE(JSVALUE64)
2028    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
2029    emitFastArithIntToImmNoCheck(regT1, regT1);
2030#else
2031    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
2032    signExtend32ToPtr(regT1, regT1);
2033#endif
2034    emitPutVirtualRegister(srcDst, regT1);
2035    emitPutVirtualRegister(result);
2036}
2037
2038void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2039{
2040    unsigned result = currentInstruction[1].u.operand;
2041    unsigned srcDst = currentInstruction[2].u.operand;
2042
2043    linkSlowCase(iter);
2044    linkSlowCase(iter);
2045    JITStubCall stubCall(this, cti_op_post_dec);
2046    stubCall.addArgument(regT0);
2047    stubCall.addArgument(Imm32(srcDst));
2048    stubCall.call(result);
2049}
2050
2051void JIT::emit_op_pre_inc(Instruction* currentInstruction)
2052{
2053    unsigned srcDst = currentInstruction[1].u.operand;
2054
2055    emitGetVirtualRegister(srcDst, regT0);
2056    emitJumpSlowCaseIfNotImmediateInteger(regT0);
2057#if USE(JSVALUE64)
2058    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
2059    emitFastArithIntToImmNoCheck(regT0, regT0);
2060#else
2061    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
2062    signExtend32ToPtr(regT0, regT0);
2063#endif
2064    emitPutVirtualRegister(srcDst);
2065}
2066
2067void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2068{
2069    unsigned srcDst = currentInstruction[1].u.operand;
2070
2071    Jump notImm = getSlowCase(iter);
2072    linkSlowCase(iter);
2073    emitGetVirtualRegister(srcDst, regT0);
2074    notImm.link(this);
2075    JITStubCall stubCall(this, cti_op_pre_inc);
2076    stubCall.addArgument(regT0);
2077    stubCall.call(srcDst);
2078}
2079
2080void JIT::emit_op_pre_dec(Instruction* currentInstruction)
2081{
2082    unsigned srcDst = currentInstruction[1].u.operand;
2083
2084    emitGetVirtualRegister(srcDst, regT0);
2085    emitJumpSlowCaseIfNotImmediateInteger(regT0);
2086#if USE(JSVALUE64)
2087    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
2088    emitFastArithIntToImmNoCheck(regT0, regT0);
2089#else
2090    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
2091    signExtend32ToPtr(regT0, regT0);
2092#endif
2093    emitPutVirtualRegister(srcDst);
2094}
2095
2096void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2097{
2098    unsigned srcDst = currentInstruction[1].u.operand;
2099
2100    Jump notImm = getSlowCase(iter);
2101    linkSlowCase(iter);
2102    emitGetVirtualRegister(srcDst, regT0);
2103    notImm.link(this);
2104    JITStubCall stubCall(this, cti_op_pre_dec);
2105    stubCall.addArgument(regT0);
2106    stubCall.call(srcDst);
2107}
2108
2109/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
2110
2111#if CPU(X86) || CPU(X86_64)
2112
2113void JIT::emit_op_mod(Instruction* currentInstruction)
2114{
2115    unsigned result = currentInstruction[1].u.operand;
2116    unsigned op1 = currentInstruction[2].u.operand;
2117    unsigned op2 = currentInstruction[3].u.operand;
2118
2119    emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
2120    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
2121    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
2122#if USE(JSVALUE64)
2123    addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
2124    m_assembler.cdq();
2125    m_assembler.idivl_r(X86Registers::ecx);
2126#else
2127    emitFastArithDeTagImmediate(X86Registers::eax);
2128    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
2129    m_assembler.cdq();
2130    m_assembler.idivl_r(X86Registers::ecx);
2131    signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
2132#endif
2133    emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
2134    emitPutVirtualRegister(result);
2135}
2136
2137void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2138{
2139    unsigned result = currentInstruction[1].u.operand;
2140
2141#if USE(JSVALUE64)
2142    linkSlowCase(iter);
2143    linkSlowCase(iter);
2144    linkSlowCase(iter);
2145#else
2146    Jump notImm1 = getSlowCase(iter);
2147    Jump notImm2 = getSlowCase(iter);
2148    linkSlowCase(iter);
2149    emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
2150    emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
2151    notImm1.link(this);
2152    notImm2.link(this);
2153#endif
2154    JITStubCall stubCall(this, cti_op_mod);
2155    stubCall.addArgument(X86Registers::eax);
2156    stubCall.addArgument(X86Registers::ecx);
2157    stubCall.call(result);
2158}
2159
2160#else // CPU(X86) || CPU(X86_64)
2161
2162void JIT::emit_op_mod(Instruction* currentInstruction)
2163{
2164    unsigned result = currentInstruction[1].u.operand;
2165    unsigned op1 = currentInstruction[2].u.operand;
2166    unsigned op2 = currentInstruction[3].u.operand;
2167
2168#if ENABLE(JIT_OPTIMIZE_MOD)
2169    emitGetVirtualRegisters(op1, regT0, op2, regT2);
2170    emitJumpSlowCaseIfNotImmediateInteger(regT0);
2171    emitJumpSlowCaseIfNotImmediateInteger(regT2);
2172
2173    addSlowCase(branch32(Equal, regT2, Imm32(1)));
2174
2175    emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
2176
2177    emitPutVirtualRegister(result, regT0);
2178#else
2179    JITStubCall stubCall(this, cti_op_mod);
2180    stubCall.addArgument(op1, regT2);
2181    stubCall.addArgument(op2, regT2);
2182    stubCall.call(result);
2183#endif
2184}
2185
2186void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2187{
2188#if ENABLE(JIT_OPTIMIZE_MOD)
2189    unsigned result = currentInstruction[1].u.operand;
2190    unsigned op1 = currentInstruction[2].u.operand;
2191    unsigned op2 = currentInstruction[3].u.operand;
2192    linkSlowCase(iter);
2193    linkSlowCase(iter);
2194    linkSlowCase(iter);
2195    JITStubCall stubCall(this, cti_op_mod);
2196    stubCall.addArgument(op1, regT2);
2197    stubCall.addArgument(op2, regT2);
2198    stubCall.call(result);
2199#else
2200    ASSERT_NOT_REACHED();
2201#endif
2202}
2203
2204#endif // CPU(X86) || CPU(X86_64)
2205
2206/* ------------------------------ END: OP_MOD ------------------------------ */
2207
2208#if USE(JSVALUE64)
2209
2210/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
2211
2212void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
2213{
2214    emitGetVirtualRegisters(op1, regT0, op2, regT1);
2215    emitJumpSlowCaseIfNotImmediateInteger(regT0);
2216    emitJumpSlowCaseIfNotImmediateInteger(regT1);
2217    if (opcodeID == op_add)
2218        addSlowCase(branchAdd32(Overflow, regT1, regT0));
2219    else if (opcodeID == op_sub)
2220        addSlowCase(branchSub32(Overflow, regT1, regT0));
2221    else {
2222        ASSERT(opcodeID == op_mul);
2223        addSlowCase(branchMul32(Overflow, regT1, regT0));
2224        addSlowCase(branchTest32(Zero, regT0));
2225    }
2226    emitFastArithIntToImmNoCheck(regT0, regT0);
2227}
2228
2229void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
2230{
2231    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
2232    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
2233
2234    Jump notImm1;
2235    Jump notImm2;
2236    if (op1HasImmediateIntFastCase) {
2237        notImm2 = getSlowCase(iter);
2238    } else if (op2HasImmediateIntFastCase) {
2239        notImm1 = getSlowCase(iter);
2240    } else {
2241        notImm1 = getSlowCase(iter);
2242        notImm2 = getSlowCase(iter);
2243    }
2244
2245    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
2246    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
2247        linkSlowCase(iter);
2248    emitGetVirtualRegister(op1, regT0);
2249
2250    Label stubFunctionCall(this);
2251    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
2252    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
2253        emitGetVirtualRegister(op1, regT0);
2254        emitGetVirtualRegister(op2, regT1);
2255    }
2256    stubCall.addArgument(regT0);
2257    stubCall.addArgument(regT1);
2258    stubCall.call(result);
2259    Jump end = jump();
2260
2261    if (op1HasImmediateIntFastCase) {
2262        notImm2.link(this);
2263        if (!types.second().definitelyIsNumber())
2264            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
2265        emitGetVirtualRegister(op1, regT1);
2266        convertInt32ToDouble(regT1, fpRegT1);
2267        addPtr(tagTypeNumberRegister, regT0);
2268        movePtrToDouble(regT0, fpRegT2);
2269    } else if (op2HasImmediateIntFastCase) {
2270        notImm1.link(this);
2271        if (!types.first().definitelyIsNumber())
2272            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
2273        emitGetVirtualRegister(op2, regT1);
2274        convertInt32ToDouble(regT1, fpRegT1);
2275        addPtr(tagTypeNumberRegister, regT0);
2276        movePtrToDouble(regT0, fpRegT2);
2277    } else {
2278        // if we get here, eax is not an int32, edx not yet checked.
2279        notImm1.link(this);
2280        if (!types.first().definitelyIsNumber())
2281            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
2282        if (!types.second().definitelyIsNumber())
2283            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
2284        addPtr(tagTypeNumberRegister, regT0);
2285        movePtrToDouble(regT0, fpRegT1);
2286        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
2287        convertInt32ToDouble(regT1, fpRegT2);
2288        Jump op2wasInteger = jump();
2289
2290        // if we get here, eax IS an int32, edx is not.
2291        notImm2.link(this);
2292        if (!types.second().definitelyIsNumber())
2293            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
2294        convertInt32ToDouble(regT0, fpRegT1);
2295        op2isDouble.link(this);
2296        addPtr(tagTypeNumberRegister, regT1);
2297        movePtrToDouble(regT1, fpRegT2);
2298        op2wasInteger.link(this);
2299    }
2300
2301    if (opcodeID == op_add)
2302        addDouble(fpRegT2, fpRegT1);
2303    else if (opcodeID == op_sub)
2304        subDouble(fpRegT2, fpRegT1);
2305    else if (opcodeID == op_mul)
2306        mulDouble(fpRegT2, fpRegT1);
2307    else {
2308        ASSERT(opcodeID == op_div);
2309        divDouble(fpRegT2, fpRegT1);
2310    }
2311    moveDoubleToPtr(fpRegT1, regT0);
2312    subPtr(tagTypeNumberRegister, regT0);
2313    emitPutVirtualRegister(result, regT0);
2314
2315    end.link(this);
2316}
2317
2318void JIT::emit_op_add(Instruction* currentInstruction)
2319{
2320    unsigned result = currentInstruction[1].u.operand;
2321    unsigned op1 = currentInstruction[2].u.operand;
2322    unsigned op2 = currentInstruction[3].u.operand;
2323    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2324
2325    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
2326        JITStubCall stubCall(this, cti_op_add);
2327        stubCall.addArgument(op1, regT2);
2328        stubCall.addArgument(op2, regT2);
2329        stubCall.call(result);
2330        return;
2331    }
2332
2333    if (isOperandConstantImmediateInt(op1)) {
2334        emitGetVirtualRegister(op2, regT0);
2335        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2336        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
2337        emitFastArithIntToImmNoCheck(regT0, regT0);
2338    } else if (isOperandConstantImmediateInt(op2)) {
2339        emitGetVirtualRegister(op1, regT0);
2340        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2341        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
2342        emitFastArithIntToImmNoCheck(regT0, regT0);
2343    } else
2344        compileBinaryArithOp(op_add, result, op1, op2, types);
2345
2346    emitPutVirtualRegister(result);
2347}
2348
2349void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2350{
2351    unsigned result = currentInstruction[1].u.operand;
2352    unsigned op1 = currentInstruction[2].u.operand;
2353    unsigned op2 = currentInstruction[3].u.operand;
2354    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2355
2356    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
2357        return;
2358
2359    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
2360    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
2361    compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
2362}
2363
2364void JIT::emit_op_mul(Instruction* currentInstruction)
2365{
2366    unsigned result = currentInstruction[1].u.operand;
2367    unsigned op1 = currentInstruction[2].u.operand;
2368    unsigned op2 = currentInstruction[3].u.operand;
2369    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2370
2371    // For now, only plant a fast int case if the constant operand is greater than zero.
2372    int32_t value;
2373    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
2374        emitGetVirtualRegister(op2, regT0);
2375        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2376        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
2377        emitFastArithReTagImmediate(regT0, regT0);
2378    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
2379        emitGetVirtualRegister(op1, regT0);
2380        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2381        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
2382        emitFastArithReTagImmediate(regT0, regT0);
2383    } else
2384        compileBinaryArithOp(op_mul, result, op1, op2, types);
2385
2386    emitPutVirtualRegister(result);
2387}
2388
2389void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2390{
2391    unsigned result = currentInstruction[1].u.operand;
2392    unsigned op1 = currentInstruction[2].u.operand;
2393    unsigned op2 = currentInstruction[3].u.operand;
2394    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2395
2396    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
2397    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
2398    compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
2399}
2400
2401void JIT::emit_op_div(Instruction* currentInstruction)
2402{
2403    unsigned dst = currentInstruction[1].u.operand;
2404    unsigned op1 = currentInstruction[2].u.operand;
2405    unsigned op2 = currentInstruction[3].u.operand;
2406    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2407
2408    if (isOperandConstantImmediateDouble(op1)) {
2409        emitGetVirtualRegister(op1, regT0);
2410        addPtr(tagTypeNumberRegister, regT0);
2411        movePtrToDouble(regT0, fpRegT0);
2412    } else if (isOperandConstantImmediateInt(op1)) {
2413        emitLoadInt32ToDouble(op1, fpRegT0);
2414    } else {
2415        emitGetVirtualRegister(op1, regT0);
2416        if (!types.first().definitelyIsNumber())
2417            emitJumpSlowCaseIfNotImmediateNumber(regT0);
2418        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
2419        convertInt32ToDouble(regT0, fpRegT0);
2420        Jump skipDoubleLoad = jump();
2421        notInt.link(this);
2422        addPtr(tagTypeNumberRegister, regT0);
2423        movePtrToDouble(regT0, fpRegT0);
2424        skipDoubleLoad.link(this);
2425    }
2426
2427    if (isOperandConstantImmediateDouble(op2)) {
2428        emitGetVirtualRegister(op2, regT1);
2429        addPtr(tagTypeNumberRegister, regT1);
2430        movePtrToDouble(regT1, fpRegT1);
2431    } else if (isOperandConstantImmediateInt(op2)) {
2432        emitLoadInt32ToDouble(op2, fpRegT1);
2433    } else {
2434        emitGetVirtualRegister(op2, regT1);
2435        if (!types.second().definitelyIsNumber())
2436            emitJumpSlowCaseIfNotImmediateNumber(regT1);
2437        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
2438        convertInt32ToDouble(regT1, fpRegT1);
2439        Jump skipDoubleLoad = jump();
2440        notInt.link(this);
2441        addPtr(tagTypeNumberRegister, regT1);
2442        movePtrToDouble(regT1, fpRegT1);
2443        skipDoubleLoad.link(this);
2444    }
2445    divDouble(fpRegT1, fpRegT0);
2446
2447    // Double result.
2448    moveDoubleToPtr(fpRegT0, regT0);
2449    subPtr(tagTypeNumberRegister, regT0);
2450
2451    emitPutVirtualRegister(dst, regT0);
2452}
2453
2454void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2455{
2456    unsigned result = currentInstruction[1].u.operand;
2457    unsigned op1 = currentInstruction[2].u.operand;
2458    unsigned op2 = currentInstruction[3].u.operand;
2459    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2460    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
2461#ifndef NDEBUG
2462        breakpoint();
2463#endif
2464        return;
2465    }
2466    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
2467        if (!types.first().definitelyIsNumber())
2468            linkSlowCase(iter);
2469    }
2470    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
2471        if (!types.second().definitelyIsNumber())
2472            linkSlowCase(iter);
2473    }
2474    // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2475    JITStubCall stubCall(this, cti_op_div);
2476    stubCall.addArgument(op1, regT2);
2477    stubCall.addArgument(op2, regT2);
2478    stubCall.call(result);
2479}
2480
2481void JIT::emit_op_sub(Instruction* currentInstruction)
2482{
2483    unsigned result = currentInstruction[1].u.operand;
2484    unsigned op1 = currentInstruction[2].u.operand;
2485    unsigned op2 = currentInstruction[3].u.operand;
2486    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2487
2488    compileBinaryArithOp(op_sub, result, op1, op2, types);
2489    emitPutVirtualRegister(result);
2490}
2491
2492void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2493{
2494    unsigned result = currentInstruction[1].u.operand;
2495    unsigned op1 = currentInstruction[2].u.operand;
2496    unsigned op2 = currentInstruction[3].u.operand;
2497    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2498
2499    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
2500}
2501
2502#else // USE(JSVALUE64)
2503
2504/* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
2505
2506void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
2507{
2508    Structure* numberStructure = m_globalData->numberStructure.get();
2509    Jump wasJSNumberCell1;
2510    Jump wasJSNumberCell2;
2511
2512    emitGetVirtualRegisters(src1, regT0, src2, regT1);
2513
2514    if (types.second().isReusable() && supportsFloatingPoint()) {
2515        ASSERT(types.second().mightBeNumber());
2516
2517        // Check op2 is a number
2518        Jump op2imm = emitJumpIfImmediateInteger(regT1);
2519        if (!types.second().definitelyIsNumber()) {
2520            emitJumpSlowCaseIfNotJSCell(regT1, src2);
2521            addSlowCase(checkStructure(regT1, numberStructure));
2522        }
2523
2524        // (1) In this case src2 is a reusable number cell.
2525        //     Slow case if src1 is not a number type.
2526        Jump op1imm = emitJumpIfImmediateInteger(regT0);
2527        if (!types.first().definitelyIsNumber()) {
2528            emitJumpSlowCaseIfNotJSCell(regT0, src1);
2529            addSlowCase(checkStructure(regT0, numberStructure));
2530        }
2531
2532        // (1a) if we get here, src1 is also a number cell
2533        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2534        Jump loadedDouble = jump();
2535        // (1b) if we get here, src1 is an immediate
2536        op1imm.link(this);
2537        emitFastArithImmToInt(regT0);
2538        convertInt32ToDouble(regT0, fpRegT0);
2539        // (1c)
2540        loadedDouble.link(this);
2541        if (opcodeID == op_add)
2542            addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2543        else if (opcodeID == op_sub)
2544            subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2545        else {
2546            ASSERT(opcodeID == op_mul);
2547            mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2548        }
2549
2550        // Store the result to the JSNumberCell and jump.
2551        storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2552        move(regT1, regT0);
2553        emitPutVirtualRegister(dst);
2554        wasJSNumberCell2 = jump();
2555
2556        // (2) This handles cases where src2 is an immediate number.
2557        //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
2558        op2imm.link(this);
2559        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2560    } else if (types.first().isReusable() && supportsFloatingPoint()) {
2561        ASSERT(types.first().mightBeNumber());
2562
2563        // Check op1 is a number
2564        Jump op1imm = emitJumpIfImmediateInteger(regT0);
2565        if (!types.first().definitelyIsNumber()) {
2566            emitJumpSlowCaseIfNotJSCell(regT0, src1);
2567            addSlowCase(checkStructure(regT0, numberStructure));
2568        }
2569
2570        // (1) In this case src1 is a reusable number cell.
2571        //     Slow case if src2 is not a number type.
2572        Jump op2imm = emitJumpIfImmediateInteger(regT1);
2573        if (!types.second().definitelyIsNumber()) {
2574            emitJumpSlowCaseIfNotJSCell(regT1, src2);
2575            addSlowCase(checkStructure(regT1, numberStructure));
2576        }
2577
2578        // (1a) if we get here, src2 is also a number cell
2579        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
2580        Jump loadedDouble = jump();
2581        // (1b) if we get here, src2 is an immediate
2582        op2imm.link(this);
2583        emitFastArithImmToInt(regT1);
2584        convertInt32ToDouble(regT1, fpRegT1);
2585        // (1c)
2586        loadedDouble.link(this);
2587        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2588        if (opcodeID == op_add)
2589            addDouble(fpRegT1, fpRegT0);
2590        else if (opcodeID == op_sub)
2591            subDouble(fpRegT1, fpRegT0);
2592        else {
2593            ASSERT(opcodeID == op_mul);
2594            mulDouble(fpRegT1, fpRegT0);
2595        }
2596        storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2597        emitPutVirtualRegister(dst);
2598
2599        // Store the result to the JSNumberCell and jump.
2600        storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2601        emitPutVirtualRegister(dst);
2602        wasJSNumberCell1 = jump();
2603
2604        // (2) This handles cases where src1 is an immediate number.
2605        //     Two slow cases - either src2 isn't an immediate, or the subtract overflows.
2606        op1imm.link(this);
2607        emitJumpSlowCaseIfNotImmediateInteger(regT1);
2608    } else
2609        emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2610
2611    if (opcodeID == op_add) {
2612        emitFastArithDeTagImmediate(regT0);
2613        addSlowCase(branchAdd32(Overflow, regT1, regT0));
2614    } else  if (opcodeID == op_sub) {
2615        addSlowCase(branchSub32(Overflow, regT1, regT0));
2616        signExtend32ToPtr(regT0, regT0);
2617        emitFastArithReTagImmediate(regT0, regT0);
2618    } else {
2619        ASSERT(opcodeID == op_mul);
2620        // convert eax & edx from JSImmediates to ints, and check if either are zero
2621        emitFastArithImmToInt(regT1);
2622        Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
2623        Jump op2NonZero = branchTest32(NonZero, regT1);
2624        op1Zero.link(this);
2625        // if either input is zero, add the two together, and check if the result is < 0.
2626        // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
2627        move(regT0, regT2);
2628        addSlowCase(branchAdd32(Signed, regT1, regT2));
2629        // Skip the above check if neither input is zero
2630        op2NonZero.link(this);
2631        addSlowCase(branchMul32(Overflow, regT1, regT0));
2632        signExtend32ToPtr(regT0, regT0);
2633        emitFastArithReTagImmediate(regT0, regT0);
2634    }
2635    emitPutVirtualRegister(dst);
2636
2637    if (types.second().isReusable() && supportsFloatingPoint())
2638        wasJSNumberCell2.link(this);
2639    else if (types.first().isReusable() && supportsFloatingPoint())
2640        wasJSNumberCell1.link(this);
2641}
2642
2643void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
2644{
2645    linkSlowCase(iter);
2646    if (types.second().isReusable() && supportsFloatingPoint()) {
2647        if (!types.first().definitelyIsNumber()) {
2648            linkSlowCaseIfNotJSCell(iter, src1);
2649            linkSlowCase(iter);
2650        }
2651        if (!types.second().definitelyIsNumber()) {
2652            linkSlowCaseIfNotJSCell(iter, src2);
2653            linkSlowCase(iter);
2654        }
2655    } else if (types.first().isReusable() && supportsFloatingPoint()) {
2656        if (!types.first().definitelyIsNumber()) {
2657            linkSlowCaseIfNotJSCell(iter, src1);
2658            linkSlowCase(iter);
2659        }
2660        if (!types.second().definitelyIsNumber()) {
2661            linkSlowCaseIfNotJSCell(iter, src2);
2662            linkSlowCase(iter);
2663        }
2664    }
2665    linkSlowCase(iter);
2666
2667    // additional entry point to handle -0 cases.
2668    if (opcodeID == op_mul)
2669        linkSlowCase(iter);
2670
2671    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
2672    stubCall.addArgument(src1, regT2);
2673    stubCall.addArgument(src2, regT2);
2674    stubCall.call(dst);
2675}
2676
2677void JIT::emit_op_add(Instruction* currentInstruction)
2678{
2679    unsigned result = currentInstruction[1].u.operand;
2680    unsigned op1 = currentInstruction[2].u.operand;
2681    unsigned op2 = currentInstruction[3].u.operand;
2682    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2683
2684    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
2685        JITStubCall stubCall(this, cti_op_add);
2686        stubCall.addArgument(op1, regT2);
2687        stubCall.addArgument(op2, regT2);
2688        stubCall.call(result);
2689        return;
2690    }
2691
2692    if (isOperandConstantImmediateInt(op1)) {
2693        emitGetVirtualRegister(op2, regT0);
2694        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2695        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
2696        signExtend32ToPtr(regT0, regT0);
2697        emitPutVirtualRegister(result);
2698    } else if (isOperandConstantImmediateInt(op2)) {
2699        emitGetVirtualRegister(op1, regT0);
2700        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2701        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
2702        signExtend32ToPtr(regT0, regT0);
2703        emitPutVirtualRegister(result);
2704    } else {
2705        compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
2706    }
2707}
2708
2709void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2710{
2711    unsigned result = currentInstruction[1].u.operand;
2712    unsigned op1 = currentInstruction[2].u.operand;
2713    unsigned op2 = currentInstruction[3].u.operand;
2714
2715    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2716    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
2717        return;
2718
2719    if (isOperandConstantImmediateInt(op1)) {
2720        Jump notImm = getSlowCase(iter);
2721        linkSlowCase(iter);
2722        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
2723        notImm.link(this);
2724        JITStubCall stubCall(this, cti_op_add);
2725        stubCall.addArgument(op1, regT2);
2726        stubCall.addArgument(regT0);
2727        stubCall.call(result);
2728    } else if (isOperandConstantImmediateInt(op2)) {
2729        Jump notImm = getSlowCase(iter);
2730        linkSlowCase(iter);
2731        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
2732        notImm.link(this);
2733        JITStubCall stubCall(this, cti_op_add);
2734        stubCall.addArgument(regT0);
2735        stubCall.addArgument(op2, regT2);
2736        stubCall.call(result);
2737    } else {
2738        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2739        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
2740        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
2741    }
2742}
2743
2744void JIT::emit_op_mul(Instruction* currentInstruction)
2745{
2746    unsigned result = currentInstruction[1].u.operand;
2747    unsigned op1 = currentInstruction[2].u.operand;
2748    unsigned op2 = currentInstruction[3].u.operand;
2749
2750    // For now, only plant a fast int case if the constant operand is greater than zero.
2751    int32_t value;
2752    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
2753        emitGetVirtualRegister(op2, regT0);
2754        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2755        emitFastArithDeTagImmediate(regT0);
2756        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
2757        signExtend32ToPtr(regT0, regT0);
2758        emitFastArithReTagImmediate(regT0, regT0);
2759        emitPutVirtualRegister(result);
2760    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
2761        emitGetVirtualRegister(op1, regT0);
2762        emitJumpSlowCaseIfNotImmediateInteger(regT0);
2763        emitFastArithDeTagImmediate(regT0);
2764        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
2765        signExtend32ToPtr(regT0, regT0);
2766        emitFastArithReTagImmediate(regT0, regT0);
2767        emitPutVirtualRegister(result);
2768    } else
2769        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
2770}
2771
2772void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2773{
2774    unsigned result = currentInstruction[1].u.operand;
2775    unsigned op1 = currentInstruction[2].u.operand;
2776    unsigned op2 = currentInstruction[3].u.operand;
2777
2778    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
2779        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
2780        linkSlowCase(iter);
2781        linkSlowCase(iter);
2782        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2783        JITStubCall stubCall(this, cti_op_mul);
2784        stubCall.addArgument(op1, regT2);
2785        stubCall.addArgument(op2, regT2);
2786        stubCall.call(result);
2787    } else
2788        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
2789}
2790
2791void JIT::emit_op_sub(Instruction* currentInstruction)
2792{
2793    compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
2794}
2795
2796void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2797{
2798    compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
2799}
2800
2801#endif // USE(JSVALUE64)
2802
2803/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
2804
2805#endif // USE(JSVALUE32_64)
2806
2807} // namespace JSC
2808
2809#endif // ENABLE(JIT)
2810