1/*
2 * Copyright (C) 2011 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef DFGJITCodeGenerator_h
27#define DFGJITCodeGenerator_h
28
29#if ENABLE(DFG_JIT)
30
31#include "CodeBlock.h"
32#include <dfg/DFGGenerationInfo.h>
33#include <dfg/DFGGraph.h>
34#include <dfg/DFGJITCompiler.h>
35#include <dfg/DFGOperations.h>
36#include <dfg/DFGRegisterBank.h>
37
38namespace JSC { namespace DFG {
39
40class SpeculateIntegerOperand;
41class SpeculateStrictInt32Operand;
42class SpeculateCellOperand;
43
44
45// === JITCodeGenerator ===
46//
// This class provides common infrastructure used by the speculative &
// non-speculative JITs. It provides common mechanisms for virtual and
// physical register management, for calls out from JIT code to helper
// functions, etc.
51class JITCodeGenerator {
52protected:
53    typedef MacroAssembler::TrustedImm32 TrustedImm32;
54    typedef MacroAssembler::Imm32 Imm32;
55
56    // These constants are used to set priorities for spill order for
57    // the register allocator.
58    enum SpillOrder {
59        SpillOrderNone,
60        SpillOrderConstant = 1, // no spill, and cheap fill
61        SpillOrderSpilled = 2,  // no spill
62        SpillOrderJS = 4,       // needs spill
63        SpillOrderCell = 4,     // needs spill
64        SpillOrderInteger = 5,  // needs spill and box
65        SpillOrderDouble = 6,   // needs spill and convert
66        SpillOrderMax
67    };
68
69
70public:
71    GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
72    FPRReg fillDouble(NodeIndex);
73    GPRReg fillJSValue(NodeIndex);
74
75    // lock and unlock GPR & FPR registers.
76    void lock(GPRReg reg)
77    {
78        m_gprs.lock(reg);
79    }
80    void lock(FPRReg reg)
81    {
82        m_fprs.lock(reg);
83    }
84    void unlock(GPRReg reg)
85    {
86        m_gprs.unlock(reg);
87    }
88    void unlock(FPRReg reg)
89    {
90        m_fprs.unlock(reg);
91    }
92
93    // Used to check whether a child node is on its last use,
94    // and its machine registers may be reused.
95    bool canReuse(NodeIndex nodeIndex)
96    {
97        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
98        GenerationInfo& info = m_generationInfo[virtualRegister];
99        return info.canReuse();
100    }
101    GPRReg reuse(GPRReg reg)
102    {
103        m_gprs.lock(reg);
104        return reg;
105    }
106    FPRReg reuse(FPRReg reg)
107    {
108        m_fprs.lock(reg);
109        return reg;
110    }
111
112    // Allocate a gpr/fpr.
113    GPRReg allocate()
114    {
115        VirtualRegister spillMe;
116        GPRReg gpr = m_gprs.allocate(spillMe);
117        if (spillMe != InvalidVirtualRegister)
118            spill(spillMe);
119        return gpr;
120    }
121    FPRReg fprAllocate()
122    {
123        VirtualRegister spillMe;
124        FPRReg fpr = m_fprs.allocate(spillMe);
125        if (spillMe != InvalidVirtualRegister)
126            spill(spillMe);
127        return fpr;
128    }
129
130    // Check whether a VirtualRegsiter is currently in a machine register.
131    // We use this when filling operands to fill those that are already in
132    // machine registers first (by locking VirtualRegsiters that are already
133    // in machine register before filling those that are not we attempt to
134    // avoid spilling values we will need immediately).
135    bool isFilled(NodeIndex nodeIndex)
136    {
137        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
138        GenerationInfo& info = m_generationInfo[virtualRegister];
139        return info.registerFormat() != DataFormatNone;
140    }
141    bool isFilledDouble(NodeIndex nodeIndex)
142    {
143        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
144        GenerationInfo& info = m_generationInfo[virtualRegister];
145        return info.registerFormat() == DataFormatDouble;
146    }
147
148protected:
149    JITCodeGenerator(JITCompiler& jit, bool isSpeculative)
150        : m_jit(jit)
151        , m_isSpeculative(isSpeculative)
152        , m_compileIndex(0)
153        , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
154        , m_blockHeads(jit.graph().m_blocks.size())
155    {
156    }
157
158    // These methods convert between doubles, and doubles boxed and JSValues.
159    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
160    {
161        JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr);
162        JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr);
163        m_jit.moveDoubleToPtr(fpReg, reg);
164        m_jit.subPtr(JITCompiler::tagTypeNumberRegister, reg);
165        return gpr;
166    }
167    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
168    {
169        JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr);
170        JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr);
171        m_jit.addPtr(JITCompiler::tagTypeNumberRegister, reg);
172        m_jit.movePtrToDouble(reg, fpReg);
173        return fpr;
174    }
175    GPRReg boxDouble(FPRReg fpr)
176    {
177        return boxDouble(fpr, allocate());
178    }
179    FPRReg unboxDouble(GPRReg gpr)
180    {
181        return unboxDouble(gpr, fprAllocate());
182    }
183
184    // Called on an operand once it has been consumed by a parent node.
185    void use(NodeIndex nodeIndex)
186    {
187        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
188        GenerationInfo& info = m_generationInfo[virtualRegister];
189
190        // use() returns true when the value becomes dead, and any
191        // associated resources may be freed.
192        if (!info.use())
193            return;
194
195        // Release the associated machine registers.
196        DataFormat registerFormat = info.registerFormat();
197        if (registerFormat == DataFormatDouble)
198            m_fprs.release(info.fpr());
199        else if (registerFormat != DataFormatNone)
200            m_gprs.release(info.gpr());
201    }
202
203    // Spill a VirtualRegister to the RegisterFile.
204    void spill(VirtualRegister spillMe)
205    {
206        GenerationInfo& info = m_generationInfo[spillMe];
207
208        // Check the GenerationInfo to see if this value need writing
209        // to the RegisterFile - if not, mark it as spilled & return.
210        if (!info.needsSpill()) {
211            info.setSpilled();
212            return;
213        }
214
215        DataFormat spillFormat = info.registerFormat();
216        if (spillFormat == DataFormatDouble) {
217            // All values are spilled as JSValues, so box the double via a temporary gpr.
218            GPRReg gpr = boxDouble(info.fpr());
219            m_jit.storePtr(JITCompiler::gprToRegisterID(gpr), JITCompiler::addressFor(spillMe));
220            unlock(gpr);
221            info.spill(DataFormatJSDouble);
222            return;
223        }
224
225        // The following code handles JSValues, int32s, and cells.
226        ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);
227
228        JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(info.gpr());
229        // We need to box int32 and cell values ...
230        // but on JSVALUE64 boxing a cell is a no-op!
231        if (spillFormat == DataFormatInteger)
232            m_jit.orPtr(JITCompiler::tagTypeNumberRegister, reg);
233
234        // Spill the value, and record it as spilled in its boxed form.
235        m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
236        info.spill((DataFormat)(spillFormat | DataFormatJS));
237    }
238
239    // Checks/accessors for constant values.
240    bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); }
241    bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); }
242    bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); }
243    bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); }
244    int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
245    double valueOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.valueOfDoubleConstant(nodeIndex); }
246    JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); }
247
248    Identifier* identifier(unsigned index)
249    {
250        return &m_jit.codeBlock()->identifier(index);
251    }
252
253    // Spill all VirtualRegisters back to the RegisterFile.
254    void flushRegisters()
255    {
256        for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
257            VirtualRegister name = m_gprs.name(gpr);
258            if (name != InvalidVirtualRegister) {
259                spill(name);
260                m_gprs.release(gpr);
261            }
262        }
263        for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
264            VirtualRegister name = m_fprs.name(fpr);
265            if (name != InvalidVirtualRegister) {
266                spill(name);
267                m_fprs.release(fpr);
268            }
269        }
270    }
271
272#ifndef NDEBUG
273    // Used to ASSERT flushRegisters() has been called prior to
274    // calling out from JIT code to a C helper function.
275    bool isFlushed()
276    {
277        for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
278            VirtualRegister name = m_gprs.name(gpr);
279            if (name != InvalidVirtualRegister)
280                return false;
281        }
282        for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
283            VirtualRegister name = m_fprs.name(fpr);
284            if (name != InvalidVirtualRegister)
285                return false;
286        }
287        return true;
288    }
289#endif
290
291    // Get the JSValue representation of a constant.
292    JSValue constantAsJSValue(NodeIndex nodeIndex)
293    {
294        Node& node = m_jit.graph()[nodeIndex];
295        if (isInt32Constant(nodeIndex))
296            return jsNumber(node.int32Constant());
297        if (isDoubleConstant(nodeIndex))
298            return JSValue(JSValue::EncodeAsDouble, node.numericConstant());
299        ASSERT(isJSConstant(nodeIndex));
300        return valueOfJSConstant(nodeIndex);
301    }
302    MacroAssembler::ImmPtr constantAsJSValueAsImmPtr(NodeIndex nodeIndex)
303    {
304        return MacroAssembler::ImmPtr(JSValue::encode(constantAsJSValue(nodeIndex)));
305    }
306
307    // Helper functions to enable code sharing in implementations of bit/shift ops.
308    void bitOp(NodeType op, int32_t imm, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID result)
309    {
310        switch (op) {
311        case BitAnd:
312            m_jit.and32(Imm32(imm), op1, result);
313            break;
314        case BitOr:
315            m_jit.or32(Imm32(imm), op1, result);
316            break;
317        case BitXor:
318            m_jit.xor32(Imm32(imm), op1, result);
319            break;
320        default:
321            ASSERT_NOT_REACHED();
322        }
323    }
324    void bitOp(NodeType op, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID op2, MacroAssembler::RegisterID result)
325    {
326        switch (op) {
327        case BitAnd:
328            m_jit.and32(op1, op2, result);
329            break;
330        case BitOr:
331            m_jit.or32(op1, op2, result);
332            break;
333        case BitXor:
334            m_jit.xor32(op1, op2, result);
335            break;
336        default:
337            ASSERT_NOT_REACHED();
338        }
339    }
340    void shiftOp(NodeType op, MacroAssembler::RegisterID op1, int32_t shiftAmount, MacroAssembler::RegisterID result)
341    {
342        switch (op) {
343        case BitRShift:
344            m_jit.rshift32(op1, Imm32(shiftAmount), result);
345            break;
346        case BitLShift:
347            m_jit.lshift32(op1, Imm32(shiftAmount), result);
348            break;
349        case BitURShift:
350            m_jit.urshift32(op1, Imm32(shiftAmount), result);
351            break;
352        default:
353            ASSERT_NOT_REACHED();
354        }
355    }
356    void shiftOp(NodeType op, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID shiftAmount, MacroAssembler::RegisterID result)
357    {
358        switch (op) {
359        case BitRShift:
360            m_jit.rshift32(op1, shiftAmount, result);
361            break;
362        case BitLShift:
363            m_jit.lshift32(op1, shiftAmount, result);
364            break;
365        case BitURShift:
366            m_jit.urshift32(op1, shiftAmount, result);
367            break;
368        default:
369            ASSERT_NOT_REACHED();
370        }
371    }
372
373    // Called once a node has completed code generation but prior to setting
374    // its result, to free up its children. (This must happen prior to setting
375    // the nodes result, since the node may have the same VirtualRegister as
376    // a child, and as such will use the same GeneratioInfo).
377    void useChildren(Node&);
378
379    // These method called to initialize the the GenerationInfo
380    // to describe the result of an operation.
381    void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger)
382    {
383        Node& node = m_jit.graph()[nodeIndex];
384        useChildren(node);
385
386        VirtualRegister virtualRegister = node.virtualRegister;
387        GenerationInfo& info = m_generationInfo[virtualRegister];
388
389        if (format == DataFormatInteger) {
390            m_jit.jitAssertIsInt32(reg);
391            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
392            info.initInteger(nodeIndex, node.refCount, reg);
393        } else {
394            ASSERT(format == DataFormatJSInteger);
395            m_jit.jitAssertIsJSInt32(reg);
396            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
397            info.initJSValue(nodeIndex, node.refCount, reg, format);
398        }
399    }
400    void noResult(NodeIndex nodeIndex)
401    {
402        Node& node = m_jit.graph()[nodeIndex];
403        useChildren(node);
404
405        VirtualRegister virtualRegister = node.virtualRegister;
406        GenerationInfo& info = m_generationInfo[virtualRegister];
407        info.initNone(nodeIndex, node.refCount);
408    }
409    void cellResult(GPRReg reg, NodeIndex nodeIndex)
410    {
411        Node& node = m_jit.graph()[nodeIndex];
412        useChildren(node);
413
414        VirtualRegister virtualRegister = node.virtualRegister;
415        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
416        GenerationInfo& info = m_generationInfo[virtualRegister];
417        info.initCell(nodeIndex, node.refCount, reg);
418    }
419    void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS)
420    {
421        if (format == DataFormatJSInteger)
422            m_jit.jitAssertIsJSInt32(reg);
423
424        Node& node = m_jit.graph()[nodeIndex];
425        useChildren(node);
426
427        VirtualRegister virtualRegister = node.virtualRegister;
428        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
429        GenerationInfo& info = m_generationInfo[virtualRegister];
430        info.initJSValue(nodeIndex, node.refCount, reg, format);
431    }
432    void doubleResult(FPRReg reg, NodeIndex nodeIndex)
433    {
434        Node& node = m_jit.graph()[nodeIndex];
435        useChildren(node);
436
437        VirtualRegister virtualRegister = node.virtualRegister;
438        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
439        GenerationInfo& info = m_generationInfo[virtualRegister];
440        info.initDouble(nodeIndex, node.refCount, reg);
441    }
442    void initConstantInfo(NodeIndex nodeIndex)
443    {
444        ASSERT(isInt32Constant(nodeIndex) || isDoubleConstant(nodeIndex) || isJSConstant(nodeIndex));
445        Node& node = m_jit.graph()[nodeIndex];
446        m_generationInfo[node.virtualRegister].initConstant(nodeIndex, node.refCount);
447    }
448
449    // These methods used to sort arguments into the correct registers.
450    template<GPRReg destA, GPRReg destB>
451    void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
452    {
453        // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
454        // (1) both are already in arg regs, the right way around.
455        // (2) both are already in arg regs, the wrong way around.
456        // (3) neither are currently in arg registers.
457        // (4) srcA in in its correct reg.
458        // (5) srcA in in the incorrect reg.
459        // (6) srcB in in its correct reg.
460        // (7) srcB in in the incorrect reg.
461        //
462        // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
463        // place (the MacroAssembler will omit redundant moves). This apporach will be safe in
464        // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
465        // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
466
467        if (srcB != destA) {
468            // Handle the easy cases - two simple moves.
469            m_jit.move(JITCompiler::gprToRegisterID(srcA), JITCompiler::gprToRegisterID(destA));
470            m_jit.move(JITCompiler::gprToRegisterID(srcB), JITCompiler::gprToRegisterID(destB));
471        } else if (srcA != destB) {
472            // Handle the non-swap case - just put srcB in place first.
473            m_jit.move(JITCompiler::gprToRegisterID(srcB), JITCompiler::gprToRegisterID(destB));
474            m_jit.move(JITCompiler::gprToRegisterID(srcA), JITCompiler::gprToRegisterID(destA));
475        } else
476            m_jit.swap(JITCompiler::gprToRegisterID(destB), JITCompiler::gprToRegisterID(destB));
477    }
478    template<FPRReg destA, FPRReg destB>
479    void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
480    {
481        // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
482        // (1) both are already in arg regs, the right way around.
483        // (2) both are already in arg regs, the wrong way around.
484        // (3) neither are currently in arg registers.
485        // (4) srcA in in its correct reg.
486        // (5) srcA in in the incorrect reg.
487        // (6) srcB in in its correct reg.
488        // (7) srcB in in the incorrect reg.
489        //
490        // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
491        // place (the MacroAssembler will omit redundant moves). This apporach will be safe in
492        // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
493        // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
494
495        if (srcB != destA) {
496            // Handle the easy cases - two simple moves.
497            m_jit.moveDouble(JITCompiler::fprToRegisterID(srcA), JITCompiler::fprToRegisterID(destA));
498            m_jit.moveDouble(JITCompiler::fprToRegisterID(srcB), JITCompiler::fprToRegisterID(destB));
499            return;
500        }
501
502        if (srcA != destB) {
503            // Handle the non-swap case - just put srcB in place first.
504            m_jit.moveDouble(JITCompiler::fprToRegisterID(srcB), JITCompiler::fprToRegisterID(destB));
505            m_jit.moveDouble(JITCompiler::fprToRegisterID(srcA), JITCompiler::fprToRegisterID(destA));
506            return;
507        }
508
509        ASSERT(srcB == destA && srcA == destB);
510        // Need to swap; pick a temporary register.
511        FPRReg temp;
512        if (destA != JITCompiler::argumentFPR3 && destA != JITCompiler::argumentFPR3)
513            temp = JITCompiler::argumentFPR3;
514        else if (destA != JITCompiler::argumentFPR2 && destA != JITCompiler::argumentFPR2)
515            temp = JITCompiler::argumentFPR2;
516        else {
517            ASSERT(destA != JITCompiler::argumentFPR1 && destA != JITCompiler::argumentFPR1);
518            temp = JITCompiler::argumentFPR1;
519        }
520        m_jit.moveDouble(JITCompiler::fprToRegisterID(destA), JITCompiler::fprToRegisterID(temp));
521        m_jit.moveDouble(JITCompiler::fprToRegisterID(destB), JITCompiler::fprToRegisterID(destA));
522        m_jit.moveDouble(JITCompiler::fprToRegisterID(temp), JITCompiler::fprToRegisterID(destB));
523    }
524    void setupStubArguments(GPRReg arg1, GPRReg arg2)
525    {
526        setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR2>(arg1, arg2);
527    }
528    void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
529    {
530        // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
531        // Then we can use setupTwoStubArgs to fix arg2/arg3.
532        if (arg2 != JITCompiler::argumentGPR1 && arg3 != JITCompiler::argumentGPR1) {
533            m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
534            setupTwoStubArgs<JITCompiler::argumentGPR2, JITCompiler::argumentGPR3>(arg2, arg3);
535            return;
536        }
537
538        // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
539        // Then we can use setupTwoStubArgs to fix arg1/arg3.
540        if (arg1 != JITCompiler::argumentGPR2 && arg3 != JITCompiler::argumentGPR2) {
541            m_jit.move(JITCompiler::gprToRegisterID(arg2), JITCompiler::argumentRegister2);
542            setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR3>(arg1, arg3);
543            return;
544        }
545
546        // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
547        // Then we can use setupTwoStubArgs to fix arg1/arg2.
548        if (arg1 != JITCompiler::argumentGPR3 && arg2 != JITCompiler::argumentGPR3) {
549            m_jit.move(JITCompiler::gprToRegisterID(arg3), JITCompiler::argumentRegister3);
550            setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR2>(arg1, arg2);
551            return;
552        }
553
554        // If we get here, we haven't been able to move any of arg1/arg2/arg3.
555        // Since all three are blocked, then all three must already be in the argument register.
556        // But are they in the right ones?
557
558        // First, ensure arg1 is in place.
559        if (arg1 != JITCompiler::argumentGPR1) {
560            m_jit.swap(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
561
562            // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
563            ASSERT(arg2 == JITCompiler::argumentGPR1 || arg3 == JITCompiler::argumentGPR1);
564            // If arg2 was in argumentGPR1 it no longer is (due to the swap).
565            // Otherwise arg3 must have been. Mark him as moved.
566            if (arg2 == JITCompiler::argumentGPR1)
567                arg2 = arg1;
568            else
569                arg3 = arg1;
570        }
571
572        // Either arg2 & arg3 need swapping, or we're all done.
573        ASSERT((arg2 == JITCompiler::argumentGPR2 || arg3 == JITCompiler::argumentGPR3)
574            || (arg2 == JITCompiler::argumentGPR3 || arg3 == JITCompiler::argumentGPR2));
575
576        if (arg2 != JITCompiler::argumentGPR2)
577            m_jit.swap(JITCompiler::argumentRegister2, JITCompiler::argumentRegister3);
578    }
579
580    // These methods add calls to C++ helper functions.
581    void callOperation(J_DFGOperation_EJP operation, GPRReg result, GPRReg arg1, void* pointer)
582    {
583        ASSERT(isFlushed());
584
585        m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
586        m_jit.move(JITCompiler::TrustedImmPtr(pointer), JITCompiler::argumentRegister2);
587        m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
588
589        appendCallWithExceptionCheck(operation);
590        m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
591    }
592    void callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
593    {
594        callOperation((J_DFGOperation_EJP)operation, result, arg1, identifier);
595    }
596    void callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
597    {
598        ASSERT(isFlushed());
599
600        m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
601        m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
602
603        appendCallWithExceptionCheck(operation);
604        m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
605    }
606    void callOperation(Z_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
607    {
608        ASSERT(isFlushed());
609
610        m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
611        m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
612
613        appendCallWithExceptionCheck(operation);
614        m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
615    }
616    void callOperation(Z_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
617    {
618        ASSERT(isFlushed());
619
620        setupStubArguments(arg1, arg2);
621        m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
622
623        appendCallWithExceptionCheck(operation);
624        m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
625    }
626    void callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
627    {
628        ASSERT(isFlushed());
629
630        setupStubArguments(arg1, arg2);
631        m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
632
633        appendCallWithExceptionCheck(operation);
634        m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
635    }
636    void callOperation(V_DFGOperation_EJJP operation, GPRReg arg1, GPRReg arg2, void* pointer)
637    {
638        ASSERT(isFlushed());
639
640        setupStubArguments(arg1, arg2);
641        m_jit.move(JITCompiler::TrustedImmPtr(pointer), JITCompiler::argumentRegister3);
642        m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
643
644        appendCallWithExceptionCheck(operation);
645    }
646    void callOperation(V_DFGOperation_EJJI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
647    {
648        callOperation((V_DFGOperation_EJJP)operation, arg1, arg2, identifier);
649    }
650    void callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
651    {
652        ASSERT(isFlushed());
653
654        setupStubArguments(arg1, arg2, arg3);
655        m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
656
657        appendCallWithExceptionCheck(operation);
658    }
659    void callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
660    {
661        ASSERT(isFlushed());
662
663        setupTwoStubArgs<JITCompiler::argumentFPR0, JITCompiler::argumentFPR1>(arg1, arg2);
664
665        m_jit.appendCall(operation);
666        m_jit.moveDouble(JITCompiler::fpReturnValueRegister, JITCompiler::fprToRegisterID(result));
667    }
668
669    void appendCallWithExceptionCheck(const FunctionPtr& function)
670    {
671        m_jit.appendCallWithExceptionCheck(function, m_jit.graph()[m_compileIndex].exceptionInfo);
672    }
673
674    void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
675    {
676        m_branches.append(BranchRecord(jump, destination));
677    }
678
679    void linkBranches()
680    {
681        for (size_t i = 0; i < m_branches.size(); ++i) {
682            BranchRecord& branch = m_branches[i];
683            branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
684        }
685    }
686
687#ifndef NDEBUG
688    void dump(const char* label = 0);
689#endif
690
691#if DFG_CONSISTENCY_CHECK
692    void checkConsistency();
693#else
694    void checkConsistency() {}
695#endif
696
697    // The JIT, while also provides MacroAssembler functionality.
698    JITCompiler& m_jit;
699    // This flag is used to distinguish speculative and non-speculative
700    // code generation. This is significant when filling spilled values
701    // from the RegisterFile. When spilling we attempt to store information
702    // as to the type of boxed value being stored (int32, double, cell), and
703    // when filling on the speculative path we will retrieve this type info
704    // where available. On the non-speculative path, however, we cannot rely
705    // on the spill format info, since the a value being loaded might have
706    // been spilled by either the speculative or non-speculative paths (where
707    // we entered the non-speculative path on an intervening bail-out), and
708    // the value may have been boxed differently on the two paths.
709    bool m_isSpeculative;
710    // The current node being generated.
711    BlockIndex m_block;
712    NodeIndex m_compileIndex;
713    // Virtual and physical register maps.
714    Vector<GenerationInfo, 32> m_generationInfo;
715    RegisterBank<GPRReg, numberOfGPRs, SpillOrder, SpillOrderNone, SpillOrderMax> m_gprs;
716    RegisterBank<FPRReg, numberOfFPRs, SpillOrder, SpillOrderNone, SpillOrderMax> m_fprs;
717
718    Vector<MacroAssembler::Label> m_blockHeads;
719    struct BranchRecord {
720        BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
721            : jump(jump)
722            , destination(destination)
723        {
724        }
725
726        MacroAssembler::Jump jump;
727        BlockIndex destination;
728    };
729    Vector<BranchRecord, 8> m_branches;
730};
731
732// === Operand types ===
733//
734// IntegerOperand, DoubleOperand and JSValueOperand.
735//
// These classes are used to lock the operands to a node into machine
// registers. These classes implement a pattern of locking a value
// into a register at the point of construction only if it is already in
// registers, and otherwise loading it lazily at the point it is first
// used. We do so in order to attempt to avoid spilling one operand
// in order to make space available for another.
742
743class IntegerOperand {
744public:
745    explicit IntegerOperand(JITCodeGenerator* jit, NodeIndex index)
746        : m_jit(jit)
747        , m_index(index)
748        , m_gprOrInvalid(InvalidGPRReg)
749#ifndef NDEBUG
750        , m_format(DataFormatNone)
751#endif
752    {
753        ASSERT(m_jit);
754        if (jit->isFilled(index))
755            gpr();
756    }
757
758    ~IntegerOperand()
759    {
760        ASSERT(m_gprOrInvalid != InvalidGPRReg);
761        m_jit->unlock(m_gprOrInvalid);
762    }
763
764    NodeIndex index() const
765    {
766        return m_index;
767    }
768
769    GPRReg gpr()
770    {
771        if (m_gprOrInvalid == InvalidGPRReg)
772            m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
773        return m_gprOrInvalid;
774    }
775
776    DataFormat format()
777    {
778        gpr(); // m_format is set when m_gpr is locked.
779        ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
780        return m_format;
781    }
782
783    MacroAssembler::RegisterID registerID()
784    {
785        return JITCompiler::gprToRegisterID(gpr());
786    }
787
788private:
789    JITCodeGenerator* m_jit;
790    NodeIndex m_index;
791    GPRReg m_gprOrInvalid;
792    DataFormat m_format;
793};
794
795class DoubleOperand {
796public:
797    explicit DoubleOperand(JITCodeGenerator* jit, NodeIndex index)
798        : m_jit(jit)
799        , m_index(index)
800        , m_fprOrInvalid(InvalidFPRReg)
801    {
802        ASSERT(m_jit);
803        if (jit->isFilledDouble(index))
804            fpr();
805    }
806
807    ~DoubleOperand()
808    {
809        ASSERT(m_fprOrInvalid != InvalidFPRReg);
810        m_jit->unlock(m_fprOrInvalid);
811    }
812
813    NodeIndex index() const
814    {
815        return m_index;
816    }
817
818    FPRReg fpr()
819    {
820        if (m_fprOrInvalid == InvalidFPRReg)
821            m_fprOrInvalid = m_jit->fillDouble(index());
822        return m_fprOrInvalid;
823    }
824
825    MacroAssembler::FPRegisterID registerID()
826    {
827        return JITCompiler::fprToRegisterID(fpr());
828    }
829
830private:
831    JITCodeGenerator* m_jit;
832    NodeIndex m_index;
833    FPRReg m_fprOrInvalid;
834};
835
836class JSValueOperand {
837public:
838    explicit JSValueOperand(JITCodeGenerator* jit, NodeIndex index)
839        : m_jit(jit)
840        , m_index(index)
841        , m_gprOrInvalid(InvalidGPRReg)
842    {
843        ASSERT(m_jit);
844        if (jit->isFilled(index))
845            gpr();
846    }
847
848    ~JSValueOperand()
849    {
850        ASSERT(m_gprOrInvalid != InvalidGPRReg);
851        m_jit->unlock(m_gprOrInvalid);
852    }
853
854    NodeIndex index() const
855    {
856        return m_index;
857    }
858
859    GPRReg gpr()
860    {
861        if (m_gprOrInvalid == InvalidGPRReg)
862            m_gprOrInvalid = m_jit->fillJSValue(index());
863        return m_gprOrInvalid;
864    }
865
866    MacroAssembler::RegisterID registerID()
867    {
868        return JITCompiler::gprToRegisterID(gpr());
869    }
870
871private:
872    JITCodeGenerator* m_jit;
873    NodeIndex m_index;
874    GPRReg m_gprOrInvalid;
875};
876
877
878// === Temporaries ===
879//
880// These classes are used to allocate temporary registers.
881// A mechanism is provided to attempt to reuse the registers
882// currently allocated to child nodes whose value is consumed
883// by, and not live after, this operation.
884
885class GPRTemporary {
886public:
887    GPRTemporary(JITCodeGenerator*);
888    GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&);
889    GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
890    GPRTemporary(JITCodeGenerator*, IntegerOperand&);
891    GPRTemporary(JITCodeGenerator*, IntegerOperand&, IntegerOperand&);
892    GPRTemporary(JITCodeGenerator*, SpeculateCellOperand&);
893    GPRTemporary(JITCodeGenerator*, JSValueOperand&);
894
895    ~GPRTemporary()
896    {
897        m_jit->unlock(gpr());
898    }
899
900    GPRReg gpr() const
901    {
902        ASSERT(m_gpr != InvalidGPRReg);
903        return m_gpr;
904    }
905
906    MacroAssembler::RegisterID registerID()
907    {
908        ASSERT(m_gpr != InvalidGPRReg);
909        return JITCompiler::gprToRegisterID(m_gpr);
910    }
911
912protected:
913    GPRTemporary(JITCodeGenerator* jit, GPRReg lockedGPR)
914        : m_jit(jit)
915        , m_gpr(lockedGPR)
916    {
917    }
918
919private:
920    JITCodeGenerator* m_jit;
921    GPRReg m_gpr;
922};
923
924class FPRTemporary {
925public:
926    FPRTemporary(JITCodeGenerator*);
927    FPRTemporary(JITCodeGenerator*, DoubleOperand&);
928    FPRTemporary(JITCodeGenerator*, DoubleOperand&, DoubleOperand&);
929
930    ~FPRTemporary()
931    {
932        m_jit->unlock(fpr());
933    }
934
935    FPRReg fpr() const
936    {
937        ASSERT(m_fpr != InvalidFPRReg);
938        return m_fpr;
939    }
940
941    MacroAssembler::FPRegisterID registerID()
942    {
943        ASSERT(m_fpr != InvalidFPRReg);
944        return JITCompiler::fprToRegisterID(m_fpr);
945    }
946
947protected:
948    FPRTemporary(JITCodeGenerator* jit, FPRReg lockedFPR)
949        : m_jit(jit)
950        , m_fpr(lockedFPR)
951    {
952    }
953
954private:
955    JITCodeGenerator* m_jit;
956    FPRReg m_fpr;
957};
958
959
960// === Results ===
961//
962// These classes lock the result of a call to a C++ helper function.
963
964class GPRResult : public GPRTemporary {
965public:
966    GPRResult(JITCodeGenerator* jit)
967        : GPRTemporary(jit, lockedResult(jit))
968    {
969    }
970
971private:
972    static GPRReg lockedResult(JITCodeGenerator* jit)
973    {
974        jit->lock(JITCompiler::returnValueGPR);
975        return JITCompiler::returnValueGPR;
976    }
977};
978
979class FPRResult : public FPRTemporary {
980public:
981    FPRResult(JITCodeGenerator* jit)
982        : FPRTemporary(jit, lockedResult(jit))
983    {
984    }
985
986private:
987    static FPRReg lockedResult(JITCodeGenerator* jit)
988    {
989        jit->lock(JITCompiler::returnValueFPR);
990        return JITCompiler::returnValueFPR;
991    }
992};
993
994} } // namespace JSC::DFG
995
996#endif
997#endif
998
999