/*
 * Signed 64-bit integer multiply, 2-addr version
 *
 * We could definitely use more free registers for
 * this code.  We must spill %edx (rIBASE) because the
 * widening mull writes its high half there.  rINST (ebx)
 * is also free to clobber once vB is decoded, giving us
 * eax, ebx, ecx and rIBASE as computational temps.  On
 * top of that, we'll spill rPC (esi) for use as the vA
 * pointer and reuse rFP (edi) as the vB pointer.  Yuck.
 */
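/*
 * For orientation: the arithmetic below is the usual schoolbook 32x32
 * split of a 64-bit multiply (the low 64 bits of the product are the
 * same for signed and unsigned operands).  A rough C sketch, for
 * illustration only; none of these names appear in this file:
 *
 *   // a = a_hi:a_lo, b = b_hi:b_lo   (32-bit halves of vA and vB)
 *   uint64_t low   = (uint64_t)a_lo * b_lo;          // mull  -> rIBASE:eax
 *   uint32_t cross = a_hi*b_lo + b_hi*a_lo;          // imull, imull, addl
 *   uint32_t r_hi  = (uint32_t)(low >> 32) + cross;  // leal into rIBASE
 *   uint32_t r_lo  = (uint32_t)low;                  // left in eax
 *   // v[A+1] <- r_hi, v[A] <- r_lo
 *
 * a_hi*b_hi would only affect bits 64 and above, so it is never needed;
 * three multiplies are enough for the low 64 bits.
 */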
    /* mul-long/2addr vA, vB */
    movzbl  rINSTbl, %eax                   # eax <- BA
    andb    $$0xf, %al                      # eax <- A
    CLEAR_WIDE_REF %eax                     # clear refs in advance
    sarl    $$4, rINST                      # rINST <- B
    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
    mov     rFP, LOCAL1(%esp)               # save FP
    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
    leal    (rFP,%eax,4), %esi              # esi <- &v[A]
    leal    (rFP,rINST,4), rFP              # rFP <- &v[B]
    movl    4(%esi), %ecx                   # ecx <- Amsw
    imull   (rFP), %ecx                     # ecx <- (Amsw*Blsw)
    movl    4(rFP), %eax                    # eax <- Bmsw
    imull   (%esi), %eax                    # eax <- (Bmsw*Alsw)
    addl    %eax, %ecx                      # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
    movl    (rFP), %eax                     # eax <- Blsw
    mull    (%esi)                          # rIBASE:eax <- (Blsw*Alsw)
    leal    (%ecx,rIBASE), rIBASE           # full result now in %edx:%eax
    movl    rIBASE, 4(%esi)                 # v[A+1] <- rIBASE
    movl    %eax, (%esi)                    # v[A] <- %eax
    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
    mov     LOCAL2(%esp), rIBASE            # restore rIBASE
    mov     LOCAL1(%esp), rFP               # restore FP
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1