%verify "executed"
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
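    @ Format 23x: AA lives in the high byte of the opcode unit (rINST); the
    @ second code unit packs CC in its high byte and BB in its low byte.
    @ In rough C terms (names are illustrative), the handler does:
    @     ArrayObject* a = (ArrayObject*) fp[BB];
    @     u4 i = fp[CC];
    @     if (a == NULL) goto throwNullPointer;
    @     if (i >= a->length) goto throwArrayIndex;
    @     *(s8*)&fp[AA] = ((s8*) a->contents)[i];   /* fills vAA and vAA+1 */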
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
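    @ GET_VREG reads a 32-bit virtual register from the interpreter frame;
    @ on this target it expands to roughly "ldr reg, [rFP, vreg, lsl #2]".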
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
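    @ The element address is computed speculatively, before the bounds check
    @ below; that's safe because r0 isn't dereferenced until the check has
    @ passed.  lsl #3 scales the index by 8, the width of a long/double.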
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .L${opcode}_finish          @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.
%break

.L${opcode}_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
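    @ FETCH_ADVANCE_INST(2) advances rPC past this two-unit instruction and
    @ preloads the next instruction's first code unit into rINST.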
    ldrd    r2, [r0, #offArrayObject_contents]  @ r2/r3<- vBB[vCC]
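    @ ldrd requires a doubleword-aligned address on ARMv5TE; the 64-bit
    @ alignment of long/double array contents (see the header comment above)
    @ guarantees that here.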
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
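    @ stmia writes the register pair to the adjacent 32-bit vregs fp[AA] and
    @ fp[AA+1], which together hold the 64-bit result.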
    GOTO_OPCODE(ip)                     @ jump to next instruction