/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#define TCG_TARGET_HPPA 1

#if defined(_PA_RISC1_1)
#define TCG_TARGET_REG_BITS 32
#else
#error unsupported
#endif

#define TCG_TARGET_WORDS_BIGENDIAN

#define TCG_TARGET_NB_REGS 32

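/* Register names follow the PA-RISC software conventions: %r2 is the
   return pointer (RP), %r27 the global data pointer (DP), %r28/%r29 the
   return value registers (RET0/RET1) and %r30 the stack pointer (SP). */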
enum {
    TCG_REG_R0 = 0,
    TCG_REG_R1,
    TCG_REG_RP,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_DP,
    TCG_REG_RET0,
    TCG_REG_RET1,
    TCG_REG_SP,
    TCG_REG_R31,
};

/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN 16
#define TCG_TARGET_STACK_GROWSUP
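/* Note: unlike most targets, the HPPA stack grows upward. */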

/* optional instructions */
//#define TCG_TARGET_HAS_ext8s_i32
//#define TCG_TARGET_HAS_ext16s_i32
//#define TCG_TARGET_HAS_bswap16_i32
//#define TCG_TARGET_HAS_bswap32_i32

/* Note: must be synced with dyngen-exec.h */
#define TCG_AREG0 TCG_REG_R17
#define TCG_AREG1 TCG_REG_R14
#define TCG_AREG2 TCG_REG_R15

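/* Flush freshly generated code from the data cache and flush the
   corresponding instruction cache lines, one 32-byte cache line at a
   time, so the new code is visible to instruction fetch. */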
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    start &= ~31;
    while (start <= stop) {
        asm volatile ("fdc 0(%0)\n"
                      "sync\n"
                      "fic 0(%%sr4, %0)\n"
                      "sync\n"
                      : : "r"(start) : "memory");
        start += 32;
    }
}

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);

/* Field selection types defined by hppa */
#define rnd(x)                  (((x)+0x1000)&~0x1fff)
/* lsel: select left 21 bits */
#define lsel(v,a)               (((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a)               (((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a)              (((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a)              ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
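
/*
 * Illustration (editor's sketch, not part of the original code): the
 * selectors above split (value + addend) into a left 21-bit part and a
 * right 11-bit part, and recombining the two halves gives the original
 * sum back (assuming the arithmetic right shift GCC provides here).
 * This is what lets an ldil/ldo (or addil/ldw) pair rebuild a full
 * 32-bit address.  The helper name below is hypothetical and unused.
 */
static inline int hppa_selectors_recombine(int v, int a)
{
    /* lsel yields bits 31..11 of (v + a), rsel bits 10..0.  */
    return (lsel(v, a) << 11) + rsel(v, a);   /* == v + a */
}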

#define mask(x,sz)              ((x) & ~((1<<(sz))-1))

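/* The reassemble_* helpers permute a contiguous immediate into the
   scattered bit positions that PA-RISC instruction formats use for
   branch displacements and long immediates. */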
static inline int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static inline int reassemble_14(int as14)
{
    return (((as14 & 0x1fff) << 1) |
            ((as14 & 0x2000) >> 13));
}

static inline int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static inline int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
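
/*
 * Example (editor's sketch, not in the original): this is how the backend
 * builds a "ldil L'val, %r1" further down in hppa_load_imm21l, by or-ing
 * the permuted left-21-bit selector of val into the ldil opcode word
 * 0x20200000.  The function name is hypothetical and unused.
 */
static inline uint32_t hppa_example_ldil_r1(int val)
{
    return 0x20200000 | reassemble_21(lrsel(val, 0));
}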

static inline void hppa_patch21l(uint32_t *insn, int val, int addend)
{
    val = lrsel(val, addend);
    *insn = mask(*insn, 21) | reassemble_21(val);
}

static inline void hppa_patch14r(uint32_t *insn, int val, int addend)
{
    val = rrsel(val, addend);
    *insn = mask(*insn, 14) | reassemble_14(val);
}

static inline void hppa_patch17r(uint32_t *insn, int val, int addend)
{
    val = rrsel(val, addend);
    *insn = (*insn & ~0x1f1ffd) | reassemble_17(val);
}

/* %r27 holds the PA-RISC global data pointer (%dp); these variants patch
   dp-relative references. */
static inline void hppa_patch21l_dprel(uint32_t *insn, int val, int addend)
{
    register unsigned int dp asm("r27");
    hppa_patch21l(insn, val - dp, addend);
}

static inline void hppa_patch14r_dprel(uint32_t *insn, int val, int addend)
{
    register unsigned int dp asm("r27");
    hppa_patch14r(insn, val - dp, addend);
}

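/* Patch a pc-relative 17-bit branch field: the displacement is encoded as
   a word offset relative to the branch instruction's address plus 8. */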
static inline void hppa_patch17f(uint32_t *insn, int val, int addend)
{
    int dot = (int)insn & ~0x3;
    int v = ((val + addend) - dot - 8) / 4;
    /* The word offset must fit in a signed 17-bit field.  */
    if (v >= (1 << 16) || v < -(1 << 16)) {
        printf("cannot fit branch to offset %d [%08x->%08x]\n", v, dot, val);
        abort();
    }
    *insn = (*insn & ~0x1f1ffd) | reassemble_17(v);
}

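/* The two helpers below rewrite an addil/ldw pair, which would load a
   word from memory at sym, into a ldil/ldo pair that materializes the
   value itself as an immediate. */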
static inline void hppa_load_imm21l(uint32_t *insn, int val, int addend)
{
    /* Transform addil L'sym(%dp) to ldil L'val, %r1 */
    *insn = 0x20200000 | reassemble_21(lrsel(val, 0));
}

static inline void hppa_load_imm14r(uint32_t *insn, int val, int addend)
{
    /* Transform ldw R'sym(%r1), %rN to ldo R'sym(%r1), %rN */
    hppa_patch14r(insn, val, addend);
    /* HACK: rewrite the major opcode to 0x0d (ldo) so the instruction
       computes the address instead of loading from it.  */
    if (addend == 0)
        *insn = (*insn & ~0xfc000000) | (0x0d << 26);
}