/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
20cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#ifndef _EXEC_ALL_H_
21cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define _EXEC_ALL_H_
22cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
23cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#include "qemu-common.h"
24cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#include "exec/cpu-common.h"
25cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#include "exec/cpu-all.h"
26cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
27cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)/* allow to see translation results - the slowdown should be negligible, so we leave it */
28cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define DEBUG_DISAS
29cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
30cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)/* Page tracking code uses ram addresses in system mode, and virtual
31cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
32cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)   type.  */
33cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#if defined(CONFIG_USER_ONLY)
34cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)typedef abi_ulong tb_page_addr_t;
35cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#else
36cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)typedef ram_addr_t tb_page_addr_t;
37cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#endif
38cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
39cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)/* is_jmp field values */
40cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define DISAS_NEXT    0 /* next instruction can be analyzed */
41cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define DISAS_JUMP    1 /* only pc was modified dynamically */
42cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
43cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define DISAS_TB_JUMP 3 /* only pc was modified statically */
44cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
45cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)struct TranslationBlock;
46cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)typedef struct TranslationBlock TranslationBlock;
47cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
48cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)/* XXX: make safe guess about sizes */
49cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define MAX_OP_PER_INSTR 96
50cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)/* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
51cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define MAX_OPC_PARAM 10
52cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define OPC_BUF_SIZE 2048
53cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
54cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
55cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)/* Maximum size a TCG op can expand to.  This is complicated because a
56cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)   single op may require several host instructions and register reloads.
57cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)   For now take a wild guess at 192 bytes, which should allow at least
58cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)   a couple of fixup instructions per argument.  */
59cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define TCG_MAX_OP_SIZE 192
60cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
61cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
62cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
63cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
64cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
65cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
66cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
67cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
68cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)extern target_ulong gen_opc_jump_pc[2];
69cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
70cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
71cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#include "qemu/log.h"
72cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
73cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
74cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
75cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
76cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                          int pc_pos);
77cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
78cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)unsigned long code_gen_max_block_size(void);
79cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void cpu_gen_init(void);
80cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void tcg_exec_init(unsigned long tb_size);
81cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
82cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                 int *gen_code_size_ptr);
83cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)bool cpu_restore_state(struct TranslationBlock *tb,
84cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                       CPUArchState *env, uintptr_t searched_pc);
85cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
86cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
87cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)TranslationBlock *tb_gen_code(CPUArchState *env,
88cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                              target_ulong pc, target_ulong cs_base, int flags,
89cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                              int cflags);
90cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void cpu_exec_init(CPUArchState *env);
91cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
92cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
93cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void tb_invalidate_phys_page_range(hwaddr start, hwaddr end,
94cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                                   int is_cpu_write_access);
95cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void tb_invalidate_page_range(target_ulong start, target_ulong end);
96cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#if !defined(CONFIG_USER_ONLY)
97cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void tlb_flush_page(CPUArchState *env, target_ulong addr);
98cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void tlb_flush(CPUArchState *env, int flush_global);
99cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)int tlb_set_page_exec(CPUArchState *env, target_ulong vaddr,
100cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                      hwaddr paddr, int prot,
101cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                      int mmu_idx, int is_softmmu);
102cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)int tlb_set_page(CPUArchState *env1, target_ulong vaddr,
103cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                 hwaddr paddr, int prot,
104cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)                 int mmu_idx, int is_softmmu);
105cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)void tb_reset_jump_recursive(TranslationBlock *tb);
106cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#else
/* User-mode emulation has no softmmu TLB, so flushing a page is a no-op. */
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}
110cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
/* User-mode emulation has no softmmu TLB, so a full flush is a no-op. */
static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
114cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)#endif
115cedac228d2dd51db4b79ea1e72c7f249408ee061Torne (Richard Coles)
/* Descriptor for one guest physical page, as returned by phys_page_find(). */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* NOTE(review): presumably the page's offset within its memory
       region, used for MMIO dispatch — confirm against exec.c */
    ram_addr_t region_offset;
} PhysPageDesc;
121
122PhysPageDesc *phys_page_find(hwaddr index);
123PhysPageDesc *phys_page_find_alloc(hwaddr index, int alloc);
124
125int io_mem_watch;
126
127#define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
128
129#define CODE_GEN_PHYS_HASH_BITS     15
130#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
131
132#define MIN_CODE_GEN_BUFFER_SIZE     (1024 * 1024)
133
134/* estimated block size for TB allocation */
135/* XXX: use a per code average code fragment size and modulate it
136   according to the host CPU */
137#if defined(CONFIG_SOFTMMU)
138#define CODE_GEN_AVG_BLOCK_SIZE 128
139#else
140#define CODE_GEN_AVG_BLOCK_SIZE 64
141#endif
142
143#if defined(__arm__) || defined(_ARCH_PPC) \
144    || defined(__x86_64__) || defined(__i386__) \
145    || defined(__sparc__) || defined(__aarch64__) \
146    || defined(CONFIG_TCG_INTERPRETER)
147#define USE_DIRECT_JUMP
148#endif
149
150struct TranslationBlock {
151    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
152    target_ulong cs_base; /* CS base for this block */
153    uint64_t flags; /* flags defining in which context the code was generated */
154    uint16_t size;      /* size of target code for this block (1 <=
155                           size <= TARGET_PAGE_SIZE) */
156    uint16_t cflags;    /* compile flags */
157#define CF_COUNT_MASK  0x7fff
158#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
159
160    uint8_t *tc_ptr;    /* pointer to the translated code */
161    /* next matching tb for physical address. */
162    struct TranslationBlock *phys_hash_next;
163    /* first and second physical page containing code. The lower bit
164       of the pointer tells the index in page_next[] */
165    struct TranslationBlock *page_next[2];
166    tb_page_addr_t page_addr[2];
167
168    /* the following data are used to directly call another TB from
169       the code of this one. */
170    uint16_t tb_next_offset[2]; /* offset of original jump target */
171#ifdef USE_DIRECT_JUMP
172    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
173#else
174    uintptr_t tb_next[2]; /* address of jump generated code */
175#endif
176    /* list of TBs jumping to this one. This is a circular list using
177       the two least significant bits of the pointers to tell what is
178       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
179       jmp_first */
180    struct TranslationBlock *jmp_next[2];
181    struct TranslationBlock *jmp_first;
182    uint32_t icount;
183
184#ifdef CONFIG_MEMCHECK
185    /* Maps PCs in this translation block to corresponding PCs in guest address
186     * space. The array is arranged in such way, that every even entry contains
187     * PC in the translation block, followed by an odd entry that contains
188     * guest PC corresponding to that PC in the translation block. This
189     * arrangement is set by tcg_gen_code_common that initializes this array
190     * when performing guest code translation. */
191    uintptr_t*   tpc2gpc;
192    /* Number of pairs (pc_tb, pc_guest) in tpc2gpc array. */
193    unsigned int    tpc2gpc_pairs;
194#endif  // CONFIG_MEMCHECK
195};
196
197#include "exec/spinlock.h"
198
199typedef struct TBContext TBContext;
200
201struct TBContext {
202
203    TranslationBlock *tbs;
204    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
205    int nb_tbs;
206    /* any access to the tbs or the page table must use this lock */
207    spinlock_t tb_lock;
208
209    /* statistics */
210    int tb_flush_count;
211    int tb_phys_invalidate_count;
212
213    int tb_invalidated_flag;
214};
215
216static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
217{
218    target_ulong tmp;
219    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
220    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
221}
222
223static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
224{
225    target_ulong tmp;
226    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
227    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
228	    | (tmp & TB_JMP_ADDR_MASK));
229}
230
231static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
232{
233    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
234}
235
236#ifdef CONFIG_MEMCHECK
237/* Gets translated PC for a given (translated PC, guest PC) pair.
238 * Return:
239 *  Translated PC, or NULL if pair index was too large.
240 */
241static inline target_ulong
242tb_get_tb_pc(const TranslationBlock* tb, unsigned int pair)
243{
244    return (tb->tpc2gpc != NULL && pair < tb->tpc2gpc_pairs) ?
245                                                    tb->tpc2gpc[pair * 2] : 0;
246}
247
248/* Gets guest PC for a given (translated PC, guest PC) pair.
249 * Return:
250 *  Guest PC, or NULL if pair index was too large.
251 */
252static inline target_ulong
253tb_get_guest_pc(const TranslationBlock* tb, unsigned int pair)
254{
255    return (tb->tpc2gpc != NULL && pair < tb->tpc2gpc_pairs) ?
256            tb->tpc2gpc[pair * 2 + 1] : 0;
257}
258
259/* Gets guest PC for a given translated PC.
260 * Return:
261 *  Guest PC for a given translated PC, or NULL if there was no pair, matching
262 *  translated PC in tb's tpc2gpc array.
263 */
264static inline target_ulong
265tb_search_guest_pc_from_tb_pc(const TranslationBlock* tb, target_ulong tb_pc)
266{
267    if (tb->tpc2gpc != NULL && tb->tpc2gpc_pairs != 0) {
268        unsigned int m_min = 0;
269        unsigned int m_max = (tb->tpc2gpc_pairs - 1) << 1;
270        /* Make sure that tb_pc is within TB array. */
271        if (tb_pc < tb->tpc2gpc[0]) {
272            return 0;
273        }
274        while (m_min <= m_max) {
275            const unsigned int m = ((m_min + m_max) >> 1) & ~1;
276            if (tb_pc < tb->tpc2gpc[m]) {
277                m_max = m - 2;
278            } else if (m == m_max || tb_pc < tb->tpc2gpc[m + 2]) {
279                return tb->tpc2gpc[m + 1];
280            } else {
281                m_min = m + 2;
282            }
283        }
284        return tb->tpc2gpc[m_max + 1];
285    }
286    return 0;
287}
288#endif  // CONFIG_MEMCHECK
289
290TranslationBlock *tb_alloc(target_ulong pc);
291void tb_free(TranslationBlock *tb);
292void tb_flush(CPUArchState *env);
293void tb_link_phys(TranslationBlock *tb,
294                  target_ulong phys_pc, target_ulong phys_page2);
295void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
296void tb_invalidate_phys_page_fast0(hwaddr start, int len);
297
298extern uint8_t *code_gen_ptr;
299extern int code_gen_max_blocks;
300
301#if defined(USE_DIRECT_JUMP)
302
303#if defined(CONFIG_TCG_INTERPRETER)
304static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
305{
306    /* patch the branch destination */
307    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
308    /* no need to flush icache explicitly */
309}
310#elif defined(_ARCH_PPC)
311void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
312#define tb_set_jmp_target1 ppc_tb_set_jmp_target
313#elif defined(__i386__) || defined(__x86_64__)
314static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
315{
316    /* patch the branch destination */
317    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
318    /* no need to flush icache explicitly */
319}
320#elif defined(__aarch64__)
321void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
322#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
323#elif defined(__arm__)
324static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
325{
326#if !QEMU_GNUC_PREREQ(4, 1)
327    register unsigned long _beg __asm ("a1");
328    register unsigned long _end __asm ("a2");
329    register unsigned long _flg __asm ("a3");
330#endif
331
332    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
333    *(uint32_t *)jmp_addr =
334        (*(uint32_t *)jmp_addr & ~0xffffff)
335        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
336
337#if QEMU_GNUC_PREREQ(4, 1)
338    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
339#else
340    /* flush icache */
341    _beg = jmp_addr;
342    _end = jmp_addr + 4;
343    _flg = 0;
344    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
345#endif
346}
347#elif defined(__sparc__)
348void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
349#else
350#error tb_set_jmp_target1 is missing
351#endif
352
353static inline void tb_set_jmp_target(TranslationBlock *tb,
354                                     int n, uintptr_t addr)
355{
356    uint16_t offset = tb->tb_jmp_offset[n];
357    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
358    offset = tb->tb_jmp_offset[n + 2];
359    if (offset != 0xffff)
360        tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
361}
362
363#else
364
365/* set the jump target */
366static inline void tb_set_jmp_target(TranslationBlock *tb,
367                                     int n, uintptr_t addr)
368{
369    tb->tb_next[n] = addr;
370}
371
372#endif
373
374static inline void tb_add_jump(TranslationBlock *tb, int n,
375                               TranslationBlock *tb_next)
376{
377    /* NOTE: this test is only needed for thread safety */
378    if (!tb->jmp_next[n]) {
379        /* patch the native jump address */
380        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
381
382        /* add in TB jmp circular list */
383        tb->jmp_next[n] = tb_next->jmp_first;
384        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
385    }
386}
387
388/* GETRA is the true target of the return instruction that we'll execute,
389   defined here for simplicity of defining the follow-up macros.  */
390#if defined(CONFIG_TCG_INTERPRETER)
391extern uintptr_t tci_tb_ptr;
392# define GETRA() tci_tb_ptr
393#else
394# define GETRA() \
395    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
396#endif
397
398/* The true return address will often point to a host insn that is part of
399   the next translated guest insn.  Adjust the address backward to point to
400   the middle of the call insn.  Subtracting one would do the job except for
401   several compressed mode architectures (arm, mips) which set the low bit
402   to indicate the compressed mode; subtracting two works around that.  It
403   is also the case that there are no host isas that contain a call insn
404   smaller than 4 bytes, so we don't worry about special-casing this.  */
405#if defined(CONFIG_TCG_INTERPRETER)
406# define GETPC_ADJ   0
407#else
408# define GETPC_ADJ   2
409#endif
410
411#if !defined(CONFIG_USER_ONLY)
412
413void phys_mem_set_alloc(void *(*alloc)(size_t));
414
415TranslationBlock *tb_find_pc(unsigned long pc_ptr);
416
417extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
418extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
419extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
420
421void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
422              void *retaddr);
423
424#include "exec/softmmu_defs.h"
425
426#define ACCESS_TYPE (NB_MMU_MODES + 1)
427#define MEMSUFFIX _code
428#define env cpu_single_env
429
430#define DATA_SIZE 1
431#include "exec/softmmu_header.h"
432
433#define DATA_SIZE 2
434#include "exec/softmmu_header.h"
435
436#define DATA_SIZE 4
437#include "exec/softmmu_header.h"
438
439#define DATA_SIZE 8
440#include "exec/softmmu_header.h"
441
442#undef ACCESS_TYPE
443#undef MEMSUFFIX
444#undef env
445
446#endif
447
448#if defined(CONFIG_USER_ONLY)
449static inline target_ulong get_page_addr_code(CPUArchState *env1, target_ulong addr)
450{
451    return addr;
452}
453#else
454/* NOTE: this function can trigger an exception */
455/* NOTE2: the returned address is not exactly the physical address: it
456   is the offset relative to phys_ram_base */
457static inline target_ulong get_page_addr_code(CPUArchState *env1, target_ulong addr)
458{
459    int mmu_idx, page_index, pd;
460    void *p;
461
462    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
463    mmu_idx = cpu_mmu_index(env1);
464    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
465                 (addr & TARGET_PAGE_MASK))) {
466        ldub_code(addr);
467    }
468    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
469    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
470#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
471        do_unassigned_access(addr, 0, 1, 0, 4);
472#else
473        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
474#endif
475    }
476    p = (void *)(unsigned long)addr
477        + env1->tlb_table[mmu_idx][page_index].addend;
478    return qemu_ram_addr_from_host_nofail(p);
479}
480#endif
481
482typedef void (CPUDebugExcpHandler)(CPUArchState *env);
483
484void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
485
486/* vl.c */
487extern int singlestep;
488
489/* Deterministic execution requires that IO only be performed on the last
490   instruction of a TB so that interrupts take effect immediately.  */
491static inline int can_do_io(CPUArchState *env)
492{
493    if (!use_icount) {
494        return 1;
495    }
496    /* If not executing code then assume we are ok.  */
497    if (!env->current_tb) {
498        return 1;
499    }
500    return env->can_do_io != 0;
501}
502
503#endif
504