/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian; otherwise
 * it is little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
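
/* Example (illustrative sketch, not part of the API above): converting a value
   between host and target representation.  tswap32() byte-swaps only when host
   and target endianness differ, and is a no-op otherwise:

       uint32_t host_val = 0x12345678;
       uint32_t target_val = tswap32(host_val);   // swapped iff endianness differs
       tswap32s(&host_val);                       // in-place variant

   tswapl()/tswapls() select the 32 or 64 bit variant from TARGET_LONG_SIZE. */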

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: the ARM FPA is horrible: the two 32 bit words of a double are
   stored in big endian order! */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
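
/* Example (illustrative sketch): CPU_DoubleU exposes the two 32-bit halves of a
   float64 with 'upper'/'lower' laid out to match the host (or FPA) word order,
   so the same field names are correct on big and little endian hosts:

       CPU_DoubleU u;
       u.d = val;                  // val: some float64 value (hypothetical)
       uint32_t hi = u.l.upper;    // most significant 32 bits
       uint32_t lo = u.l.lower;    // least significant 32 bits
       uint64_t raw = u.ll;        // full 64-bit pattern
*/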

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
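
/* Example (illustrative decoding of the naming scheme above, using the _p
   primitives defined below, which take a plain host pointer):

       lduw_le_p(p)    load unsigned word (16 bit), little endian
       ldsb_p(p)       load signed byte (8 bit), endianness irrelevant
       stq_be_p(p, v)  store a 64 bit value, big endian
       ldfl_p(p)       load a 32 bit float in target cpu endianness

   e.g. reading a 32 bit little endian field from a buffer in host memory:

       uint32_t magic = ldl_le_p(buf);    // buf: host pointer (hypothetical)

   The _raw/_kernel/_user macros further down map onto these primitives. */
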
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return ((uint64_t)a << 32) | b;
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
})
#endif

#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})
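
/* Example (illustrative sketch, user mode only): translating between guest and
   host virtual addresses.  g2h() adds GUEST_BASE; h2g() subtracts it and
   asserts that the result fits the target address space:

       abi_ulong guest_addr = 0x10000;        // hypothetical guest address
       void *host_ptr = g2h(guest_addr);      // usable with the _p/_raw accessors
       if (h2g_valid(host_ptr)) {
           abi_ulong back = h2g(host_ptr);    // round-trips to guest_addr
       }
*/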

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
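
/* Example (illustrative): page mask arithmetic with the macros above, assuming
   a hypothetical TARGET_PAGE_BITS of 12 (4 KiB target pages):

       TARGET_PAGE_SIZE          == 0x1000
       TARGET_PAGE_MASK          == ~0xfff
       TARGET_PAGE_ALIGN(0x1234) == 0x2000    // rounds up to the next page
       0x1234 & TARGET_PAGE_MASK == 0x1000    // rounds down to the page start
*/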

/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif
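
/* Example (illustrative sketch, user mode only): checking and updating the
   protection flags recorded for a guest range before emulating an access.
   page_check_range() is treated here as returning a negative value when the
   range does not have the requested flags (assumption about its convention):

       if (page_check_range(addr, len, PAGE_WRITE) < 0) {
           // range not entirely writable: raise a target-specific fault
       }
       page_set_flags(start, end, PAGE_READ | PAGE_WRITE | PAGE_VALID);
       int prot = page_get_flags(addr);    // PAGE_* bits of the page at addr
*/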

CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

#define CPU_DUMP_CODE 0x00010000

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;

#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
#define CPU_INTERRUPT_INIT   0x400 /* INIT pending. */
#define CPU_INTERRUPT_SIPI   0x800 /* SIPI pending. */
#define CPU_INTERRUPT_MCE    0x1000 /* (x86 only) MCE pending. */

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0400
#define CPU_INTERRUPT_TGT_INT_2   0x0800

/* First unused bit: 0x2000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
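
/* Example (illustrative sketch): a device model raising an external interrupt
   and later clearing the request once it has been serviced:

       cpu_interrupt(env, CPU_INTERRUPT_HARD);        // request the interrupt
       // ... target code handles it ...
       cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);  // clear the pending bit
*/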

void cpu_exit(CPUState *s);

int qemu_cpu_has_work(CPUState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
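
/* Example (illustrative sketch): inserting a debugger breakpoint and a write
   watchpoint, then removing everything owned by the debugger:

       CPUBreakpoint *bp;
       cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
       // ...
       cpu_breakpoint_remove_all(env, BP_GDB);
       cpu_watchpoint_remove_all(env, BP_GDB);
*/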

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
int cpu_is_stopped(CPUState *env);
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
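
/* Example (illustrative sketch): enabling logging from an option string, much
   as the -d command line option does.  cpu_str_to_log_mask() is assumed to
   return 0 when nothing in the string matches a known log item:

       int mask = cpu_str_to_log_mask("in_asm,int");
       if (mask) {
           cpu_set_log_filename("/tmp/qemu.log");   // hypothetical path
           cpu_set_log(mask);
       }
*/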

/* IO ports API */
#include "ioport.h"

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK   (1 << 0)

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    uint32_t flags;
    char idstr[256];
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;
#endif
} RAMBlock;

typedef struct RAMList {
    uint8_t *phys_dirty;
    QLIST_HEAD(ram, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;
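
/* Example (illustrative sketch): walking the registered RAM blocks.
   QLIST_FOREACH comes from qemu-queue.h (pulled in via qemu-common.h):

       RAMBlock *block;
       ram_addr_t total = 0;
       QLIST_FOREACH(block, &ram_list.blocks, next) {
           total += block->length;    // sum of all registered RAM
       }
*/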

extern const char *mem_path;
extern int mem_prealloc;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available.  */

#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)
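
/* Example (illustrative sketch, assuming the CPUTLBEntry layout from
   cpu-defs.h): because all three flags live in the low bits of the stored
   virtual address and a plain RAM mapping has them all clear, the fast path
   reduces to one test:

       if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == 0) {
           // plain RAM: access directly through the host addend
       } else {
           // slow path: entry invalid, page not dirty, or MMIO callback
       }
*/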

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}
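
/* Example (illustrative sketch): a DMA write marking its target page dirty for
   all clients, and the display code later testing and clearing only its own
   dirty bit:

       cpu_physical_memory_set_dirty(ram_addr);       // sets all dirty flags
       // ...
       if (cpu_physical_memory_get_dirty(ram_addr, VGA_DIRTY_FLAG)) {
           // redraw, then clear just the VGA bit for this page
           cpu_physical_memory_mask_dirty_range(ram_addr, TARGET_PAGE_SIZE,
                                                VGA_DIRTY_FLAG);
       }
*/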

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_flush_coalesced_mmio_buffer(void);
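
/* Example (illustrative sketch): a framebuffer-like device declaring that
   writes to its MMIO window may be batched, and flushing the batch before it
   reads state that depends on those writes:

       qemu_register_coalesced_mmio(mmio_base, mmio_size);  // device specific values
       // ...
       qemu_flush_coalesced_mmio_buffer();                  // make pending writes visible
*/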


/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}
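
/* Example (illustrative sketch): accounting the host ticks spent in a code
   region, the way the CONFIG_PROFILER counters declared below are maintained:

       int64_t ti = profile_getclock();
       // ... region being profiled ...
       dev_time += profile_getclock() - ti;   // accumulate into a counter
*/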

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc);

#endif /* CPU_ALL_H */