op_helper.c revision d62e538a5a9627bb1306ba14130699aaba9b585f
1/*
2 *  MIPS emulation helpers for qemu.
3 *
4 *  Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include <stdlib.h>
20#include "cpu.h"
21#include "dyngen-exec.h"
22
23#include "qemu/host-utils.h"
24
25#include "helper.h"
26
27#if !defined(CONFIG_USER_ONLY)
28#include "exec/softmmu_exec.h"
29#endif /* !defined(CONFIG_USER_ONLY) */
30
31#ifndef CONFIG_USER_ONLY
32static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
33#endif
34
/* Recompute the cached hflags (CPU mode, 64-bit enable, FPU/CP0
   accessibility, COP1X availability) from CP0_Status, the debug-mode
   flag and the CPU's ISA level.  Must be called after any change to
   CP0_Status, TCStatus or fcr0.  */
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_UX);
    /* Kernel mode is forced while EXL/ERL are set or in debug mode;
       otherwise the mode comes from Status.KSU.  */
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    /* 64-bit operations are usable outside user mode, or in user mode
       when Status.PX or Status.UX is set.  */
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
        (env->CP0_Status & (1 << CP0St_PX)) ||
        (env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_64;
    }
    if (env->CP0_Status & (1 << CP0St_UX)) {
        env->hflags |= MIPS_HFLAG_UX;
    }
#endif
    /* CP0 is accessible with Status.CU0 set, or outside user mode.  */
    if ((env->CP0_Status & (1 << CP0St_CU0)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    /* How COP1X is enabled depends on the ISA generation.  */
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them.  */
        if (env->CP0_Status & (1 << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
}
83
84/*****************************************************************************/
85/* Exceptions processing helpers */
86
/* Record the pending exception number and error code on env and
   longjmp back to the main execution loop.  Does not return.  */
void helper_raise_exception_err (CPUMIPSState *env,
                                 uint32_t exception, int error_code)
{
#if 1
    /* NOTE(review): debug logging left unconditionally compiled in.  */
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit(env);
}
98
/* Convenience wrapper: raise an exception with error code 0.
   Does not return.  */
void helper_raise_exception (CPUMIPSState *env, uint32_t exception)
{
    helper_raise_exception_err(env, exception, 0);
}
103
/* If interrupts are globally enabled (not in EXL/ERL/debug mode and
   Status.IE set) and an unmasked interrupt is pending in Cause.IP,
   clear Cause.ExcCode and raise the external interrupt exception.  */
void helper_interrupt_restart (CPUMIPSState *env)
{
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        (env->CP0_Status & (1 << CP0St_IE)) &&
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
        helper_raise_exception(env, EXCP_EXT_INTERRUPT);
    }
}
115
116#if !defined(CONFIG_USER_ONLY)
/* If host pc falls inside a translated block, restore the CPU state
   that was live at the corresponding guest instruction (used before
   raising exceptions from helpers).  */
static void do_restore_state (CPUMIPSState *env, uintptr_t pc)
{
    TranslationBlock *tb;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (env, pc);
    }
}
126#endif
127
#if defined(CONFIG_USER_ONLY)
/* User-only emulation has a single flat address space: the memory
   index is ignored and the "raw" access routines are used.  */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
                             int mem_idx)                               \
{                                                                       \
    return (type) cpu_##insn##_raw(env, addr);                                     \
}
#else
/* Softmmu: dispatch on mem_idx to the kernel (0), supervisor (1) or
   user (2, also the fallback) variant of the access routine.  */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
                             int mem_idx)                               \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) cpu_##insn##_kernel(env, addr); break;        \
    case 1: return (type) cpu_##insn##_super(env, addr); break;         \
    default:                                                            \
    case 2: return (type) cpu_##insn##_user(env, addr); break;          \
    }                                                                   \
}
#endif
/* Byte/word/doubleword loads used by the unaligned-access helpers.  */
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD
155
#if defined(CONFIG_USER_ONLY)
/* User-only counterpart of HELPER_LD: stores to the flat address
   space, ignoring the memory index.  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
                             type val, int mem_idx)                     \
{                                                                       \
    cpu_##insn##_raw(env, addr, val);                                              \
}
#else
/* Softmmu: dispatch on mem_idx to the kernel/supervisor/user store
   routine (2 = user is also the fallback).  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
                             type val, int mem_idx)                     \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: cpu_##insn##_kernel(env, addr, val); break;                 \
    case 1: cpu_##insn##_super(env, addr, val); break;                  \
    default:                                                            \
    case 2: cpu_##insn##_user(env, addr, val); break;                   \
    }                                                                   \
}
#endif
/* Byte/word/doubleword stores used by the unaligned-access helpers.  */
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
183
/* CLO: count leading ones in the low 32 bits of arg1.  */
target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

/* CLZ: count leading zeros in the low 32 bits of arg1.  */
target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}

#if defined(TARGET_MIPS64)
/* DCLO: count leading ones in the full 64-bit arg1.  */
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}

/* DCLZ: count leading zeros in the full 64-bit arg1.  */
target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
#endif /* TARGET_MIPS64 */
205
/* 64 bits arithmetic for 32 bits hosts */

/* Read the HI0:LO0 pair as a single unsigned 64-bit value.  */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

/* Split a 64-bit value into HI0/LO0 (each stored sign-extended from
   32 bits).  */
static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
217
/* Store HILO into HI0/LO0.
   NOTE(review): the assignment to the by-value parameter arg1 (which
   looks intended to hand the new HI back to the caller, as it did when
   this was a macro) has no effect outside this function.  Callers must
   derive the destination-register value themselves.  */
static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

/* As above, but the (equally ineffective) hand-back is the new LO.  */
static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
{
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
229
230/* Multiplication variants of the vr54xx. */
231target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
232{
233    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
234
235    return arg1;
236}
237
238target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
239{
240    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
241
242    return arg1;
243}
244
245target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
246{
247    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
248
249    return arg1;
250}
251
252target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
253{
254    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
255
256    return arg1;
257}
258
259target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
260{
261    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
262
263    return arg1;
264}
265
266target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
267{
268    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
269
270    return arg1;
271}
272
273target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
274{
275    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
276
277    return arg1;
278}
279
280target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
281{
282    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
283
284    return arg1;
285}
286
287target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
288{
289    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
290
291    return arg1;
292}
293
294target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
295{
296    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
297
298    return arg1;
299}
300
301target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
302{
303    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
304
305    return arg1;
306}
307
308target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
309{
310    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
311
312    return arg1;
313}
314
315target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
316{
317    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
318
319    return arg1;
320}
321
322target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
323{
324    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
325
326    return arg1;
327}
328
#ifdef TARGET_MIPS64
/* DMULT: 64x64->128 signed multiply; LO0 = low half, HI0 = high half.  */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

/* DMULTU: unsigned variant of helper_dmult.  */
void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
340
341#ifndef CONFIG_USER_ONLY
342
/* Translate a virtual address for an LL/SC access.  On translation
   failure cpu_mips_translate_address() has already queued the MMU
   fault, and cpu_loop_exit() does not return.  */
static inline hwaddr do_translate_address(target_ulong address, int rw)
{
    hwaddr lladdr;

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == (hwaddr)-1LL) {
        cpu_loop_exit(env);
    } else {
        return lladdr;
    }
}
355
/* Linked loads (LL/LLD): remember the translated physical address and
   the loaded value so a later SC/SCD can check them.  */
#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
{                                                                             \
    env->lladdr = do_translate_address(arg, 0);                               \
    env->llval = do_##insn(env, arg, mem_idx);                                     \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC
368
/* Conditional stores (SC/SCD): raise AdES on a misaligned address (the
   raise does not return); succeed (return 1) only when the physical
   address still matches lladdr and memory still holds llval, otherwise
   fail (return 0) without storing.  */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(env, EXCP_AdES);                               \
    }                                                                         \
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
        tmp = do_##ld_insn(env, arg2, mem_idx);                                    \
        if (tmp == env->llval) {                                              \
            do_##st_insn(env, arg2, arg1, mem_idx);                                \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
#endif
392#endif
393
394#ifdef TARGET_WORDS_BIGENDIAN
395#define GET_LMASK(v) ((v) & 3)
396#define GET_OFFSET(addr, offset) (addr + (offset))
397#else
398#define GET_LMASK(v) (((v) & 3) ^ 3)
399#define GET_OFFSET(addr, offset) (addr - (offset))
400#endif
401
/* LWL: merge the most-significant bytes of the word containing arg2
   into register value arg1, one byte at a time so that any byte access
   can fault precisely.  Returns the merged value sign-extended from
   32 bits.  */
target_ulong helper_lwl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                        int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(env, arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)arg1;
}
426
/* LWR: merge the least-significant bytes of the word containing arg2
   into arg1 (mirror image of helper_lwl).  Returns the merged value
   sign-extended from 32 bits.  */
target_ulong helper_lwr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                        int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(env, arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)arg1;
}
451
/* SWL: store the most-significant bytes of arg1 to the unaligned word
   at arg2, byte by byte (each do_sb may fault; store order matters for
   precise exceptions).  */
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK(arg2) <= 2)
        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) <= 1)
        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) == 0)
        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
}
466
/* SWR: store the least-significant bytes of arg1 to the unaligned word
   at arg2 (mirror image of helper_swl).  */
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK(arg2) >= 1)
        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) >= 2)
        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) == 3)
        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
}
481
#if defined(TARGET_MIPS64)
/* "half" load and stores.  We must do the memory access inline,
   or fault handling won't work.  */

/* Big-endian byte position of an address within its doubleword; the
   64-bit analogue of GET_LMASK above.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
491
492target_ulong helper_ldl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
493                        int mem_idx)
494{
495    uint64_t tmp;
496
497    tmp = do_lbu(env, arg2, mem_idx);
498    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
499
500    if (GET_LMASK64(arg2) <= 6) {
501        tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
502        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
503    }
504
505    if (GET_LMASK64(arg2) <= 5) {
506        tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
507        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
508    }
509
510    if (GET_LMASK64(arg2) <= 4) {
511        tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
512        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
513    }
514
515    if (GET_LMASK64(arg2) <= 3) {
516        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
517        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
518    }
519
520    if (GET_LMASK64(arg2) <= 2) {
521        tmp = do_lbu(env, GET_OFFSET(arg2, 5), mem_idx);
522        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
523    }
524
525    if (GET_LMASK64(arg2) <= 1) {
526        tmp = do_lbu(env, GET_OFFSET(arg2, 6), mem_idx);
527        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
528    }
529
530    if (GET_LMASK64(arg2) == 0) {
531        tmp = do_lbu(env, GET_OFFSET(arg2, 7), mem_idx);
532        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
533    }
534
535    return arg1;
536}
537
/* LDR: merge the least-significant bytes of the doubleword containing
   arg2 into arg1 (mirror image of helper_ldl).  */
target_ulong helper_ldr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                        int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(env, arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(env, GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}
583
584void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
585                 int mem_idx)
586{
587    do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx);
588
589    if (GET_LMASK64(arg2) <= 6)
590        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
591
592    if (GET_LMASK64(arg2) <= 5)
593        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
594
595    if (GET_LMASK64(arg2) <= 4)
596        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
597
598    if (GET_LMASK64(arg2) <= 3)
599        do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
600
601    if (GET_LMASK64(arg2) <= 2)
602        do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
603
604    if (GET_LMASK64(arg2) <= 1)
605        do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
606
607    if (GET_LMASK64(arg2) <= 0)
608        do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
609}
610
/* SDR: store the least-significant bytes of arg1 to the unaligned
   doubleword at arg2 (mirror image of helper_sdl).  */
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                 int mem_idx)
{
    do_sb(env, arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK64(arg2) >= 1)
        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
637#endif /* TARGET_MIPS64 */
638
639#ifndef CONFIG_USER_ONLY
640/* tc should point to an int with the value of the global TC index.
641   This function will transform it into a local index within the
642   returned CPUState.
643
644   FIXME: This code assumes that all VPEs have the same number of TCs,
645          which depends on runtime setup. Can probably be fixed by
646          walking the list of CPUStates.  */
/* Map a global TC index to (cpu, local TC index).  On entry *tc holds
   the global index; on return it holds the index within the returned
   CPU.  Without MVP privileges the current CPU/TC is returned.
   NOTE(review): assumes every VPE has the same nr_threads (see FIXME
   above) and falls back to the current env if qemu_get_cpu() finds no
   CPU for the computed VPE index.  */
static CPUMIPSState *mips_cpu_map_tc(int *tc)
{
    CPUMIPSState *other;
    int vpe_idx, nr_threads = env->nr_threads;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    vpe_idx = tc_idx / nr_threads;
    *tc = tc_idx % nr_threads;
    other = qemu_get_cpu(vpe_idx);
    return other ? other : env;
}
664
665/* The per VPE CP0_Status register shares some fields with the per TC
666   CP0_TCStatus registers. These fields are wired to the same registers,
667   so changes to either of them should be reflected on both registers.
668
669   Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
670
671   These helper call synchronizes the regs for a given cpu.  */
672
673/* Called for updates to CP0_Status.  */
674static void sync_c0_status(CPUMIPSState *cpu, int tc)
675{
676    int32_t tcstatus, *tcst;
677    uint32_t v = cpu->CP0_Status;
678    uint32_t cu, mx, asid, ksu;
679    uint32_t mask = ((1 << CP0TCSt_TCU3)
680                       | (1 << CP0TCSt_TCU2)
681                       | (1 << CP0TCSt_TCU1)
682                       | (1 << CP0TCSt_TCU0)
683                       | (1 << CP0TCSt_TMX)
684                       | (3 << CP0TCSt_TKSU)
685                       | (0xff << CP0TCSt_TASID));
686
687    cu = (v >> CP0St_CU0) & 0xf;
688    mx = (v >> CP0St_MX) & 0x1;
689    ksu = (v >> CP0St_KSU) & 0x3;
690    asid = env->CP0_EntryHi & 0xff;
691
692    tcstatus = cu << CP0TCSt_TCU0;
693    tcstatus |= mx << CP0TCSt_TMX;
694    tcstatus |= ksu << CP0TCSt_TKSU;
695    tcstatus |= asid;
696
697    if (tc == cpu->current_tc) {
698        tcst = &cpu->active_tc.CP0_TCStatus;
699    } else {
700        tcst = &cpu->tcs[tc].CP0_TCStatus;
701    }
702
703    *tcst &= ~mask;
704    *tcst |= tcstatus;
705    compute_hflags(cpu);
706}
707
708/* Called for updates to CP0_TCStatus.  */
709static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, target_ulong v)
710{
711    uint32_t status;
712    uint32_t tcu, tmx, tasid, tksu;
713    uint32_t mask = ((1 << CP0St_CU3)
714                       | (1 << CP0St_CU2)
715                       | (1 << CP0St_CU1)
716                       | (1 << CP0St_CU0)
717                       | (1 << CP0St_MX)
718                       | (3 << CP0St_KSU));
719
720    tcu = (v >> CP0TCSt_TCU0) & 0xf;
721    tmx = (v >> CP0TCSt_TMX) & 0x1;
722    tasid = v & 0xff;
723    tksu = (v >> CP0TCSt_TKSU) & 0x3;
724
725    status = tcu << CP0St_CU0;
726    status |= tmx << CP0St_MX;
727    status |= tksu << CP0St_KSU;
728
729    cpu->CP0_Status &= ~mask;
730    cpu->CP0_Status |= status;
731
732    /* Sync the TASID with EntryHi.  */
733    cpu->CP0_EntryHi &= ~0xff;
734    cpu->CP0_EntryHi = tasid;
735
736    compute_hflags(cpu);
737}
738
739/* Called for updates to CP0_EntryHi.  */
/* Propagate the low 8-bit ASID of cpu's EntryHi into the TASID field
   of TC 'tc's TCStatus (they are architecturally the same field).  */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & 0xff;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~0xff;
    *tcst |= asid;
}
756
/* CP0 helpers */

/* MVP registers are shared across VPEs through env->mvp.  */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

/* Random register: pseudo-random TLB replacement index.  */
target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}

/* MFTC0 form: read TCStatus of the TC selected by VPEControl.TargTC,
   which may belong to another VPE.  */
target_ulong helper_mftc0_tcstatus(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCStatus;
    else
        return other->tcs[other_tc].CP0_TCStatus;
}
793
target_ulong helper_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}

/* MFTC0 form of TCBind (TC selected by VPEControl.TargTC).  */
target_ulong helper_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCBind;
    else
        return other->tcs[other_tc].CP0_TCBind;
}

/* TCRestart reads back as the TC's program counter.  */
target_ulong helper_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.PC;
    else
        return other->tcs[other_tc].PC;
}
825
target_ulong helper_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

/* MFTC0 form of TCHalt (TC selected by VPEControl.TargTC).  */
target_ulong helper_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCHalt;
    else
        return other->tcs[other_tc].CP0_TCHalt;
}

target_ulong helper_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

/* MFTC0 form of TCContext.  */
target_ulong helper_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCContext;
    else
        return other->tcs[other_tc].CP0_TCContext;
}
857
target_ulong helper_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

/* MFTC0 form of TCSchedule (TC selected by VPEControl.TargTC).  */
target_ulong helper_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCSchedule;
    else
        return other->tcs[other_tc].CP0_TCSchedule;
}

target_ulong helper_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

/* MFTC0 form of TCScheFBack.  */
target_ulong helper_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCScheFBack;
    else
        return other->tcs[other_tc].CP0_TCScheFBack;
}
889
/* Count register: derived from the emulated CP0 timer.  */
target_ulong helper_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}

/* MFTC0 reads of per-VPE registers on the targeted TC's VPE.  */
target_ulong helper_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_EntryHi;
}

target_ulong helper_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_Status;
}

/* LLAddr is presented shifted right by the CPU's LLAddr shift.  */
target_ulong helper_mfc0_lladdr (void)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

target_ulong helper_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

target_ulong helper_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}
925
/* Debug register: reflect the live debug-mode flag in the DM bit.  */
target_ulong helper_mfc0_debug (void)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}

/* MFTC0 form: per-TC SSt/Halt bits come from the targeted TC's
   Debug_tcstatus, the rest from the VPE's Debug register.  */
target_ulong helper_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
950
#if defined(TARGET_MIPS64)
/* DMFC0 variants: same sources as the 32-bit readers above, but
   without truncating/sign-extending to 32 bits.  */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
987
/* Write CP0_Index: preserve the P (probe-failure) bit and mask the
   written index with the smallest all-ones mask that covers every
   valid TLB index (num becomes the next power of two above nb_tlb).  */
void helper_mtc0_index (target_ulong arg1)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
}
999
1000void helper_mtc0_mvpcontrol (target_ulong arg1)
1001{
1002    uint32_t mask = 0;
1003    uint32_t newval;
1004
1005    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1006        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1007                (1 << CP0MVPCo_EVP);
1008    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1009        mask |= (1 << CP0MVPCo_STLB);
1010    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1011
1012    // TODO: Enable/disable shared TLB, enable/disable VPEs.
1013
1014    env->mvp->CP0_MVPControl = newval;
1015}
1016
1017void helper_mtc0_vpecontrol (target_ulong arg1)
1018{
1019    uint32_t mask;
1020    uint32_t newval;
1021
1022    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1023           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1024    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1025
1026    /* Yield scheduler intercept not implemented. */
1027    /* Gating storage scheduler intercept not implemented. */
1028
1029    // TODO: Enable/disable TCs.
1030
1031    env->CP0_VPEControl = newval;
1032}
1033
1034void helper_mtc0_vpeconf0 (target_ulong arg1)
1035{
1036    uint32_t mask = 0;
1037    uint32_t newval;
1038
1039    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1040        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1041            mask |= (0xff << CP0VPEC0_XTC);
1042        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1043    }
1044    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1045
1046    // TODO: TC exclusive handling due to ERL/EXL.
1047
1048    env->CP0_VPEConf0 = newval;
1049}
1050
1051void helper_mtc0_vpeconf1 (target_ulong arg1)
1052{
1053    uint32_t mask = 0;
1054    uint32_t newval;
1055
1056    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1057        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1058                (0xff << CP0VPEC1_NCP1);
1059    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1060
1061    /* UDI not implemented. */
1062    /* CP2 not implemented. */
1063
1064    // TODO: Handle FPU (CP1) binding.
1065
1066    env->CP0_VPEConf1 = newval;
1067}
1068
1069void helper_mtc0_yqmask (target_ulong arg1)
1070{
1071    /* Yield qualifier inputs not implemented. */
1072    env->CP0_YQMask = 0x00000000;
1073}
1074
1075void helper_mtc0_vpeopt (target_ulong arg1)
1076{
1077    env->CP0_VPEOpt = arg1 & 0x0000ffff;
1078}
1079
1080void helper_mtc0_entrylo0 (target_ulong arg1)
1081{
1082    /* Large physaddr (PABITS) not implemented */
1083    /* 1k pages not implemented */
1084    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1085}
1086
1087void helper_mtc0_tcstatus (target_ulong arg1)
1088{
1089    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1090    uint32_t newval;
1091
1092    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1093
1094    env->active_tc.CP0_TCStatus = newval;
1095    sync_c0_tcstatus(env, env->current_tc, newval);
1096}
1097
1098void helper_mttc0_tcstatus (target_ulong arg1)
1099{
1100    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1101    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1102
1103    if (other_tc == other->current_tc)
1104        other->active_tc.CP0_TCStatus = arg1;
1105    else
1106        other->tcs[other_tc].CP0_TCStatus = arg1;
1107    sync_c0_tcstatus(other, other_tc, arg1);
1108}
1109
1110void helper_mtc0_tcbind (target_ulong arg1)
1111{
1112    uint32_t mask = (1 << CP0TCBd_TBE);
1113    uint32_t newval;
1114
1115    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1116        mask |= (1 << CP0TCBd_CurVPE);
1117    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1118    env->active_tc.CP0_TCBind = newval;
1119}
1120
1121void helper_mttc0_tcbind (target_ulong arg1)
1122{
1123    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1124    uint32_t mask = (1 << CP0TCBd_TBE);
1125    uint32_t newval;
1126    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1127
1128    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1129        mask |= (1 << CP0TCBd_CurVPE);
1130    if (other_tc == other->current_tc) {
1131        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1132        other->active_tc.CP0_TCBind = newval;
1133    } else {
1134        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1135        other->tcs[other_tc].CP0_TCBind = newval;
1136    }
1137}
1138
1139void helper_mtc0_tcrestart (target_ulong arg1)
1140{
1141    env->active_tc.PC = arg1;
1142    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1143    env->lladdr = 0ULL;
1144    /* MIPS16 not implemented. */
1145}
1146
1147void helper_mttc0_tcrestart (target_ulong arg1)
1148{
1149    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1150    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1151
1152    if (other_tc == other->current_tc) {
1153        other->active_tc.PC = arg1;
1154        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1155        other->lladdr = 0ULL;
1156        /* MIPS16 not implemented. */
1157    } else {
1158        other->tcs[other_tc].PC = arg1;
1159        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1160        other->lladdr = 0ULL;
1161        /* MIPS16 not implemented. */
1162    }
1163}
1164
1165void helper_mtc0_tchalt (target_ulong arg1)
1166{
1167    env->active_tc.CP0_TCHalt = arg1 & 0x1;
1168
1169    // TODO: Halt TC / Restart (if allocated+active) TC.
1170}
1171
1172void helper_mttc0_tchalt (target_ulong arg1)
1173{
1174    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1175    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1176
1177    // TODO: Halt TC / Restart (if allocated+active) TC.
1178
1179    if (other_tc == other->current_tc)
1180        other->active_tc.CP0_TCHalt = arg1;
1181    else
1182        other->tcs[other_tc].CP0_TCHalt = arg1;
1183}
1184
1185void helper_mtc0_tccontext (target_ulong arg1)
1186{
1187    env->active_tc.CP0_TCContext = arg1;
1188}
1189
1190void helper_mttc0_tccontext (target_ulong arg1)
1191{
1192    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1193    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1194
1195    if (other_tc == other->current_tc)
1196        other->active_tc.CP0_TCContext = arg1;
1197    else
1198        other->tcs[other_tc].CP0_TCContext = arg1;
1199}
1200
1201void helper_mtc0_tcschedule (target_ulong arg1)
1202{
1203    env->active_tc.CP0_TCSchedule = arg1;
1204}
1205
1206void helper_mttc0_tcschedule (target_ulong arg1)
1207{
1208    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1209    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1210
1211    if (other_tc == other->current_tc)
1212        other->active_tc.CP0_TCSchedule = arg1;
1213    else
1214        other->tcs[other_tc].CP0_TCSchedule = arg1;
1215}
1216
1217void helper_mtc0_tcschefback (target_ulong arg1)
1218{
1219    env->active_tc.CP0_TCScheFBack = arg1;
1220}
1221
1222void helper_mttc0_tcschefback (target_ulong arg1)
1223{
1224    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1225    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1226
1227    if (other_tc == other->current_tc)
1228        other->active_tc.CP0_TCScheFBack = arg1;
1229    else
1230        other->tcs[other_tc].CP0_TCScheFBack = arg1;
1231}
1232
1233void helper_mtc0_entrylo1 (target_ulong arg1)
1234{
1235    /* Large physaddr (PABITS) not implemented */
1236    /* 1k pages not implemented */
1237    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1238}
1239
1240void helper_mtc0_context (target_ulong arg1)
1241{
1242    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1243}
1244
1245void helper_mtc0_pagemask (target_ulong arg1)
1246{
1247    /* 1k pages not implemented */
1248    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1249}
1250
1251void helper_mtc0_pagegrain (target_ulong arg1)
1252{
1253    /* SmartMIPS not implemented */
1254    /* Large physaddr (PABITS) not implemented */
1255    /* 1k pages not implemented */
1256    env->CP0_PageGrain = 0;
1257}
1258
1259void helper_mtc0_wired (target_ulong arg1)
1260{
1261    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1262}
1263
1264void helper_mtc0_srsconf0 (target_ulong arg1)
1265{
1266    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1267}
1268
1269void helper_mtc0_srsconf1 (target_ulong arg1)
1270{
1271    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1272}
1273
1274void helper_mtc0_srsconf2 (target_ulong arg1)
1275{
1276    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1277}
1278
1279void helper_mtc0_srsconf3 (target_ulong arg1)
1280{
1281    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1282}
1283
1284void helper_mtc0_srsconf4 (target_ulong arg1)
1285{
1286    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1287}
1288
1289void helper_mtc0_hwrena (target_ulong arg1)
1290{
1291    env->CP0_HWREna = arg1 & 0x0000000F;
1292}
1293
/* MTC0 Count: delegate to the timer helper so dependent timer state
   is updated along with the counter. */
void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
1298
/* MTC0 EntryHi: keep the VPN2 field plus the 8-bit ASID.  A change of
   ASID invalidates all of qemu's cached translations. */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    /* Constrain the VPN to the implemented segment size. */
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* MT ASE present: keep the per-TC shadow of EntryHi coherent. */
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
1317
1318void helper_mttc0_entryhi(target_ulong arg1)
1319{
1320    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1321    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1322
1323    other->CP0_EntryHi = arg1;
1324    sync_c0_entryhi(other, other_tc);
1325}
1326
/* MTC0 Compare: delegate to the timer helper so the timer interrupt
   state is updated along with the register. */
void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
1331
/* MTC0 Status: merge the writable bits, re-derive the execution-mode
   hflags (or the MT shadow state), optionally trace the transition,
   and re-evaluate pending interrupts. */
void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* MT ASE: propagate through the per-TC shadow instead. */
        sync_c0_status(env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        /* Log old/new Status, the enabled+pending IP bits, and the
           resulting privilege mode. */
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
    cpu_mips_update_irq(env);
}
1360
1361void helper_mttc0_status(target_ulong arg1)
1362{
1363    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1364    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1365
1366    other->CP0_Status = arg1 & ~0xf1000018;
1367    sync_c0_status(other, other_tc);
1368}
1369
1370void helper_mtc0_intctl (target_ulong arg1)
1371{
1372    /* vectored interrupts not implemented, no performance counters. */
1373    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1374}
1375
1376void helper_mtc0_srsctl (target_ulong arg1)
1377{
1378    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1379    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1380}
1381
/* MTC0 Cause: merge the writable bits; a flip of the DC bit stops or
   restarts the Count timer, and writes to the software IP bits are
   folded into the normal interrupt evaluation. */
void helper_mtc0_cause (target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;

    /* The DC (disable count) bit only exists from R2 onwards. */
    if (env->insn_flags & ISA_MIPS32R2)
        mask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Handle the software interrupt as an hardware one, as they
       are very similar */
    if (arg1 & CP0Ca_IP_mask) {
        cpu_mips_update_irq(env);
    }
}
1405
1406void helper_mtc0_ebase (target_ulong arg1)
1407{
1408    /* vectored interrupts not implemented */
1409    /* Multi-CPU not implemented */
1410    env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
1411}
1412
1413void helper_mtc0_config0 (target_ulong arg1)
1414{
1415    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1416}
1417
/* MTC0 Config2: the written value is deliberately ignored — the
   tertiary/secondary cache fields it would control are not
   implemented, so only the preserved bits are kept. */
void helper_mtc0_config2 (target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
1423
1424void helper_mtc0_lladdr (target_ulong arg1)
1425{
1426    target_long mask = env->CP0_LLAddr_rw_bitmask;
1427    arg1 = arg1 << env->CP0_LLAddr_shift;
1428    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1429}
1430
1431void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1432{
1433    /* Watch exceptions for instructions, data loads, data stores
1434       not implemented. */
1435    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1436}
1437
/* MTC0 WatchHi[sel]: keep only the implemented fields.  The second
   statement looks like a write-one-to-clear for the low W/R/I status
   bits, but NOTE(review): the first assignment's mask (0x40FF0FF8)
   already zeroes bits 2..0, so it is currently a no-op — confirm the
   intended W1C semantics against the architecture manual. */
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
1443
1444void helper_mtc0_xcontext (target_ulong arg1)
1445{
1446    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1447    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1448}
1449
1450void helper_mtc0_framemask (target_ulong arg1)
1451{
1452    env->CP0_Framemask = arg1; /* XXX */
1453}
1454
1455void helper_mtc0_debug (target_ulong arg1)
1456{
1457    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1458    if (arg1 & (1 << CP0DB_DM))
1459        env->hflags |= MIPS_HFLAG_DM;
1460    else
1461        env->hflags &= ~MIPS_HFLAG_DM;
1462}
1463
/* Store the Debug register for the TC selected by VPEControl.TargTC:
   the per-TC SSt/Halt bits go into that TC's Debug_tcstatus, the
   remaining bits into the owning VPE's CP0_Debug. */
void helper_mttc0_debug(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_Debug_tcstatus = val;
    else
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1479
1480void helper_mtc0_performance0 (target_ulong arg1)
1481{
1482    env->CP0_Performance0 = arg1 & 0x000007ff;
1483}
1484
1485void helper_mtc0_taglo (target_ulong arg1)
1486{
1487    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1488}
1489
1490void helper_mtc0_datalo (target_ulong arg1)
1491{
1492    env->CP0_DataLo = arg1; /* XXX */
1493}
1494
1495void helper_mtc0_taghi (target_ulong arg1)
1496{
1497    env->CP0_TagHi = arg1; /* XXX */
1498}
1499
1500void helper_mtc0_datahi (target_ulong arg1)
1501{
1502    env->CP0_DataHi = arg1; /* XXX */
1503}
1504
1505/* MIPS MT functions */
1506target_ulong helper_mftgpr(uint32_t sel)
1507{
1508    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1509    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1510
1511    if (other_tc == other->current_tc)
1512        return other->active_tc.gpr[sel];
1513    else
1514        return other->tcs[other_tc].gpr[sel];
1515}
1516
1517target_ulong helper_mftlo(uint32_t sel)
1518{
1519    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1520    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1521
1522    if (other_tc == other->current_tc)
1523        return other->active_tc.LO[sel];
1524    else
1525        return other->tcs[other_tc].LO[sel];
1526}
1527
1528target_ulong helper_mfthi(uint32_t sel)
1529{
1530    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1531    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1532
1533    if (other_tc == other->current_tc)
1534        return other->active_tc.HI[sel];
1535    else
1536        return other->tcs[other_tc].HI[sel];
1537}
1538
1539target_ulong helper_mftacx(uint32_t sel)
1540{
1541    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1542    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1543
1544    if (other_tc == other->current_tc)
1545        return other->active_tc.ACX[sel];
1546    else
1547        return other->tcs[other_tc].ACX[sel];
1548}
1549
1550target_ulong helper_mftdsp(void)
1551{
1552    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1553    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1554
1555    if (other_tc == other->current_tc)
1556        return other->active_tc.DSPControl;
1557    else
1558        return other->tcs[other_tc].DSPControl;
1559}
1560
1561void helper_mttgpr(target_ulong arg1, uint32_t sel)
1562{
1563    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1564    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1565
1566    if (other_tc == other->current_tc)
1567        other->active_tc.gpr[sel] = arg1;
1568    else
1569        other->tcs[other_tc].gpr[sel] = arg1;
1570}
1571
1572void helper_mttlo(target_ulong arg1, uint32_t sel)
1573{
1574    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1575    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1576
1577    if (other_tc == other->current_tc)
1578        other->active_tc.LO[sel] = arg1;
1579    else
1580        other->tcs[other_tc].LO[sel] = arg1;
1581}
1582
1583void helper_mtthi(target_ulong arg1, uint32_t sel)
1584{
1585    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1586    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1587
1588    if (other_tc == other->current_tc)
1589        other->active_tc.HI[sel] = arg1;
1590    else
1591        other->tcs[other_tc].HI[sel] = arg1;
1592}
1593
1594void helper_mttacx(target_ulong arg1, uint32_t sel)
1595{
1596    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1597    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1598
1599    if (other_tc == other->current_tc)
1600        other->active_tc.ACX[sel] = arg1;
1601    else
1602        other->tcs[other_tc].ACX[sel] = arg1;
1603}
1604
1605void helper_mttdsp(target_ulong arg1)
1606{
1607    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1608    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1609
1610    if (other_tc == other->current_tc)
1611        other->active_tc.DSPControl = arg1;
1612    else
1613        other->tcs[other_tc].DSPControl = arg1;
1614}
1615
1616/* MIPS MT functions */
1617target_ulong helper_dmt(target_ulong arg1)
1618{
1619    // TODO
1620    arg1 = 0;
1621    // rt = arg1
1622
1623    return arg1;
1624}
1625
1626target_ulong helper_emt(target_ulong arg1)
1627{
1628    // TODO
1629    arg1 = 0;
1630    // rt = arg1
1631
1632    return arg1;
1633}
1634
1635target_ulong helper_dvpe(target_ulong arg1)
1636{
1637    // TODO
1638    arg1 = 0;
1639    // rt = arg1
1640
1641    return arg1;
1642}
1643
1644target_ulong helper_evpe(target_ulong arg1)
1645{
1646    // TODO
1647    arg1 = 0;
1648    // rt = arg1
1649
1650    return arg1;
1651}
1652#endif /* !CONFIG_USER_ONLY */
1653
/* FORK: arg1 = rt (value for the new TC), arg2 = rs.  Thread
   allocation is not implemented; this is a stub. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}
1660
1661target_ulong helper_yield(target_ulong arg1)
1662{
1663    if (arg1 < 0) {
1664        /* No scheduling policy implemented. */
1665        if (arg1 != -2) {
1666            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1667                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1668                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1669                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1670                helper_raise_exception(env, EXCP_THREAD);
1671            }
1672        }
1673    } else if (arg1 == 0) {
1674        if (0 /* TODO: TC underflow */) {
1675            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1676            helper_raise_exception(env, EXCP_THREAD);
1677        } else {
1678            // TODO: Deallocate TC
1679        }
1680    } else if (arg1 > 0) {
1681        /* Yield qualifier inputs not implemented. */
1682        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1683        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1684        helper_raise_exception(env, EXCP_THREAD);
1685    }
1686    return env->CP0_YQMask;
1687}
1688
1689#ifndef CONFIG_USER_ONLY
/* Shadow-TLB invalidation hook for entry idx.  Entries belonging to a
   different ASID are skipped because qemu's TLB is flushed whenever
   the ASID changes.  NOTE(review): for matching entries the function
   currently does nothing after the check — presumably a placeholder
   for shadow-entry bookkeeping; confirm against the callers. */
static void inline r4k_invalidate_tlb_shadow (CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint8_t ASID = env->CP0_EntryHi & 0xFF;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
    flush these entries again.  */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }
}
1702
/* Invalidate qemu's cached translations covered by guest TLB entry
   idx: both even (V0) and odd (V1) pages of the pair are flushed one
   host page at a time. */
static void inline r4k_invalidate_tlb (CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint8_t ASID = env->CP0_EntryHi & 0xFF;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
    flush these entries again.  */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        /* Even page of the pair: [VPN, VPN + half the pair span). */
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        /* Re-sign-extend addresses that fall into the upper segments. */
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page (env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        /* Odd page of the pair: starts half-way through the span. */
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        /* NOTE(review): "addr - 1 < end" makes the bound inclusive of
           the last page — presumably to cope with end at the very top
           of the address space; confirm. */
        while (addr - 1 < end) {
            tlb_flush_page (env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
1747
1748/* TLB management */
/* TLB management */
/* Drop all of qemu's cached translations (and any shadowed entries). */
void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
}
1754
/* Load guest TLB entry idx from the CP0 EntryHi/PageMask/EntryLo
   registers.  EntryLo layout used here: bit 0 = G, bit 1 = V,
   bit 2 = D, bits 5..3 = C, bits 6.. = PFN. */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* The entry is global only if both halves are marked global. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
1777
1778void r4k_helper_ptw_tlbrefill(CPUMIPSState *env)
1779{
1780   /* Do TLB load on behalf of Page Table Walk */
1781    int r = cpu_mips_get_random(env);
1782    r4k_invalidate_tlb_shadow(env, r);
1783    r4k_fill_tlb(env, r);
1784}
1785
/* TLBWI: write the indexed guest TLB entry.  Fast path: when the
   write only changes permissions/attributes of the entry already
   covering the same VPN and ASID, the entry is updated in place and
   qemu's TLB is left alone; otherwise everything is flushed and the
   slot is rewritten. */
void r4k_helper_tlbwi (CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    target_ulong tag;
    target_ulong VPN;
    target_ulong mask;

    /* If tlbwi is trying to upgrading access permissions on current entry,
     * we do not need to flush tlb hash table.
     */
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    tag = env->CP0_EntryHi & ~mask;
    VPN = tlb->VPN & ~mask;
    if (VPN == tag)
    {
        if (tlb->ASID == (env->CP0_EntryHi & 0xFF))
        {
            /* Same page and ASID: refresh the attribute bits only. */
            tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
            tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
            tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
            tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
            tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
            tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
            tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
            tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
            return;
        }
    }

    /*flush all the tlb cache */
    cpu_mips_tlb_flush (env, 1);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb);
    r4k_fill_tlb(env, env->CP0_Index % env->tlb->nb_tlb);
}
1822
1823void r4k_helper_tlbwr (CPUMIPSState *env)
1824{
1825    int r = cpu_mips_get_random(env);
1826
1827    r4k_invalidate_tlb_shadow(env, r);
1828    r4k_fill_tlb(env, r);
1829}
1830
1831void r4k_helper_tlbp(CPUMIPSState *env)
1832{
1833    r4k_tlb_t *tlb;
1834    target_ulong mask;
1835    target_ulong tag;
1836    target_ulong VPN;
1837    uint8_t ASID;
1838    int i;
1839
1840    ASID = env->CP0_EntryHi & 0xFF;
1841    for (i = 0; i < env->tlb->nb_tlb; i++) {
1842        tlb = &env->tlb->mmu.r4k.tlb[i];
1843        /* 1k pages are not supported. */
1844        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1845        tag = env->CP0_EntryHi & ~mask;
1846        VPN = tlb->VPN & ~mask;
1847        /* Check ASID, virtual page number & size */
1848        if (unlikely((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag)) {
1849            /* TLB match */
1850            env->CP0_Index = i;
1851            break;
1852        }
1853    }
1854    if (i == env->tlb->nb_tlb) {
1855        /* No match.  Discard any shadow entries, if any of them match. */
1856        int index = ((env->CP0_EntryHi>>5)&0x1ff00) | ASID;
1857        index |= (env->CP0_EntryHi>>13)&0x20000;
1858        env->CP0_Index |= 0x80000000;
1859    }
1860}
1861
/* TLBR: read the indexed guest TLB entry back into EntryHi/PageMask/
   EntryLo.  NOTE(review): the conditional ASID-change flush is
   immediately followed by an unconditional flush, making the first
   one redundant — presumably a merge leftover; confirm before
   simplifying. */
void r4k_helper_tlbr(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    /*flush all the tlb cache */
    cpu_mips_tlb_flush (env, 1);

    /* Reassemble EntryLo from the stored G/V/D/C/PFN fields. */
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
1884
/* Dispatch TLBWI through the MMU-model-specific handler. */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}
1889
/* Dispatch TLBWR through the MMU-model-specific handler. */
void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}
1894
/* Dispatch TLBP through the MMU-model-specific handler. */
void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}
1899
/* Dispatch TLBR through the MMU-model-specific handler. */
void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}
1904
1905/* Specials */
1906target_ulong helper_di(CPUMIPSState *env)
1907{
1908    target_ulong t0 = env->CP0_Status;
1909
1910    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1911    cpu_mips_update_irq(env);
1912
1913    return t0;
1914}
1915
1916target_ulong helper_ei(CPUMIPSState *env)
1917{
1918    target_ulong t0 = env->CP0_Status;
1919
1920    env->CP0_Status = t0 | (1 << CP0St_IE);
1921    cpu_mips_update_irq(env);
1922
1923    return t0;
1924}
1925
1926static void debug_pre_eret(CPUMIPSState *env)
1927{
1928    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1929        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1930                env->active_tc.PC, env->CP0_EPC);
1931        if (env->CP0_Status & (1 << CP0St_ERL))
1932            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1933        if (env->hflags & MIPS_HFLAG_DM)
1934            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1935        qemu_log("\n");
1936    }
1937}
1938
1939static void debug_post_eret(CPUMIPSState *env)
1940{
1941    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1942        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1943                env->active_tc.PC, env->CP0_EPC);
1944        if (env->CP0_Status & (1 << CP0St_ERL))
1945            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1946        if (env->hflags & MIPS_HFLAG_DM)
1947            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1948        switch (env->hflags & MIPS_HFLAG_KSU) {
1949        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1950        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1951        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1952        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1953        }
1954    }
1955}
1956
/* ERET: return from error level (ErrorEPC, clears ERL) or exception
   level (EPC, clears EXL), then re-derive the hflags.  lladdr is set
   to 1 — presumably a value no real load-linked address can take, so
   any outstanding LL/SC link is broken; confirm against the SC path. */
void helper_eret (CPUMIPSState *env)
{
    debug_pre_eret(env);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->active_tc.PC = env->CP0_ErrorEPC;
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        env->active_tc.PC = env->CP0_EPC;
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret(env);
    env->lladdr = 1;
}
1971
1972void helper_deret (CPUMIPSState *env)
1973{
1974    debug_pre_eret(env);
1975    env->active_tc.PC = env->CP0_DEPC;
1976    env->hflags &= MIPS_HFLAG_DM;
1977    compute_hflags(env);
1978    debug_post_eret(env);
1979    env->lladdr = 1;
1980}
1981#endif /* !CONFIG_USER_ONLY */
1982
1983target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
1984{
1985    if ((env->hflags & MIPS_HFLAG_CP0) ||
1986        (env->CP0_HWREna & (1 << 0)))
1987        return env->CP0_EBase & 0x3ff;
1988    else
1989        helper_raise_exception(env, EXCP_RI);
1990
1991    return 0;
1992}
1993
1994target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
1995{
1996    if ((env->hflags & MIPS_HFLAG_CP0) ||
1997        (env->CP0_HWREna & (1 << 1)))
1998        return env->SYNCI_Step;
1999    else
2000        helper_raise_exception(env, EXCP_RI);
2001
2002    return 0;
2003}
2004
2005target_ulong helper_rdhwr_cc(CPUMIPSState *env)
2006{
2007    if ((env->hflags & MIPS_HFLAG_CP0) ||
2008        (env->CP0_HWREna & (1 << 2)))
2009        return env->CP0_Count;
2010    else
2011        helper_raise_exception(env, EXCP_RI);
2012
2013    return 0;
2014}
2015
2016target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
2017{
2018    if ((env->hflags & MIPS_HFLAG_CP0) ||
2019        (env->CP0_HWREna & (1 << 3)))
2020        return env->CCRes;
2021    else
2022        helper_raise_exception(env, EXCP_RI);
2023
2024    return 0;
2025}
2026
/* Minimal PMON monitor-call emulation.  `function` arrives doubled,
   hence the /2.  Only console input/output stubs are handled; input
   calls always report "no character" (-1 in v0/gpr[2]). */
void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the character passed in a0 (gpr[4]). */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /* Print the string at the guest address in a0.  NOTE(review):
               the guest address is used directly as a host pointer —
               presumably only valid for user-mode emulation; confirm. */
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
2052
/* WAIT: flag the virtual CPU as halted and leave the execution loop via
 * the QEMU-internal EXCP_HLT pseudo-exception.
 */
void helper_wait(CPUMIPSState *env)
{
    env->halted = 1;
    helper_raise_exception(env, EXCP_HLT);
}
2058
2059#if !defined(CONFIG_USER_ONLY)
2060
2061static void do_unaligned_access (CPUMIPSState *env1,
2062                                 target_ulong addr, int is_write,
2063                                 int is_user, uintptr_t retaddr);
2064
2065#undef env
2066#define MMUSUFFIX _mmu
2067#define ALIGNED_ONLY
2068
2069#define SHIFT 0
2070#include "exec/softmmu_template.h"
2071
2072#define SHIFT 1
2073#include "exec/softmmu_template.h"
2074
2075#define SHIFT 2
2076#include "exec/softmmu_template.h"
2077
2078#define SHIFT 3
2079#include "exec/softmmu_template.h"
2080
/* Invoked by the softmmu access routines (ALIGNED_ONLY) on a misaligned
 * access: record the faulting address in BadVAddr, restore the CPU state
 * from the host return address, then raise AdES for a store or AdEL for
 * a load/fetch.  NOTE: operates on the global "env" (dyngen convention,
 * see the #undef above); the env1 parameter is intentionally unused.
 */
static void do_unaligned_access (CPUMIPSState *env1,
                                 target_ulong addr, int is_write,
                                 int is_user, uintptr_t retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (env, retaddr);
    helper_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
2089
/* Handle a softmmu TLB miss for the MIPS target: ask the MMU to refill
 * QEMU's TLB, and if that fails deliver the guest exception recorded by
 * cpu_mips_handle_mmu_fault() (after restoring the CPU state when we
 * faulted from translated code).
 */
void tlb_fill (CPUMIPSState* env1, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUMIPSState *saved_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = env1;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(env, retaddr);
            }
        }
        /* this longjmps out to the cpu loop, so the env restore below is
           skipped on the exception path */
        helper_raise_exception_err(env, env->exception_index, env->error_code);
    }
    env = saved_env;
}
2116
/* Report an access to unassigned physical memory as a bus error:
 * instruction fetch -> IBE, data access -> DBE.  The addr/is_write/size
 * parameters are ignored.  NOTE(review): "env" is switched to env1 but
 * never restored — helper_raise_exception longjmps out, so control never
 * returns here, but confirm all callers expect that.
 */
void cpu_unassigned_access(CPUMIPSState* env1, hwaddr addr,
                           int is_write, int is_exec, int unused, int size)
{
    env = env1;

    if (is_exec)
        helper_raise_exception(env, EXCP_IBE);
    else
        helper_raise_exception(env, EXCP_DBE);
}
2127/*
2128 * The following functions are address translation helper functions
2129 * for fast memory access in QEMU.
2130 */
/* Slow path of v2p(): look the guest virtual address up in QEMU's
 * software TLB; on a miss, call tlb_fill() to populate the entry and
 * retry.  Despite the "physaddr" name, the value returned is a HOST
 * address (guest address plus the per-entry addend).
 */
static unsigned long v2p_mmu(target_ulong addr, int is_user)
{
    int index;
    target_ulong tlb_addr;
    hwaddr physaddr;
    uintptr_t retaddr;

    /* direct-mapped software TLB: hash on the virtual page number */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
    tlb_addr = env->tlb_table[is_user][index].addr_read;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(env, addr, 0, is_user, retaddr);
        goto redo;
    }
    return physaddr;
}
2151
2152/*
2153 * translation from virtual address of simulated OS
2154 * to the address of simulation host (not the physical
2155 * address of simulated OS.
2156 */
/* Translate a guest virtual address to the corresponding host address
 * (see the comment above: NOT the guest physical address).  Fast path
 * hits QEMU's software TLB directly; misses fall back to v2p_mmu().
 * Temporarily switches the global env to cpu_single_env and restores it.
 */
unsigned long v2p(target_ulong ptr, int is_user)
{
    CPUMIPSState *saved_env;
    int index;
    target_ulong addr;
    hwaddr physaddr;

    saved_env = env;
    env = cpu_single_env;
    addr = ptr;
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* unlikely: TLB entry does not match -> take the slow refill path */
    if (__builtin_expect(env->tlb_table[is_user][index].addr_read !=
                (addr & TARGET_PAGE_MASK), 0)) {
        physaddr = v2p_mmu(addr, is_user);
    } else {
        physaddr = addr + env->tlb_table[is_user][index].addend;
    }
    env = saved_env;
    return physaddr;
}
2177
2178/* copy a string from the simulated virtual space to a buffer in QEMU */
2179void vstrcpy(target_ulong ptr, char *buf, int max)
2180{
2181    char *phys = 0;
2182    target_ulong page = 0;
2183
2184    if (buf == NULL) return;
2185
2186    while (max) {
2187        if ((ptr & TARGET_PAGE_MASK) != page) {
2188            phys = (char *)v2p(ptr, 0);
2189            page = ptr & TARGET_PAGE_MASK;
2190        }
2191        *buf = *phys;
2192        if (*phys == '\0')
2193            return;
2194        ptr ++;
2195        buf ++;
2196        phys ++;
2197        max --;
2198    }
2199}
2200
2201#endif /* !CONFIG_USER_ONLY */
2202
2203/* Complex FPU operations which may need stack space. */
2204
2205#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2206#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2207#define FLOAT_TWO32 make_float32(1 << 30)
2208#define FLOAT_TWO64 make_float64(1ULL << 62)
2209#define FLOAT_QNAN32 0x7fbfffff
2210#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2211#define FLOAT_SNAN32 0x7fffffff
2212#define FLOAT_SNAN64 0x7fffffffffffffffULL
2213
2214/* convert MIPS rounding mode in FCR31 to IEEE library */
static unsigned int ieee_rm[] = {
    /* indexed by the low two bits (RM field) of FCR31, see
       RESTORE_ROUNDING_MODE below */
    float_round_nearest_even, /* RM = 0: round to nearest, ties to even */
    float_round_to_zero,      /* RM = 1: round toward zero */
    float_round_up,           /* RM = 2: round toward +infinity */
    float_round_down          /* RM = 3: round toward -infinity */
};
2221
2222#define RESTORE_ROUNDING_MODE \
2223    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2224
2225#define RESTORE_FLUSH_MODE \
2226    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2227
2228target_ulong helper_cfc1 (uint32_t reg)
2229{
2230    target_ulong arg1;
2231
2232    switch (reg) {
2233    case 0:
2234        arg1 = (int32_t)env->active_fpu.fcr0;
2235        break;
2236    case 25:
2237        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2238        break;
2239    case 26:
2240        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2241        break;
2242    case 28:
2243        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2244        break;
2245    default:
2246        arg1 = (int32_t)env->active_fpu.fcr31;
2247        break;
2248    }
2249
2250    return arg1;
2251}
2252
/* CTC1: write an FPU control register.  Writes carrying bits outside the
 * writable field of the chosen view are silently dropped (early return);
 * writes to other registers are ignored.  A successful write re-derives
 * the softfloat rounding and flush-to-zero modes from FCR31 and raises
 * EXCP_FPE if an already-pending cause bit is now enabled (0x20 keeps
 * the Unimplemented-Operation cause always armed).
 */
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25: /* FCCR: bits 7..1 -> FCC7..FCC1 (bits 31..25), bit 0 -> FCC0 (bit 23) */
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26: /* FEXR: updates only the cause and flags fields */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28: /* FENR: enables/flags/RM, with bit 2 mapped to FS (bit 24) */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31: /* FCSR: full write (reserved bits 22..18 must be zero) */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(env, EXCP_FPE);
}
2289
/* Map softfloat's exception-flag bit layout onto the MIPS FCSR ordering
 * used by the FP_* constants: the shifts reverse the bit order so that
 * inexact lands in the lowest bit and invalid in the highest of the
 * five-bit field.  NOTE(review): plain "char" may be signed; fine for
 * these low-order flag bits, but worth confirming nothing above bit 6
 * is ever passed in.
 */
static inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}
2298
/* Exact inverse of ieee_ex_to_mips(): map the MIPS FP_* exception bits
 * back to softfloat's flag positions (each shift mirrors the one above).
 */
static inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}
2307
/* Latch the accumulated softfloat exception flags into FCR31: record
 * them in the cause field, then raise EXCP_FPE if any of them is
 * enabled, otherwise OR them into the sticky flags field.
 */
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(env, EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
2318
2319/* Float support.
2320   Single precition routines have a "s" suffix, double precision a
2321   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2322   paired single lower "pl", paired single upper "pu".  */
2323
2324/* unary operations, modifying fp status  */
2325uint64_t helper_float_sqrt_d(uint64_t fdt0)
2326{
2327    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2328}
2329
2330uint32_t helper_float_sqrt_s(uint32_t fst0)
2331{
2332    return float32_sqrt(fst0, &env->active_fpu.fp_status);
2333}
2334
2335uint64_t helper_float_cvtd_s(uint32_t fst0)
2336{
2337    uint64_t fdt2;
2338
2339    set_float_exception_flags(0, &env->active_fpu.fp_status);
2340    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2341    update_fcr31();
2342    return fdt2;
2343}
2344
2345uint64_t helper_float_cvtd_w(uint32_t wt0)
2346{
2347    uint64_t fdt2;
2348
2349    set_float_exception_flags(0, &env->active_fpu.fp_status);
2350    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2351    update_fcr31();
2352    return fdt2;
2353}
2354
2355uint64_t helper_float_cvtd_l(uint64_t dt0)
2356{
2357    uint64_t fdt2;
2358
2359    set_float_exception_flags(0, &env->active_fpu.fp_status);
2360    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2361    update_fcr31();
2362    return fdt2;
2363}
2364
2365uint64_t helper_float_cvtl_d(uint64_t fdt0)
2366{
2367    uint64_t dt2;
2368
2369    set_float_exception_flags(0, &env->active_fpu.fp_status);
2370    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2371    update_fcr31();
2372    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2373        dt2 = FLOAT_SNAN64;
2374    return dt2;
2375}
2376
2377uint64_t helper_float_cvtl_s(uint32_t fst0)
2378{
2379    uint64_t dt2;
2380
2381    set_float_exception_flags(0, &env->active_fpu.fp_status);
2382    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2383    update_fcr31();
2384    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2385        dt2 = FLOAT_SNAN64;
2386    return dt2;
2387}
2388
2389uint64_t helper_float_cvtps_pw(uint64_t dt0)
2390{
2391    uint32_t fst2;
2392    uint32_t fsth2;
2393
2394    set_float_exception_flags(0, &env->active_fpu.fp_status);
2395    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2396    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2397    update_fcr31();
2398    return ((uint64_t)fsth2 << 32) | fst2;
2399}
2400
2401uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2402{
2403    uint32_t wt2;
2404    uint32_t wth2;
2405
2406    set_float_exception_flags(0, &env->active_fpu.fp_status);
2407    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2408    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2409    update_fcr31();
2410    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2411        wt2 = FLOAT_SNAN32;
2412        wth2 = FLOAT_SNAN32;
2413    }
2414    return ((uint64_t)wth2 << 32) | wt2;
2415}
2416
2417uint32_t helper_float_cvts_d(uint64_t fdt0)
2418{
2419    uint32_t fst2;
2420
2421    set_float_exception_flags(0, &env->active_fpu.fp_status);
2422    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2423    update_fcr31();
2424    return fst2;
2425}
2426
2427uint32_t helper_float_cvts_w(uint32_t wt0)
2428{
2429    uint32_t fst2;
2430
2431    set_float_exception_flags(0, &env->active_fpu.fp_status);
2432    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2433    update_fcr31();
2434    return fst2;
2435}
2436
2437uint32_t helper_float_cvts_l(uint64_t dt0)
2438{
2439    uint32_t fst2;
2440
2441    set_float_exception_flags(0, &env->active_fpu.fp_status);
2442    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2443    update_fcr31();
2444    return fst2;
2445}
2446
2447uint32_t helper_float_cvts_pl(uint32_t wt0)
2448{
2449    uint32_t wt2;
2450
2451    set_float_exception_flags(0, &env->active_fpu.fp_status);
2452    wt2 = wt0;
2453    update_fcr31();
2454    return wt2;
2455}
2456
2457uint32_t helper_float_cvts_pu(uint32_t wth0)
2458{
2459    uint32_t wt2;
2460
2461    set_float_exception_flags(0, &env->active_fpu.fp_status);
2462    wt2 = wth0;
2463    update_fcr31();
2464    return wt2;
2465}
2466
2467uint32_t helper_float_cvtw_s(uint32_t fst0)
2468{
2469    uint32_t wt2;
2470
2471    set_float_exception_flags(0, &env->active_fpu.fp_status);
2472    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2473    update_fcr31();
2474    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2475        wt2 = FLOAT_SNAN32;
2476    return wt2;
2477}
2478
2479uint32_t helper_float_cvtw_d(uint64_t fdt0)
2480{
2481    uint32_t wt2;
2482
2483    set_float_exception_flags(0, &env->active_fpu.fp_status);
2484    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2485    update_fcr31();
2486    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2487        wt2 = FLOAT_SNAN32;
2488    return wt2;
2489}
2490
2491uint64_t helper_float_roundl_d(uint64_t fdt0)
2492{
2493    uint64_t dt2;
2494
2495    set_float_exception_flags(0, &env->active_fpu.fp_status);
2496    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2497    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2498    RESTORE_ROUNDING_MODE;
2499    update_fcr31();
2500    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2501        dt2 = FLOAT_SNAN64;
2502    return dt2;
2503}
2504
2505uint64_t helper_float_roundl_s(uint32_t fst0)
2506{
2507    uint64_t dt2;
2508
2509    set_float_exception_flags(0, &env->active_fpu.fp_status);
2510    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2511    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2512    RESTORE_ROUNDING_MODE;
2513    update_fcr31();
2514    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2515        dt2 = FLOAT_SNAN64;
2516    return dt2;
2517}
2518
2519uint32_t helper_float_roundw_d(uint64_t fdt0)
2520{
2521    uint32_t wt2;
2522
2523    set_float_exception_flags(0, &env->active_fpu.fp_status);
2524    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2525    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2526    RESTORE_ROUNDING_MODE;
2527    update_fcr31();
2528    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2529        wt2 = FLOAT_SNAN32;
2530    return wt2;
2531}
2532
2533uint32_t helper_float_roundw_s(uint32_t fst0)
2534{
2535    uint32_t wt2;
2536
2537    set_float_exception_flags(0, &env->active_fpu.fp_status);
2538    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2539    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2540    RESTORE_ROUNDING_MODE;
2541    update_fcr31();
2542    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2543        wt2 = FLOAT_SNAN32;
2544    return wt2;
2545}
2546
2547uint64_t helper_float_truncl_d(uint64_t fdt0)
2548{
2549    uint64_t dt2;
2550
2551    set_float_exception_flags(0, &env->active_fpu.fp_status);
2552    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2553    update_fcr31();
2554    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2555        dt2 = FLOAT_SNAN64;
2556    return dt2;
2557}
2558
2559uint64_t helper_float_truncl_s(uint32_t fst0)
2560{
2561    uint64_t dt2;
2562
2563    set_float_exception_flags(0, &env->active_fpu.fp_status);
2564    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2565    update_fcr31();
2566    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2567        dt2 = FLOAT_SNAN64;
2568    return dt2;
2569}
2570
2571uint32_t helper_float_truncw_d(uint64_t fdt0)
2572{
2573    uint32_t wt2;
2574
2575    set_float_exception_flags(0, &env->active_fpu.fp_status);
2576    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2577    update_fcr31();
2578    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2579        wt2 = FLOAT_SNAN32;
2580    return wt2;
2581}
2582
2583uint32_t helper_float_truncw_s(uint32_t fst0)
2584{
2585    uint32_t wt2;
2586
2587    set_float_exception_flags(0, &env->active_fpu.fp_status);
2588    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2589    update_fcr31();
2590    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2591        wt2 = FLOAT_SNAN32;
2592    return wt2;
2593}
2594
2595uint64_t helper_float_ceill_d(uint64_t fdt0)
2596{
2597    uint64_t dt2;
2598
2599    set_float_exception_flags(0, &env->active_fpu.fp_status);
2600    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2601    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2602    RESTORE_ROUNDING_MODE;
2603    update_fcr31();
2604    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2605        dt2 = FLOAT_SNAN64;
2606    return dt2;
2607}
2608
2609uint64_t helper_float_ceill_s(uint32_t fst0)
2610{
2611    uint64_t dt2;
2612
2613    set_float_exception_flags(0, &env->active_fpu.fp_status);
2614    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2615    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2616    RESTORE_ROUNDING_MODE;
2617    update_fcr31();
2618    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2619        dt2 = FLOAT_SNAN64;
2620    return dt2;
2621}
2622
2623uint32_t helper_float_ceilw_d(uint64_t fdt0)
2624{
2625    uint32_t wt2;
2626
2627    set_float_exception_flags(0, &env->active_fpu.fp_status);
2628    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2629    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2630    RESTORE_ROUNDING_MODE;
2631    update_fcr31();
2632    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2633        wt2 = FLOAT_SNAN32;
2634    return wt2;
2635}
2636
2637uint32_t helper_float_ceilw_s(uint32_t fst0)
2638{
2639    uint32_t wt2;
2640
2641    set_float_exception_flags(0, &env->active_fpu.fp_status);
2642    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2643    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2644    RESTORE_ROUNDING_MODE;
2645    update_fcr31();
2646    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2647        wt2 = FLOAT_SNAN32;
2648    return wt2;
2649}
2650
2651uint64_t helper_float_floorl_d(uint64_t fdt0)
2652{
2653    uint64_t dt2;
2654
2655    set_float_exception_flags(0, &env->active_fpu.fp_status);
2656    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2657    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2658    RESTORE_ROUNDING_MODE;
2659    update_fcr31();
2660    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2661        dt2 = FLOAT_SNAN64;
2662    return dt2;
2663}
2664
2665uint64_t helper_float_floorl_s(uint32_t fst0)
2666{
2667    uint64_t dt2;
2668
2669    set_float_exception_flags(0, &env->active_fpu.fp_status);
2670    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2671    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2672    RESTORE_ROUNDING_MODE;
2673    update_fcr31();
2674    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2675        dt2 = FLOAT_SNAN64;
2676    return dt2;
2677}
2678
2679uint32_t helper_float_floorw_d(uint64_t fdt0)
2680{
2681    uint32_t wt2;
2682
2683    set_float_exception_flags(0, &env->active_fpu.fp_status);
2684    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2685    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2686    RESTORE_ROUNDING_MODE;
2687    update_fcr31();
2688    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2689        wt2 = FLOAT_SNAN32;
2690    return wt2;
2691}
2692
2693uint32_t helper_float_floorw_s(uint32_t fst0)
2694{
2695    uint32_t wt2;
2696
2697    set_float_exception_flags(0, &env->active_fpu.fp_status);
2698    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2699    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2700    RESTORE_ROUNDING_MODE;
2701    update_fcr31();
2702    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2703        wt2 = FLOAT_SNAN32;
2704    return wt2;
2705}
2706
2707/* unary operations, not modifying fp status  */
/* abs and chs (negate) are pure sign-bit operations on MIPS: note that
   no fp_status pointer is passed and FCR31 is not updated, so they never
   raise IEEE exceptions.  The "ps" form applies the operation
   independently to both packed 32-bit halves.  */
#define FLOAT_UNOP(name)                                       \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
{                                                              \
    return float64_ ## name(fdt0);                             \
}                                                              \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
{                                                              \
    return float32_ ## name(fst0);                             \
}                                                              \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                              \
    uint32_t wt0;                                              \
    uint32_t wth0;                                             \
                                                               \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
    wth0 = float32_ ## name(fdt0 >> 32);                       \
    return ((uint64_t)wth0 << 32) | wt0;                       \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2729
2730/* MIPS specific unary operations */
2731uint64_t helper_float_recip_d(uint64_t fdt0)
2732{
2733    uint64_t fdt2;
2734
2735    set_float_exception_flags(0, &env->active_fpu.fp_status);
2736    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2737    update_fcr31();
2738    return fdt2;
2739}
2740
2741uint32_t helper_float_recip_s(uint32_t fst0)
2742{
2743    uint32_t fst2;
2744
2745    set_float_exception_flags(0, &env->active_fpu.fp_status);
2746    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2747    update_fcr31();
2748    return fst2;
2749}
2750
2751uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2752{
2753    uint64_t fdt2;
2754
2755    set_float_exception_flags(0, &env->active_fpu.fp_status);
2756    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2757    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2758    update_fcr31();
2759    return fdt2;
2760}
2761
2762uint32_t helper_float_rsqrt_s(uint32_t fst0)
2763{
2764    uint32_t fst2;
2765
2766    set_float_exception_flags(0, &env->active_fpu.fp_status);
2767    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2768    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2769    update_fcr31();
2770    return fst2;
2771}
2772
2773uint64_t helper_float_recip1_d(uint64_t fdt0)
2774{
2775    uint64_t fdt2;
2776
2777    set_float_exception_flags(0, &env->active_fpu.fp_status);
2778    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2779    update_fcr31();
2780    return fdt2;
2781}
2782
2783uint32_t helper_float_recip1_s(uint32_t fst0)
2784{
2785    uint32_t fst2;
2786
2787    set_float_exception_flags(0, &env->active_fpu.fp_status);
2788    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2789    update_fcr31();
2790    return fst2;
2791}
2792
2793uint64_t helper_float_recip1_ps(uint64_t fdt0)
2794{
2795    uint32_t fst2;
2796    uint32_t fsth2;
2797
2798    set_float_exception_flags(0, &env->active_fpu.fp_status);
2799    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2800    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2801    update_fcr31();
2802    return ((uint64_t)fsth2 << 32) | fst2;
2803}
2804
2805uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2806{
2807    uint64_t fdt2;
2808
2809    set_float_exception_flags(0, &env->active_fpu.fp_status);
2810    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2811    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2812    update_fcr31();
2813    return fdt2;
2814}
2815
2816uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2817{
2818    uint32_t fst2;
2819
2820    set_float_exception_flags(0, &env->active_fpu.fp_status);
2821    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2822    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2823    update_fcr31();
2824    return fst2;
2825}
2826
2827uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2828{
2829    uint32_t fst2;
2830    uint32_t fsth2;
2831
2832    set_float_exception_flags(0, &env->active_fpu.fp_status);
2833    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2834    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2835    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2836    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2837    update_fcr31();
2838    return ((uint64_t)fsth2 << 32) | fst2;
2839}
2840
2841#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2842
2843/* binary operations */
/* Arithmetic binops (add/sub/mul/div) in double, single and paired-single
   form.  Each clears the softfloat flags, performs the operation, latches
   the result into FCR31 via update_fcr31(), and substitutes the default
   QNAN bit pattern when the Invalid cause bit was set.  */
#define FLOAT_BINOP(name)                                          \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
2894
2895/* ternary operations */
/* Ternary ops (MADD/MSUB family): computed as two separately-rounded
   operations, NOT a fused multiply-add.  Note that unlike the binops
   these neither clear the softfloat flags nor call update_fcr31(), so
   the exception state is left for the caller's surrounding context.  */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP
2931
/* negated ternary operations */
/*
 * FOP_NTERNOP-style twin of FLOAT_TERNOP: computes -((a op1 b) op2 c),
 * i.e. the same two-step sequence followed by a sign flip via
 * float64_chs/float32_chs. FLOAT_NTERNOP(mul, add) defines
 * helper_float_nmuladd_{d,s,ps}, etc.
 *
 * The _ps variant handles paired singles: both 32-bit halves of the
 * packed uint64_t are processed independently and sign-flipped.
 *
 * NOTE(review): like FLOAT_TERNOP above, these helpers do not clear
 * the softfloat exception flags or call update_fcr31() -- confirm the
 * deferred flag handling is intentional.
 */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
2972
2973/* MIPS specific binary operations */
2974uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2975{
2976    set_float_exception_flags(0, &env->active_fpu.fp_status);
2977    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2978    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2979    update_fcr31();
2980    return fdt2;
2981}
2982
2983uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2984{
2985    set_float_exception_flags(0, &env->active_fpu.fp_status);
2986    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2987    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2988    update_fcr31();
2989    return fst2;
2990}
2991
2992uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2993{
2994    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2995    uint32_t fsth0 = fdt0 >> 32;
2996    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2997    uint32_t fsth2 = fdt2 >> 32;
2998
2999    set_float_exception_flags(0, &env->active_fpu.fp_status);
3000    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3001    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3002    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3003    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3004    update_fcr31();
3005    return ((uint64_t)fsth2 << 32) | fst2;
3006}
3007
3008uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
3009{
3010    set_float_exception_flags(0, &env->active_fpu.fp_status);
3011    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3012    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3013    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3014    update_fcr31();
3015    return fdt2;
3016}
3017
3018uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
3019{
3020    set_float_exception_flags(0, &env->active_fpu.fp_status);
3021    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3022    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3023    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3024    update_fcr31();
3025    return fst2;
3026}
3027
3028uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
3029{
3030    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3031    uint32_t fsth0 = fdt0 >> 32;
3032    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3033    uint32_t fsth2 = fdt2 >> 32;
3034
3035    set_float_exception_flags(0, &env->active_fpu.fp_status);
3036    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3037    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3038    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3039    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3040    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3041    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3042    update_fcr31();
3043    return ((uint64_t)fsth2 << 32) | fst2;
3044}
3045
3046uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3047{
3048    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3049    uint32_t fsth0 = fdt0 >> 32;
3050    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3051    uint32_t fsth1 = fdt1 >> 32;
3052    uint32_t fst2;
3053    uint32_t fsth2;
3054
3055    set_float_exception_flags(0, &env->active_fpu.fp_status);
3056    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3057    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3058    update_fcr31();
3059    return ((uint64_t)fsth2 << 32) | fst2;
3060}
3061
3062uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3063{
3064    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3065    uint32_t fsth0 = fdt0 >> 32;
3066    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3067    uint32_t fsth1 = fdt1 >> 32;
3068    uint32_t fst2;
3069    uint32_t fsth2;
3070
3071    set_float_exception_flags(0, &env->active_fpu.fp_status);
3072    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3073    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3074    update_fcr31();
3075    return ((uint64_t)fsth2 << 32) | fst2;
3076}
3077
3078/* compare operations */
/*
 * FOP_COND_D(op, cond) instantiates the double-precision compare
 * helpers for c.cond.d: helper_cmp_d_<op> evaluates "cond" on the raw
 * operands, while helper_cmpabs_d_<op> first strips the sign bits with
 * float64_abs() and compares magnitudes. The boolean result is stored
 * into FP condition code "cc" via SET_FP_COND/CLEAR_FP_COND, after
 * update_fcr31() has folded any softfloat exception flags (raised while
 * evaluating "cond") into FCSR.
 */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
3101
3102static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
3103{
3104    if (float64_is_signaling_nan(a) ||
3105        float64_is_signaling_nan(b) ||
3106        (sig && (float64_is_any_nan(a) || float64_is_any_nan(b)))) {
3107        float_raise(float_flag_invalid, status);
3108        return 1;
3109    } else if (float64_is_any_nan(a) || float64_is_any_nan(b)) {
3110        return 1;
3111    } else {
3112        return 0;
3113    }
3114}
3115
/* NOTE: the comma operator makes "cond" evaluate to the constant false,
 * but float64_is_unordered() is still called for its side effect of
 * raising the invalid exception on signaling-NaN operands. */
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to the constant false,
 * but float64_is_unordered() is still called; with sig=1 it raises the
 * invalid exception for any NaN operand, not just signaling NaNs. */
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3136
/*
 * FOP_COND_S(op, cond) instantiates the single-precision compare
 * helpers for c.cond.s, mirroring FOP_COND_D: helper_cmp_s_<op>
 * evaluates "cond" on the raw operands, helper_cmpabs_s_<op> on their
 * magnitudes (float32_abs). The result sets or clears FP condition
 * code "cc" after update_fcr31() merges any raised softfloat flags
 * into FCSR.
 */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
3159
3160static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
3161{
3162    if (float32_is_signaling_nan(a) ||
3163        float32_is_signaling_nan(b) ||
3164        (sig && (float32_is_any_nan(a) || float32_is_any_nan(b)))) {
3165        float_raise(float_flag_invalid, status);
3166        return 1;
3167    } else if (float32_is_any_nan(a) || float32_is_any_nan(b)) {
3168        return 1;
3169    } else {
3170        return 0;
3171    }
3172}
3173
/* NOTE: the comma operator makes "cond" evaluate to the constant false,
 * but float32_is_unordered() is still called for its side effect of
 * raising the invalid exception on signaling-NaN operands. */
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to the constant false,
 * but float32_is_unordered() is still called; with sig=1 it raises the
 * invalid exception for any NaN operand, not just signaling NaNs. */
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3194
/*
 * FOP_COND_PS(op, condl, condh) instantiates the paired-single compare
 * helpers for c.cond.ps. "condl" is evaluated on the low 32-bit halves
 * (fst0/fst1) and sets/clears condition code "cc"; "condh" on the high
 * halves (fsth0/fsth1) and sets/clears "cc + 1". update_fcr31() merges
 * any softfloat flags raised while evaluating the conditions into FCSR
 * before the condition codes are written.
 *
 * helper_cmp_ps_<op> compares the operands as given; only the
 * helper_cmpabs_ps_<op> variant strips the sign bits first, matching
 * the scalar FOP_COND_D/FOP_COND_S macros above. (Bug fix: the plain
 * variant previously applied float32_abs() too, making the non-abs
 * paired-single compares operate on magnitudes.)
 */
#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                          \
    uint32_t fsth0 = fdt0 >> 32;                                \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                          \
    uint32_t fsth1 = fdt1 >> 32;                                \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}
3234
/* NOTE: the comma operator makes each condition evaluate to the constant
 * false, but float32_is_unordered() is still called (once per half) for
 * its side effect of raising the invalid exception on signaling-NaN
 * operands. */
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes each condition evaluate to the constant
 * false, but float32_is_unordered() is still called; with sig=1 it
 * raises the invalid exception for any NaN operand, not just signaling
 * NaNs. */
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3271