1/*
2 *  MIPS emulation helpers for qemu.
3 *
4 *  Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include <stdlib.h>
20#include "cpu.h"
21#include "qemu/host-utils.h"
22#include "tcg.h"
23#include "helper.h"
24
25#if !defined(CONFIG_USER_ONLY)
26#include "exec/softmmu_exec.h"
27#endif /* !defined(CONFIG_USER_ONLY) */
28
29#ifndef CONFIG_USER_ONLY
30static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
31#endif
32
33static inline void compute_hflags(CPUMIPSState *env)
34{
35    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
36                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
37                     MIPS_HFLAG_UX);
38    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
39        !(env->CP0_Status & (1 << CP0St_ERL)) &&
40        !(env->hflags & MIPS_HFLAG_DM)) {
41        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
42    }
43#if defined(TARGET_MIPS64)
44    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
45        (env->CP0_Status & (1 << CP0St_PX)) ||
46        (env->CP0_Status & (1 << CP0St_UX))) {
47        env->hflags |= MIPS_HFLAG_64;
48    }
49    if (env->CP0_Status & (1 << CP0St_UX)) {
50        env->hflags |= MIPS_HFLAG_UX;
51    }
52#endif
53    if ((env->CP0_Status & (1 << CP0St_CU0)) ||
54        !(env->hflags & MIPS_HFLAG_KSU)) {
55        env->hflags |= MIPS_HFLAG_CP0;
56    }
57    if (env->CP0_Status & (1 << CP0St_CU1)) {
58        env->hflags |= MIPS_HFLAG_FPU;
59    }
60    if (env->CP0_Status & (1 << CP0St_FR)) {
61        env->hflags |= MIPS_HFLAG_F64;
62    }
63    if (env->insn_flags & ISA_MIPS32R2) {
64        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
65            env->hflags |= MIPS_HFLAG_COP1X;
66        }
67    } else if (env->insn_flags & ISA_MIPS32) {
68        if (env->hflags & MIPS_HFLAG_64) {
69            env->hflags |= MIPS_HFLAG_COP1X;
70        }
71    } else if (env->insn_flags & ISA_MIPS4) {
72        /* All supported MIPS IV CPUs use the XX (CU3) to enable
73           and disable the MIPS IV extensions to the MIPS III ISA.
74           Some other MIPS IV CPUs ignore the bit, so the check here
75           would be too restrictive for them.  */
76        if (env->CP0_Status & (1 << CP0St_CU3)) {
77            env->hflags |= MIPS_HFLAG_COP1X;
78        }
79    }
80}
81
82/*****************************************************************************/
83/* Exceptions processing helpers */
84
85void helper_raise_exception_err (CPUMIPSState *env,
86                                 uint32_t exception, int error_code)
87{
88#if 1
89    if (exception < 0x100)
90        qemu_log("%s: %d %d\n", __func__, exception, error_code);
91#endif
92    env->exception_index = exception;
93    env->error_code = error_code;
94    cpu_loop_exit(env);
95}
96
97void helper_raise_exception (CPUMIPSState *env, uint32_t exception)
98{
99    helper_raise_exception_err(env, exception, 0);
100}
101
102void helper_interrupt_restart (CPUMIPSState *env)
103{
104    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
105        !(env->CP0_Status & (1 << CP0St_ERL)) &&
106        !(env->hflags & MIPS_HFLAG_DM) &&
107        (env->CP0_Status & (1 << CP0St_IE)) &&
108        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
109        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
110        helper_raise_exception(env, EXCP_EXT_INTERRUPT);
111    }
112}
113
114#if !defined(CONFIG_USER_ONLY)
115static void do_restore_state (CPUMIPSState *env, uintptr_t pc)
116{
117    TranslationBlock *tb;
118
119    tb = tb_find_pc (pc);
120    if (tb) {
121        cpu_restore_state (env, pc);
122    }
123}
124#endif
125
126#if defined(CONFIG_USER_ONLY)
127#define HELPER_LD(name, insn, type)                                     \
128static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
129                             int mem_idx)                               \
130{                                                                       \
131    return (type) cpu_##insn##_raw(env, addr);                                     \
132}
133#else
134#define HELPER_LD(name, insn, type)                                     \
135static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
136                             int mem_idx)                               \
137{                                                                       \
138    switch (mem_idx)                                                    \
139    {                                                                   \
140    case 0: return (type) cpu_##insn##_kernel(env, addr); break;        \
141    case 1: return (type) cpu_##insn##_super(env, addr); break;         \
142    default:                                                            \
143    case 2: return (type) cpu_##insn##_user(env, addr); break;          \
144    }                                                                   \
145}
146#endif
147HELPER_LD(lbu, ldub, uint8_t)
148HELPER_LD(lw, ldl, int32_t)
149#ifdef TARGET_MIPS64
150HELPER_LD(ld, ldq, int64_t)
151#endif
152#undef HELPER_LD
153
154#if defined(CONFIG_USER_ONLY)
155#define HELPER_ST(name, insn, type)                                     \
156static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
157                             type val, int mem_idx)                     \
158{                                                                       \
159    cpu_##insn##_raw(env, addr, val);                                              \
160}
161#else
162#define HELPER_ST(name, insn, type)                                     \
163static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
164                             type val, int mem_idx)                     \
165{                                                                       \
166    switch (mem_idx)                                                    \
167    {                                                                   \
168    case 0: cpu_##insn##_kernel(env, addr, val); break;                 \
169    case 1: cpu_##insn##_super(env, addr, val); break;                  \
170    default:                                                            \
171    case 2: cpu_##insn##_user(env, addr, val); break;                   \
172    }                                                                   \
173}
174#endif
175HELPER_ST(sb, stb, uint8_t)
176HELPER_ST(sw, stl, uint32_t)
177#ifdef TARGET_MIPS64
178HELPER_ST(sd, stq, uint64_t)
179#endif
180#undef HELPER_ST
181
182target_ulong helper_clo (target_ulong arg1)
183{
184    return clo32(arg1);
185}
186
187target_ulong helper_clz (target_ulong arg1)
188{
189    return clz32(arg1);
190}
191
192#if defined(TARGET_MIPS64)
193target_ulong helper_dclo (target_ulong arg1)
194{
195    return clo64(arg1);
196}
197
198target_ulong helper_dclz (target_ulong arg1)
199{
200    return clz64(arg1);
201}
202#endif /* TARGET_MIPS64 */
203
204/* 64 bits arithmetic for 32 bits hosts */
205static inline uint64_t get_HILO(CPUMIPSState *env)
206{
207    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
208}
209
210static inline void set_HILO (CPUMIPSState *env, uint64_t HILO)
211{
212    env->active_tc.LO[0] = (int32_t)HILO;
213    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
214}
215
216static inline void set_HIT0_LO (CPUMIPSState *env, target_ulong arg1, uint64_t HILO)
217{
218    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
219    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
220}
221
222static inline void set_HI_LOT0 (CPUMIPSState *env, target_ulong arg1, uint64_t HILO)
223{
224    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
225    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
226}
227
228/* Multiplication variants of the vr54xx. */
229target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
230                         target_ulong arg2)
231{
232    set_HI_LOT0(env, arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
233
234    return arg1;
235}
236
237target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
238                          target_ulong arg2)
239{
240    set_HI_LOT0(env, arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
241
242    return arg1;
243}
244
245target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
246                         target_ulong arg2)
247{
248    set_HI_LOT0(env, arg1, ((int64_t)get_HILO(env)) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
249
250    return arg1;
251}
252
253target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
254                           target_ulong arg2)
255{
256    set_HIT0_LO(env, arg1, ((int64_t)get_HILO(env)) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
257
258    return arg1;
259}
260
261target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
262                          target_ulong arg2)
263{
264    set_HI_LOT0(env, arg1, ((uint64_t)get_HILO(env)) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
265
266    return arg1;
267}
268
269target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
270                            target_ulong arg2)
271{
272    set_HIT0_LO(env, arg1, ((uint64_t)get_HILO(env)) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
273
274    return arg1;
275}
276
277target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
278                         target_ulong arg2)
279{
280    set_HI_LOT0(env, arg1, ((int64_t)get_HILO(env)) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
281
282    return arg1;
283}
284
285target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
286                           target_ulong arg2)
287{
288    set_HIT0_LO(env, arg1, ((int64_t)get_HILO(env)) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
289
290    return arg1;
291}
292
293target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
294                          target_ulong arg2)
295{
296    set_HI_LOT0(env, arg1, ((uint64_t)get_HILO(env)) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
297
298    return arg1;
299}
300
301target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
302                            target_ulong arg2)
303{
304    set_HIT0_LO(env, arg1, ((uint64_t)get_HILO(env)) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
305
306    return arg1;
307}
308
309target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
310                          target_ulong arg2)
311{
312    set_HIT0_LO(env, arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
313
314    return arg1;
315}
316
317target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
318                           target_ulong arg2)
319{
320    set_HIT0_LO(env, arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
321
322    return arg1;
323}
324
325target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
326                           target_ulong arg2)
327{
328    set_HIT0_LO(env, arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
329
330    return arg1;
331}
332
333target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
334                            target_ulong arg2)
335{
336    set_HIT0_LO(env, arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
337
338    return arg1;
339}
340
341#ifdef TARGET_MIPS64
342void helper_dmult (CPUMIPSState *env, target_ulong arg1, target_ulong arg2)
343{
344    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
345}
346
347void helper_dmultu (CPUMIPSState *env, target_ulong arg1, target_ulong arg2)
348{
349    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
350}
351#endif
352
353#ifndef CONFIG_USER_ONLY
354
355static inline hwaddr do_translate_address(CPUMIPSState *env,
356                                                      target_ulong address,
357                                                      int rw)
358{
359    hwaddr lladdr;
360
361    lladdr = cpu_mips_translate_address(env, address, rw);
362
363    if (lladdr == (hwaddr)-1LL) {
364        cpu_loop_exit(env);
365    } else {
366        return lladdr;
367    }
368}
369
370#define HELPER_LD_ATOMIC(name, insn)                                          \
371target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
372{                                                                             \
373    env->lladdr = do_translate_address(env, arg, 0);                        \
374    /* NOTE(digit): Use of 'cpu_single_env' works around compiler bug! */     \
375    cpu_single_env->llval = do_##insn(env, arg, mem_idx);                    \
376    return env->llval;                                                       \
377}
378HELPER_LD_ATOMIC(ll, lw)
379#ifdef TARGET_MIPS64
380HELPER_LD_ATOMIC(lld, ld)
381#endif
382#undef HELPER_LD_ATOMIC
383
384#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
385target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1,              \
386                           target_ulong arg2, int mem_idx)                    \
387{                                                                             \
388    target_long tmp;                                                          \
389                                                                              \
390    if (arg2 & almask) {                                                      \
391        env->CP0_BadVAddr = arg2;                                             \
392        helper_raise_exception(env, EXCP_AdES);                               \
393    }                                                                         \
394    if (do_translate_address(env, arg2, 1) == env->lladdr) {                  \
395        tmp = do_##ld_insn(env, arg2, mem_idx);                               \
396        if (tmp == env->llval) {                                              \
397            do_##st_insn(env, arg2, arg1, mem_idx);                           \
398            return 1;                                                         \
399        }                                                                     \
400    }                                                                         \
401    return 0;                                                                 \
402}
403HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
404#ifdef TARGET_MIPS64
405HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
406#endif
407#undef HELPER_ST_ATOMIC
408#endif
409
410#ifdef TARGET_WORDS_BIGENDIAN
411#define GET_LMASK(v) ((v) & 3)
412#define GET_OFFSET(addr, offset) (addr + (offset))
413#else
414#define GET_LMASK(v) (((v) & 3) ^ 3)
415#define GET_OFFSET(addr, offset) (addr - (offset))
416#endif
417
418target_ulong helper_lwl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
419                        int mem_idx)
420{
421    target_ulong tmp;
422
423    tmp = do_lbu(env, arg2, mem_idx);
424    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
425
426    if (GET_LMASK(arg2) <= 2) {
427        tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
428        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
429    }
430
431    if (GET_LMASK(arg2) <= 1) {
432        tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
433        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
434    }
435
436    if (GET_LMASK(arg2) == 0) {
437        tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
438        arg1 = (arg1 & 0xFFFFFF00) | tmp;
439    }
440    return (int32_t)arg1;
441}
442
443target_ulong helper_lwr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
444                        int mem_idx)
445{
446    target_ulong tmp;
447
448    tmp = do_lbu(env, arg2, mem_idx);
449    arg1 = (arg1 & 0xFFFFFF00) | tmp;
450
451    if (GET_LMASK(arg2) >= 1) {
452        tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
453        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
454    }
455
456    if (GET_LMASK(arg2) >= 2) {
457        tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
458        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
459    }
460
461    if (GET_LMASK(arg2) == 3) {
462        tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
463        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
464    }
465    return (int32_t)arg1;
466}
467
468void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
469                int mem_idx)
470{
471    do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx);
472
473    if (GET_LMASK(arg2) <= 2)
474        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
475
476    if (GET_LMASK(arg2) <= 1)
477        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
478
479    if (GET_LMASK(arg2) == 0)
480        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
481}
482
483void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
484                int mem_idx)
485{
486    do_sb(env, arg2, (uint8_t)arg1, mem_idx);
487
488    if (GET_LMASK(arg2) >= 1)
489        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
490
491    if (GET_LMASK(arg2) >= 2)
492        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
493
494    if (GET_LMASK(arg2) == 3)
495        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
496}
497
498#if defined(TARGET_MIPS64)
499/* "half" load and stores.  We must do the memory access inline,
500   or fault handling won't work.  */
501
502#ifdef TARGET_WORDS_BIGENDIAN
503#define GET_LMASK64(v) ((v) & 7)
504#else
505#define GET_LMASK64(v) (((v) & 7) ^ 7)
506#endif
507
508target_ulong helper_ldl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
509                        int mem_idx)
510{
511    uint64_t tmp;
512
513    tmp = do_lbu(env, arg2, mem_idx);
514    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
515
516    if (GET_LMASK64(arg2) <= 6) {
517        tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
518        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
519    }
520
521    if (GET_LMASK64(arg2) <= 5) {
522        tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
523        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
524    }
525
526    if (GET_LMASK64(arg2) <= 4) {
527        tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
528        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
529    }
530
531    if (GET_LMASK64(arg2) <= 3) {
532        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
533        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
534    }
535
536    if (GET_LMASK64(arg2) <= 2) {
537        tmp = do_lbu(env, GET_OFFSET(arg2, 5), mem_idx);
538        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
539    }
540
541    if (GET_LMASK64(arg2) <= 1) {
542        tmp = do_lbu(env, GET_OFFSET(arg2, 6), mem_idx);
543        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
544    }
545
546    if (GET_LMASK64(arg2) == 0) {
547        tmp = do_lbu(env, GET_OFFSET(arg2, 7), mem_idx);
548        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
549    }
550
551    return arg1;
552}
553
554target_ulong helper_ldr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
555                        int mem_idx)
556{
557    uint64_t tmp;
558
559    tmp = do_lbu(env, arg2, mem_idx);
560    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
561
562    if (GET_LMASK64(arg2) >= 1) {
563        tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
564        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
565    }
566
567    if (GET_LMASK64(arg2) >= 2) {
568        tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
569        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
570    }
571
572    if (GET_LMASK64(arg2) >= 3) {
573        tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
574        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
575    }
576
577    if (GET_LMASK64(arg2) >= 4) {
578        tmp = do_lbu(env, GET_OFFSET(arg2, -4), mem_idx);
579        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
580    }
581
582    if (GET_LMASK64(arg2) >= 5) {
583        tmp = do_lbu(env, GET_OFFSET(arg2, -5), mem_idx);
584        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
585    }
586
587    if (GET_LMASK64(arg2) >= 6) {
588        tmp = do_lbu(env, GET_OFFSET(arg2, -6), mem_idx);
589        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
590    }
591
592    if (GET_LMASK64(arg2) == 7) {
593        tmp = do_lbu(env, GET_OFFSET(arg2, -7), mem_idx);
594        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
595    }
596
597    return arg1;
598}
599
600void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
601                 int mem_idx)
602{
603    do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx);
604
605    if (GET_LMASK64(arg2) <= 6)
606        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
607
608    if (GET_LMASK64(arg2) <= 5)
609        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
610
611    if (GET_LMASK64(arg2) <= 4)
612        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
613
614    if (GET_LMASK64(arg2) <= 3)
615        do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
616
617    if (GET_LMASK64(arg2) <= 2)
618        do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
619
620    if (GET_LMASK64(arg2) <= 1)
621        do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
622
623    if (GET_LMASK64(arg2) <= 0)
624        do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
625}
626
627void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
628                 int mem_idx)
629{
630    do_sb(env, arg2, (uint8_t)arg1, mem_idx);
631
632    if (GET_LMASK64(arg2) >= 1)
633        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
634
635    if (GET_LMASK64(arg2) >= 2)
636        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
637
638    if (GET_LMASK64(arg2) >= 3)
639        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
640
641    if (GET_LMASK64(arg2) >= 4)
642        do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
643
644    if (GET_LMASK64(arg2) >= 5)
645        do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
646
647    if (GET_LMASK64(arg2) >= 6)
648        do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
649
650    if (GET_LMASK64(arg2) == 7)
651        do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
652}
653#endif /* TARGET_MIPS64 */
654
655#ifndef CONFIG_USER_ONLY
656/* tc should point to an int with the value of the global TC index.
657   This function will transform it into a local index within the
658   returned CPUState.
659
660   FIXME: This code assumes that all VPEs have the same number of TCs,
661          which depends on runtime setup. Can probably be fixed by
662          walking the list of CPUMIPSStates.  */
663static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
664{
665    int vpe_idx, nr_threads = ENV_GET_CPU(env)->nr_threads;
666    int tc_idx = *tc;
667
668    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
669        /* Not allowed to address other CPUs.  */
670        *tc = env->current_tc;
671        return env;
672    }
673
674    vpe_idx = tc_idx / nr_threads;
675    *tc = tc_idx % nr_threads;
676    CPUState *other = qemu_get_cpu(vpe_idx);
677    return other ? other->env_ptr : env;
678}
679
680/* The per VPE CP0_Status register shares some fields with the per TC
681   CP0_TCStatus registers. These fields are wired to the same registers,
682   so changes to either of them should be reflected on both registers.
683
684   Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
685
686   These helper call synchronizes the regs for a given cpu.  */
687
688/* Called for updates to CP0_Status.  */
689static void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
690{
691    int32_t tcstatus, *tcst;
692    uint32_t v = cpu->CP0_Status;
693    uint32_t cu, mx, asid, ksu;
694    uint32_t mask = ((1 << CP0TCSt_TCU3)
695                       | (1 << CP0TCSt_TCU2)
696                       | (1 << CP0TCSt_TCU1)
697                       | (1 << CP0TCSt_TCU0)
698                       | (1 << CP0TCSt_TMX)
699                       | (3 << CP0TCSt_TKSU)
700                       | (0xff << CP0TCSt_TASID));
701
702    cu = (v >> CP0St_CU0) & 0xf;
703    mx = (v >> CP0St_MX) & 0x1;
704    ksu = (v >> CP0St_KSU) & 0x3;
705    asid = env->CP0_EntryHi & 0xff;
706
707    tcstatus = cu << CP0TCSt_TCU0;
708    tcstatus |= mx << CP0TCSt_TMX;
709    tcstatus |= ksu << CP0TCSt_TKSU;
710    tcstatus |= asid;
711
712    if (tc == cpu->current_tc) {
713        tcst = &cpu->active_tc.CP0_TCStatus;
714    } else {
715        tcst = &cpu->tcs[tc].CP0_TCStatus;
716    }
717
718    *tcst &= ~mask;
719    *tcst |= tcstatus;
720    compute_hflags(cpu);
721}
722
723/* Called for updates to CP0_TCStatus.  */
724static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
725                             target_ulong v)
726{
727    uint32_t status;
728    uint32_t tcu, tmx, tasid, tksu;
729    uint32_t mask = ((1 << CP0St_CU3)
730                       | (1 << CP0St_CU2)
731                       | (1 << CP0St_CU1)
732                       | (1 << CP0St_CU0)
733                       | (1 << CP0St_MX)
734                       | (3 << CP0St_KSU));
735
736    tcu = (v >> CP0TCSt_TCU0) & 0xf;
737    tmx = (v >> CP0TCSt_TMX) & 0x1;
738    tasid = v & 0xff;
739    tksu = (v >> CP0TCSt_TKSU) & 0x3;
740
741    status = tcu << CP0St_CU0;
742    status |= tmx << CP0St_MX;
743    status |= tksu << CP0St_KSU;
744
745    cpu->CP0_Status &= ~mask;
746    cpu->CP0_Status |= status;
747
748    /* Sync the TASID with EntryHi.  */
749    cpu->CP0_EntryHi &= ~0xff;
750    cpu->CP0_EntryHi = tasid;
751
752    compute_hflags(cpu);
753}
754
755/* Called for updates to CP0_EntryHi.  */
756static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
757{
758    int32_t *tcst;
759    uint32_t asid, v = cpu->CP0_EntryHi;
760
761    asid = v & 0xff;
762
763    if (tc == cpu->current_tc) {
764        tcst = &cpu->active_tc.CP0_TCStatus;
765    } else {
766        tcst = &cpu->tcs[tc].CP0_TCStatus;
767    }
768
769    *tcst &= ~0xff;
770    *tcst |= asid;
771}
772
773/* CP0 helpers */
774target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
775{
776    return env->mvp->CP0_MVPControl;
777}
778
779target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
780{
781    return env->mvp->CP0_MVPConf0;
782}
783
784target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
785{
786    return env->mvp->CP0_MVPConf1;
787}
788
789target_ulong helper_mfc0_random(CPUMIPSState *env)
790{
791    return (int32_t)cpu_mips_get_random(env);
792}
793
794target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
795{
796    return env->active_tc.CP0_TCStatus;
797}
798
799target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
800{
801    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
802    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
803
804    if (other_tc == other->current_tc)
805        return other->active_tc.CP0_TCStatus;
806    else
807        return other->tcs[other_tc].CP0_TCStatus;
808}
809
810target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
811{
812    return env->active_tc.CP0_TCBind;
813}
814
815target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
816{
817    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
818    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
819
820    if (other_tc == other->current_tc)
821        return other->active_tc.CP0_TCBind;
822    else
823        return other->tcs[other_tc].CP0_TCBind;
824}
825
826target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
827{
828    return env->active_tc.PC;
829}
830
831target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
832{
833    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
834    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
835
836    if (other_tc == other->current_tc)
837        return other->active_tc.PC;
838    else
839        return other->tcs[other_tc].PC;
840}
841
842target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
843{
844    return env->active_tc.CP0_TCHalt;
845}
846
847target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
848{
849    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
850    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
851
852    if (other_tc == other->current_tc)
853        return other->active_tc.CP0_TCHalt;
854    else
855        return other->tcs[other_tc].CP0_TCHalt;
856}
857
858target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
859{
860    return env->active_tc.CP0_TCContext;
861}
862
863target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
864{
865    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
866    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
867
868    if (other_tc == other->current_tc)
869        return other->active_tc.CP0_TCContext;
870    else
871        return other->tcs[other_tc].CP0_TCContext;
872}
873
874target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
875{
876    return env->active_tc.CP0_TCSchedule;
877}
878
879target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
880{
881    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
882    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
883
884    if (other_tc == other->current_tc)
885        return other->active_tc.CP0_TCSchedule;
886    else
887        return other->tcs[other_tc].CP0_TCSchedule;
888}
889
890target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
891{
892    return env->active_tc.CP0_TCScheFBack;
893}
894
895target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
896{
897    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
898    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
899
900    if (other_tc == other->current_tc)
901        return other->active_tc.CP0_TCScheFBack;
902    else
903        return other->tcs[other_tc].CP0_TCScheFBack;
904}
905
906target_ulong helper_mfc0_count(CPUMIPSState *env)
907{
908    return (int32_t)cpu_mips_get_count(env);
909}
910
911target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
912{
913    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
914    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
915
916    return other->CP0_EntryHi;
917}
918
919target_ulong helper_mftc0_status(CPUMIPSState *env)
920{
921    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
922    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
923
924    return other->CP0_Status;
925}
926
927target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
928{
929    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
930}
931
932target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
933{
934    return (int32_t)env->CP0_WatchLo[sel];
935}
936
937target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
938{
939    return env->CP0_WatchHi[sel];
940}
941
942target_ulong helper_mfc0_debug(CPUMIPSState *env)
943{
944    target_ulong t0 = env->CP0_Debug;
945    if (env->hflags & MIPS_HFLAG_DM)
946        t0 |= 1 << CP0DB_DM;
947
948    return t0;
949}
950
951target_ulong helper_mftc0_debug(CPUMIPSState *env)
952{
953    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
954    int32_t tcstatus;
955    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
956
957    if (other_tc == other->current_tc)
958        tcstatus = other->active_tc.CP0_Debug_tcstatus;
959    else
960        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
961
962    /* XXX: Might be wrong, check with EJTAG spec. */
963    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
964            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
965}
966
967#if defined(TARGET_MIPS64)
968target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
969{
970    return env->active_tc.PC;
971}
972
973target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
974{
975    return env->active_tc.CP0_TCHalt;
976}
977
978target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
979{
980    return env->active_tc.CP0_TCContext;
981}
982
983target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
984{
985    return env->active_tc.CP0_TCSchedule;
986}
987
988target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
989{
990    return env->active_tc.CP0_TCScheFBack;
991}
992
993target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
994{
995    return env->lladdr >> env->CP0_LLAddr_shift;
996}
997
998target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
999{
1000    return env->CP0_WatchLo[sel];
1001}
1002#endif /* TARGET_MIPS64 */
1003
1004void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
1005{
1006    int num = 1;
1007    unsigned int tmp = env->tlb->nb_tlb;
1008
1009    do {
1010        tmp >>= 1;
1011        num <<= 1;
1012    } while (tmp);
1013    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
1014}
1015
1016void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
1017{
1018    uint32_t mask = 0;
1019    uint32_t newval;
1020
1021    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1022        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1023                (1 << CP0MVPCo_EVP);
1024    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1025        mask |= (1 << CP0MVPCo_STLB);
1026    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1027
1028    // TODO: Enable/disable shared TLB, enable/disable VPEs.
1029
1030    env->mvp->CP0_MVPControl = newval;
1031}
1032
1033void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
1034{
1035    uint32_t mask;
1036    uint32_t newval;
1037
1038    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1039           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1040    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1041
1042    /* Yield scheduler intercept not implemented. */
1043    /* Gating storage scheduler intercept not implemented. */
1044
1045    // TODO: Enable/disable TCs.
1046
1047    env->CP0_VPEControl = newval;
1048}
1049
1050void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1051{
1052    uint32_t mask = 0;
1053    uint32_t newval;
1054
1055    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1056        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1057            mask |= (0xff << CP0VPEC0_XTC);
1058        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1059    }
1060    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1061
1062    // TODO: TC exclusive handling due to ERL/EXL.
1063
1064    env->CP0_VPEConf0 = newval;
1065}
1066
1067void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
1068{
1069    uint32_t mask = 0;
1070    uint32_t newval;
1071
1072    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1073        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1074                (0xff << CP0VPEC1_NCP1);
1075    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1076
1077    /* UDI not implemented. */
1078    /* CP2 not implemented. */
1079
1080    // TODO: Handle FPU (CP1) binding.
1081
1082    env->CP0_VPEConf1 = newval;
1083}
1084
1085void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
1086{
1087    /* Yield qualifier inputs not implemented. */
1088    env->CP0_YQMask = 0x00000000;
1089}
1090
1091void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
1092{
1093    env->CP0_VPEOpt = arg1 & 0x0000ffff;
1094}
1095
1096void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
1097{
1098    /* Large physaddr (PABITS) not implemented */
1099    /* 1k pages not implemented */
1100    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1101}
1102
1103void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1104{
1105    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1106    uint32_t newval;
1107
1108    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1109
1110    env->active_tc.CP0_TCStatus = newval;
1111    sync_c0_tcstatus(env, env->current_tc, newval);
1112}
1113
1114void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1115{
1116    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1117    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1118
1119    if (other_tc == other->current_tc)
1120        other->active_tc.CP0_TCStatus = arg1;
1121    else
1122        other->tcs[other_tc].CP0_TCStatus = arg1;
1123    sync_c0_tcstatus(other, other_tc, arg1);
1124}
1125
1126void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1127{
1128    uint32_t mask = (1 << CP0TCBd_TBE);
1129    uint32_t newval;
1130
1131    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1132        mask |= (1 << CP0TCBd_CurVPE);
1133    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1134    env->active_tc.CP0_TCBind = newval;
1135}
1136
1137void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1138{
1139    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1140    uint32_t mask = (1 << CP0TCBd_TBE);
1141    uint32_t newval;
1142    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1143
1144    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1145        mask |= (1 << CP0TCBd_CurVPE);
1146    if (other_tc == other->current_tc) {
1147        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1148        other->active_tc.CP0_TCBind = newval;
1149    } else {
1150        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1151        other->tcs[other_tc].CP0_TCBind = newval;
1152    }
1153}
1154
1155void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
1156{
1157    env->active_tc.PC = arg1;
1158    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1159    env->lladdr = 0ULL;
1160    /* MIPS16 not implemented. */
1161}
1162
1163void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
1164{
1165    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1166    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1167
1168    if (other_tc == other->current_tc) {
1169        other->active_tc.PC = arg1;
1170        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1171        other->lladdr = 0ULL;
1172        /* MIPS16 not implemented. */
1173    } else {
1174        other->tcs[other_tc].PC = arg1;
1175        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1176        other->lladdr = 0ULL;
1177        /* MIPS16 not implemented. */
1178    }
1179}
1180
1181void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
1182{
1183    env->active_tc.CP0_TCHalt = arg1 & 0x1;
1184
1185    // TODO: Halt TC / Restart (if allocated+active) TC.
1186}
1187
1188void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
1189{
1190    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1191    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1192
1193    // TODO: Halt TC / Restart (if allocated+active) TC.
1194
1195    if (other_tc == other->current_tc)
1196        other->active_tc.CP0_TCHalt = arg1;
1197    else
1198        other->tcs[other_tc].CP0_TCHalt = arg1;
1199}
1200
1201void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1202{
1203    env->active_tc.CP0_TCContext = arg1;
1204}
1205
1206void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1207{
1208    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1209    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1210
1211    if (other_tc == other->current_tc)
1212        other->active_tc.CP0_TCContext = arg1;
1213    else
1214        other->tcs[other_tc].CP0_TCContext = arg1;
1215}
1216
1217void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1218{
1219    env->active_tc.CP0_TCSchedule = arg1;
1220}
1221
1222void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1223{
1224    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1225    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1226
1227    if (other_tc == other->current_tc)
1228        other->active_tc.CP0_TCSchedule = arg1;
1229    else
1230        other->tcs[other_tc].CP0_TCSchedule = arg1;
1231}
1232
1233void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1234{
1235    env->active_tc.CP0_TCScheFBack = arg1;
1236}
1237
1238void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1239{
1240    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1241    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1242
1243    if (other_tc == other->current_tc)
1244        other->active_tc.CP0_TCScheFBack = arg1;
1245    else
1246        other->tcs[other_tc].CP0_TCScheFBack = arg1;
1247}
1248
1249void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
1250{
1251    /* Large physaddr (PABITS) not implemented */
1252    /* 1k pages not implemented */
1253    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1254}
1255
1256void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
1257{
1258    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1259}
1260
1261void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
1262{
1263    /* 1k pages not implemented */
1264    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1265}
1266
1267void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
1268{
1269    /* SmartMIPS not implemented */
1270    /* Large physaddr (PABITS) not implemented */
1271    /* 1k pages not implemented */
1272    env->CP0_PageGrain = 0;
1273}
1274
1275void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
1276{
1277    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1278}
1279
1280void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
1281{
1282    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1283}
1284
1285void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
1286{
1287    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1288}
1289
1290void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
1291{
1292    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1293}
1294
1295void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
1296{
1297    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1298}
1299
1300void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
1301{
1302    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1303}
1304
1305void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
1306{
1307    env->CP0_HWREna = arg1 & 0x0000000F;
1308}
1309
1310void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
1311{
1312    cpu_mips_store_count(env, arg1);
1313}
1314
1315void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1316{
1317    target_ulong old, val;
1318
1319    /* 1k pages not implemented */
1320    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1321#if defined(TARGET_MIPS64)
1322    val &= env->SEGMask;
1323#endif
1324    old = env->CP0_EntryHi;
1325    env->CP0_EntryHi = val;
1326    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1327        sync_c0_entryhi(env, env->current_tc);
1328    }
1329    /* If the ASID changes, flush qemu's TLB.  */
1330    if ((old & 0xFF) != (val & 0xFF))
1331        cpu_mips_tlb_flush(env, 1);
1332}
1333
1334void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1335{
1336    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1337    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1338
1339    other->CP0_EntryHi = arg1;
1340    sync_c0_entryhi(other, other_tc);
1341}
1342
1343void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
1344{
1345    cpu_mips_store_compare(env, arg1);
1346}
1347
1348void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
1349{
1350    uint32_t val, old;
1351    uint32_t mask = env->CP0_Status_rw_bitmask;
1352
1353    val = arg1 & mask;
1354    old = env->CP0_Status;
1355    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1356    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1357        sync_c0_status(env, env, env->current_tc);
1358    } else {
1359        compute_hflags(env);
1360    }
1361
1362    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1363        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1364                old, old & env->CP0_Cause & CP0Ca_IP_mask,
1365                val, val & env->CP0_Cause & CP0Ca_IP_mask,
1366                env->CP0_Cause);
1367        switch (env->hflags & MIPS_HFLAG_KSU) {
1368        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1369        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1370        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1371        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1372        }
1373    }
1374    cpu_mips_update_irq(env);
1375}
1376
1377void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
1378{
1379    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1380    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1381
1382    other->CP0_Status = arg1 & ~0xf1000018;
1383    sync_c0_status(env, other, other_tc);
1384}
1385
1386void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
1387{
1388    /* vectored interrupts not implemented, no performance counters. */
1389    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1390}
1391
1392void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
1393{
1394    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1395    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1396}
1397
1398void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
1399{
1400    uint32_t mask = 0x00C00300;
1401    uint32_t old = env->CP0_Cause;
1402
1403    if (env->insn_flags & ISA_MIPS32R2)
1404        mask |= 1 << CP0Ca_DC;
1405
1406    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
1407
1408    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1409        if (env->CP0_Cause & (1 << CP0Ca_DC))
1410            cpu_mips_stop_count(env);
1411        else
1412            cpu_mips_start_count(env);
1413    }
1414
1415    /* Handle the software interrupt as an hardware one, as they
1416       are very similar */
1417    if (arg1 & CP0Ca_IP_mask) {
1418        cpu_mips_update_irq(env);
1419    }
1420}
1421
1422void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
1423{
1424    /* vectored interrupts not implemented */
1425    /* Multi-CPU not implemented */
1426    env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
1427}
1428
1429void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
1430{
1431    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1432}
1433
1434void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
1435{
1436    /* tertiary/secondary caches not implemented */
1437    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1438}
1439
1440void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
1441{
1442    target_long mask = env->CP0_LLAddr_rw_bitmask;
1443    arg1 = arg1 << env->CP0_LLAddr_shift;
1444    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1445}
1446
1447void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1448{
1449    /* Watch exceptions for instructions, data loads, data stores
1450       not implemented. */
1451    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1452}
1453
1454void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1455{
1456    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1457    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1458}
1459
1460void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
1461{
1462    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1463    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1464}
1465
1466void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
1467{
1468    env->CP0_Framemask = arg1; /* XXX */
1469}
1470
1471void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
1472{
1473    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1474    if (arg1 & (1 << CP0DB_DM))
1475        env->hflags |= MIPS_HFLAG_DM;
1476    else
1477        env->hflags &= ~MIPS_HFLAG_DM;
1478}
1479
1480void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
1481{
1482    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1483    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1484    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1485
1486    /* XXX: Might be wrong, check with EJTAG spec. */
1487    if (other_tc == other->current_tc)
1488        other->active_tc.CP0_Debug_tcstatus = val;
1489    else
1490        other->tcs[other_tc].CP0_Debug_tcstatus = val;
1491    other->CP0_Debug = (other->CP0_Debug &
1492                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1493                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1494}
1495
1496void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
1497{
1498    env->CP0_Performance0 = arg1 & 0x000007ff;
1499}
1500
1501void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
1502{
1503    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1504}
1505
1506void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
1507{
1508    env->CP0_DataLo = arg1; /* XXX */
1509}
1510
1511void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
1512{
1513    env->CP0_TagHi = arg1; /* XXX */
1514}
1515
1516void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
1517{
1518    env->CP0_DataHi = arg1; /* XXX */
1519}
1520
1521/* MIPS MT functions */
1522target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
1523{
1524    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1525    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1526
1527    if (other_tc == other->current_tc)
1528        return other->active_tc.gpr[sel];
1529    else
1530        return other->tcs[other_tc].gpr[sel];
1531}
1532
1533target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
1534{
1535    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1536    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1537
1538    if (other_tc == other->current_tc)
1539        return other->active_tc.LO[sel];
1540    else
1541        return other->tcs[other_tc].LO[sel];
1542}
1543
1544target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
1545{
1546    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1547    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1548
1549    if (other_tc == other->current_tc)
1550        return other->active_tc.HI[sel];
1551    else
1552        return other->tcs[other_tc].HI[sel];
1553}
1554
1555target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
1556{
1557    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1558    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1559
1560    if (other_tc == other->current_tc)
1561        return other->active_tc.ACX[sel];
1562    else
1563        return other->tcs[other_tc].ACX[sel];
1564}
1565
1566target_ulong helper_mftdsp(CPUMIPSState *env)
1567{
1568    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1569    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1570
1571    if (other_tc == other->current_tc)
1572        return other->active_tc.DSPControl;
1573    else
1574        return other->tcs[other_tc].DSPControl;
1575}
1576
1577void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1578{
1579    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1580    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1581
1582    if (other_tc == other->current_tc)
1583        other->active_tc.gpr[sel] = arg1;
1584    else
1585        other->tcs[other_tc].gpr[sel] = arg1;
1586}
1587
1588void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1589{
1590    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1591    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1592
1593    if (other_tc == other->current_tc)
1594        other->active_tc.LO[sel] = arg1;
1595    else
1596        other->tcs[other_tc].LO[sel] = arg1;
1597}
1598
1599void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1600{
1601    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1602    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1603
1604    if (other_tc == other->current_tc)
1605        other->active_tc.HI[sel] = arg1;
1606    else
1607        other->tcs[other_tc].HI[sel] = arg1;
1608}
1609
1610void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1611{
1612    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1613    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1614
1615    if (other_tc == other->current_tc)
1616        other->active_tc.ACX[sel] = arg1;
1617    else
1618        other->tcs[other_tc].ACX[sel] = arg1;
1619}
1620
1621void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
1622{
1623    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1624    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1625
1626    if (other_tc == other->current_tc)
1627        other->active_tc.DSPControl = arg1;
1628    else
1629        other->tcs[other_tc].DSPControl = arg1;
1630}
1631
1632/* MIPS MT functions */
1633target_ulong helper_dmt(void)
1634{
1635    // TODO
1636     return 0;
1637}
1638
1639target_ulong helper_emt(void)
1640{
1641    // TODO
1642    return 0;
1643}
1644
1645target_ulong helper_dvpe(CPUMIPSState *env)
1646{
1647    // TODO
1648    return 0;
1649}
1650
1651target_ulong helper_evpe(CPUMIPSState *env)
1652{
1653    // TODO
1654    return 0;
1655}
1656#endif /* !CONFIG_USER_ONLY */
1657
1658void helper_fork(target_ulong arg1, target_ulong arg2)
1659{
1660    // arg1 = rt, arg2 = rs
1661    arg1 = 0;
1662    // TODO: store to TC register
1663}
1664
1665target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
1666{
1667    target_long arg1 = arg;
1668
1669    if (arg1 < 0) {
1670        /* No scheduling policy implemented. */
1671        if (arg1 != -2) {
1672            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1673                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1674                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1675                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1676                helper_raise_exception(env, EXCP_THREAD);
1677            }
1678        }
1679    } else if (arg1 == 0) {
1680        if (0 /* TODO: TC underflow */) {
1681            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1682            helper_raise_exception(env, EXCP_THREAD);
1683        } else {
1684            // TODO: Deallocate TC
1685        }
1686    } else if (arg1 > 0) {
1687        /* Yield qualifier inputs not implemented. */
1688        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1689        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1690        helper_raise_exception(env, EXCP_THREAD);
1691    }
1692    return env->CP0_YQMask;
1693}
1694
1695#ifndef CONFIG_USER_ONLY
1696static void inline r4k_invalidate_tlb_shadow (CPUMIPSState *env, int idx)
1697{
1698    r4k_tlb_t *tlb;
1699    uint8_t ASID = env->CP0_EntryHi & 0xFF;
1700
1701    tlb = &env->tlb->mmu.r4k.tlb[idx];
1702    /* The qemu TLB is flushed when the ASID changes, so no need to
1703    flush these entries again.  */
1704    if (tlb->G == 0 && tlb->ASID != ASID) {
1705        return;
1706    }
1707}
1708
1709static void inline r4k_invalidate_tlb (CPUMIPSState *env, int idx)
1710{
1711    r4k_tlb_t *tlb;
1712    target_ulong addr;
1713    target_ulong end;
1714    uint8_t ASID = env->CP0_EntryHi & 0xFF;
1715    target_ulong mask;
1716
1717    tlb = &env->tlb->mmu.r4k.tlb[idx];
1718    /* The qemu TLB is flushed when the ASID changes, so no need to
1719    flush these entries again.  */
1720    if (tlb->G == 0 && tlb->ASID != ASID) {
1721        return;
1722    }
1723
1724    /* 1k pages are not supported. */
1725    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1726    if (tlb->V0) {
1727        addr = tlb->VPN & ~mask;
1728#if defined(TARGET_MIPS64)
1729        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1730            addr |= 0x3FFFFF0000000000ULL;
1731        }
1732#endif
1733        end = addr | (mask >> 1);
1734        while (addr < end) {
1735            tlb_flush_page (env, addr);
1736            addr += TARGET_PAGE_SIZE;
1737        }
1738    }
1739    if (tlb->V1) {
1740        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
1741#if defined(TARGET_MIPS64)
1742        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1743            addr |= 0x3FFFFF0000000000ULL;
1744        }
1745#endif
1746        end = addr | mask;
1747        while (addr - 1 < end) {
1748            tlb_flush_page (env, addr);
1749            addr += TARGET_PAGE_SIZE;
1750        }
1751    }
1752}
1753
1754/* TLB management */
1755void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
1756{
1757    /* Flush qemu's TLB and discard all shadowed entries.  */
1758    tlb_flush (env, flush_global);
1759}
1760
1761static void r4k_fill_tlb(CPUMIPSState *env, int idx)
1762{
1763    r4k_tlb_t *tlb;
1764
1765    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1766    tlb = &env->tlb->mmu.r4k.tlb[idx];
1767    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1768#if defined(TARGET_MIPS64)
1769    tlb->VPN &= env->SEGMask;
1770#endif
1771    tlb->ASID = env->CP0_EntryHi & 0xFF;
1772    tlb->PageMask = env->CP0_PageMask;
1773    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1774    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1775    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1776    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1777    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1778    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1779    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1780    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1781    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1782}
1783
1784void r4k_helper_ptw_tlbrefill(CPUMIPSState *env)
1785{
1786   /* Do TLB load on behalf of Page Table Walk */
1787    int r = cpu_mips_get_random(env);
1788    r4k_invalidate_tlb_shadow(env, r);
1789    r4k_fill_tlb(env, r);
1790}
1791
1792void r4k_helper_tlbwi (CPUMIPSState *env)
1793{
1794    r4k_tlb_t *tlb;
1795    target_ulong tag;
1796    target_ulong VPN;
1797    target_ulong mask;
1798
1799    /* If tlbwi is trying to upgrading access permissions on current entry,
1800     * we do not need to flush tlb hash table.
1801     */
1802    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1803    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1804    tag = env->CP0_EntryHi & ~mask;
1805    VPN = tlb->VPN & ~mask;
1806    if (VPN == tag)
1807    {
1808        if (tlb->ASID == (env->CP0_EntryHi & 0xFF))
1809        {
1810            tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1811            tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1812            tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1813            tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1814            tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1815            tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1816            tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1817            tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1818            return;
1819        }
1820    }
1821
1822    /*flush all the tlb cache */
1823    cpu_mips_tlb_flush (env, 1);
1824
1825    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb);
1826    r4k_fill_tlb(env, env->CP0_Index % env->tlb->nb_tlb);
1827}
1828
1829void r4k_helper_tlbwr (CPUMIPSState *env)
1830{
1831    int r = cpu_mips_get_random(env);
1832
1833    r4k_invalidate_tlb_shadow(env, r);
1834    r4k_fill_tlb(env, r);
1835}
1836
1837void r4k_helper_tlbp(CPUMIPSState *env)
1838{
1839    r4k_tlb_t *tlb;
1840    target_ulong mask;
1841    target_ulong tag;
1842    target_ulong VPN;
1843    uint8_t ASID;
1844    int i;
1845
1846    ASID = env->CP0_EntryHi & 0xFF;
1847    for (i = 0; i < env->tlb->nb_tlb; i++) {
1848        tlb = &env->tlb->mmu.r4k.tlb[i];
1849        /* 1k pages are not supported. */
1850        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1851        tag = env->CP0_EntryHi & ~mask;
1852        VPN = tlb->VPN & ~mask;
1853        /* Check ASID, virtual page number & size */
1854        if (unlikely((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag)) {
1855            /* TLB match */
1856            env->CP0_Index = i;
1857            break;
1858        }
1859    }
1860    if (i == env->tlb->nb_tlb) {
1861        /* No match.  Discard any shadow entries, if any of them match. */
1862        int index = ((env->CP0_EntryHi>>5)&0x1ff00) | ASID;
1863        index |= (env->CP0_EntryHi>>13)&0x20000;
1864        env->CP0_Index |= 0x80000000;
1865    }
1866}
1867
1868void r4k_helper_tlbr(CPUMIPSState *env)
1869{
1870    r4k_tlb_t *tlb;
1871    uint8_t ASID;
1872
1873    ASID = env->CP0_EntryHi & 0xFF;
1874    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1875
1876    /* If this will change the current ASID, flush qemu's TLB.  */
1877    if (ASID != tlb->ASID)
1878        cpu_mips_tlb_flush (env, 1);
1879
1880    /*flush all the tlb cache */
1881    cpu_mips_tlb_flush (env, 1);
1882
1883    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1884    env->CP0_PageMask = tlb->PageMask;
1885    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1886                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1887    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1888                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1889}
1890
1891void helper_tlbwi(CPUMIPSState *env)
1892{
1893    env->tlb->helper_tlbwi(env);
1894}
1895
1896void helper_tlbwr(CPUMIPSState *env)
1897{
1898    env->tlb->helper_tlbwr(env);
1899}
1900
1901void helper_tlbp(CPUMIPSState *env)
1902{
1903    env->tlb->helper_tlbp(env);
1904}
1905
1906void helper_tlbr(CPUMIPSState *env)
1907{
1908    env->tlb->helper_tlbr(env);
1909}
1910
1911/* Specials */
1912target_ulong helper_di(CPUMIPSState *env)
1913{
1914    target_ulong t0 = env->CP0_Status;
1915
1916    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1917    cpu_mips_update_irq(env);
1918
1919    return t0;
1920}
1921
1922target_ulong helper_ei(CPUMIPSState *env)
1923{
1924    target_ulong t0 = env->CP0_Status;
1925
1926    env->CP0_Status = t0 | (1 << CP0St_IE);
1927    cpu_mips_update_irq(env);
1928
1929    return t0;
1930}
1931
1932static void debug_pre_eret(CPUMIPSState *env)
1933{
1934    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1935        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1936                env->active_tc.PC, env->CP0_EPC);
1937        if (env->CP0_Status & (1 << CP0St_ERL))
1938            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1939        if (env->hflags & MIPS_HFLAG_DM)
1940            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1941        qemu_log("\n");
1942    }
1943}
1944
1945static void debug_post_eret(CPUMIPSState *env)
1946{
1947    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1948        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1949                env->active_tc.PC, env->CP0_EPC);
1950        if (env->CP0_Status & (1 << CP0St_ERL))
1951            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1952        if (env->hflags & MIPS_HFLAG_DM)
1953            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1954        switch (env->hflags & MIPS_HFLAG_KSU) {
1955        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1956        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1957        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1958        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1959        }
1960    }
1961}
1962
1963void helper_eret (CPUMIPSState *env)
1964{
1965    debug_pre_eret(env);
1966    if (env->CP0_Status & (1 << CP0St_ERL)) {
1967        env->active_tc.PC = env->CP0_ErrorEPC;
1968        env->CP0_Status &= ~(1 << CP0St_ERL);
1969    } else {
1970        env->active_tc.PC = env->CP0_EPC;
1971        env->CP0_Status &= ~(1 << CP0St_EXL);
1972    }
1973    compute_hflags(env);
1974    debug_post_eret(env);
1975    env->lladdr = 1;
1976}
1977
1978void helper_deret (CPUMIPSState *env)
1979{
1980    debug_pre_eret(env);
1981    env->active_tc.PC = env->CP0_DEPC;
1982    env->hflags &= MIPS_HFLAG_DM;
1983    compute_hflags(env);
1984    debug_post_eret(env);
1985    env->lladdr = 1;
1986}
1987#endif /* !CONFIG_USER_ONLY */
1988
1989target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
1990{
1991    if ((env->hflags & MIPS_HFLAG_CP0) ||
1992        (env->CP0_HWREna & (1 << 0)))
1993        return env->CP0_EBase & 0x3ff;
1994    else
1995        helper_raise_exception(env, EXCP_RI);
1996
1997    return 0;
1998}
1999
2000target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
2001{
2002    if ((env->hflags & MIPS_HFLAG_CP0) ||
2003        (env->CP0_HWREna & (1 << 1)))
2004        return env->SYNCI_Step;
2005    else
2006        helper_raise_exception(env, EXCP_RI);
2007
2008    return 0;
2009}
2010
2011target_ulong helper_rdhwr_cc(CPUMIPSState *env)
2012{
2013    if ((env->hflags & MIPS_HFLAG_CP0) ||
2014        (env->CP0_HWREna & (1 << 2)))
2015        return env->CP0_Count;
2016    else
2017        helper_raise_exception(env, EXCP_RI);
2018
2019    return 0;
2020}
2021
2022target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
2023{
2024    if ((env->hflags & MIPS_HFLAG_CP0) ||
2025        (env->CP0_HWREna & (1 << 3)))
2026        return env->CCRes;
2027    else
2028        helper_raise_exception(env, EXCP_RI);
2029
2030    return 0;
2031}
2032
2033void helper_pmon(CPUMIPSState *env, int function)
2034{
2035    function /= 2;
2036    switch (function) {
2037    case 2: /* TODO: char inbyte(int waitflag); */
2038        if (env->active_tc.gpr[4] == 0)
2039            env->active_tc.gpr[2] = -1;
2040        /* Fall through */
2041    case 11: /* TODO: char inbyte (void); */
2042        env->active_tc.gpr[2] = -1;
2043        break;
2044    case 3:
2045    case 12:
2046        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2047        break;
2048    case 17:
2049        break;
2050    case 158:
2051        {
2052            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
2053            printf("%s", fmt);
2054        }
2055        break;
2056    }
2057}
2058
2059void helper_wait(CPUMIPSState *env)
2060{
2061    ENV_GET_CPU(env)->halted = 1;
2062    helper_raise_exception(env, EXCP_HLT);
2063}
2064
2065#if !defined(CONFIG_USER_ONLY)
2066
2067static void do_unaligned_access (CPUMIPSState *env,
2068                                 target_ulong addr, int is_write,
2069                                 int is_user, uintptr_t retaddr);
2070
2071#define MMUSUFFIX _mmu
2072#define ALIGNED_ONLY
2073
2074#define SHIFT 0
2075#include "exec/softmmu_template.h"
2076
2077#define SHIFT 1
2078#include "exec/softmmu_template.h"
2079
2080#define SHIFT 2
2081#include "exec/softmmu_template.h"
2082
2083#define SHIFT 3
2084#include "exec/softmmu_template.h"
2085
2086static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
2087                                int is_write, int is_user, uintptr_t retaddr)
2088{
2089    env->CP0_BadVAddr = addr;
2090    do_restore_state (env, retaddr);
2091    helper_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL);
2092}
2093
2094void tlb_fill (CPUMIPSState* env, target_ulong addr, int is_write, int mmu_idx,
2095               uintptr_t retaddr)
2096{
2097    TranslationBlock *tb;
2098    int ret;
2099
2100    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
2101    if (ret) {
2102        if (retaddr) {
2103            /* now we have a real cpu fault */
2104            tb = tb_find_pc(retaddr);
2105            if (tb) {
2106                /* the PC is inside the translated code. It means that we have
2107                   a virtual CPU fault */
2108                cpu_restore_state(env, retaddr);
2109            }
2110        }
2111        helper_raise_exception_err(env, env->exception_index, env->error_code);
2112    }
2113}
2114
2115void cpu_unassigned_access(CPUMIPSState* env, hwaddr addr,
2116                           int is_write, int is_exec, int unused, int size)
2117{
2118    if (is_exec)
2119        helper_raise_exception(env, EXCP_IBE);
2120    else
2121        helper_raise_exception(env, EXCP_DBE);
2122}
2123/*
2124 * The following functions are address translation helper functions
2125 * for fast memory access in QEMU.
2126 */
2127static unsigned long v2p_mmu(CPUMIPSState *env, target_ulong addr, int is_user)
2128{
2129    int index;
2130    target_ulong tlb_addr;
2131    hwaddr physaddr;
2132    uintptr_t retaddr;
2133
2134    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2135redo:
2136    tlb_addr = env->tlb_table[is_user][index].addr_read;
2137    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2138        physaddr = addr + env->tlb_table[is_user][index].addend;
2139    } else {
2140        /* the page is not in the TLB : fill it */
2141        retaddr = GETPC();
2142        tlb_fill(env, addr, 0, is_user, retaddr);
2143        goto redo;
2144    }
2145    return physaddr;
2146}
2147
2148/*
2149 * translation from virtual address of simulated OS
2150 * to the address of simulation host (not the physical
2151 * address of simulated OS.
2152 */
2153unsigned long v2p(target_ulong ptr, int is_user)
2154{
2155    CPUMIPSState *env;
2156    int index;
2157    target_ulong addr;
2158    hwaddr physaddr;
2159
2160    env = cpu_single_env;
2161    addr = ptr;
2162    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2163    if (__builtin_expect(env->tlb_table[is_user][index].addr_read !=
2164                (addr & TARGET_PAGE_MASK), 0)) {
2165        physaddr = v2p_mmu(env, addr, is_user);
2166    } else {
2167        physaddr = addr + env->tlb_table[is_user][index].addend;
2168    }
2169    return physaddr;
2170}
2171
2172/* copy a string from the simulated virtual space to a buffer in QEMU */
2173void vstrcpy(target_ulong ptr, char *buf, int max)
2174{
2175    char *phys = 0;
2176    target_ulong page = 0;
2177
2178    if (buf == NULL) return;
2179
2180    while (max) {
2181        if ((ptr & TARGET_PAGE_MASK) != page) {
2182            phys = (char *)v2p(ptr, 0);
2183            page = ptr & TARGET_PAGE_MASK;
2184        }
2185        *buf = *phys;
2186        if (*phys == '\0')
2187            return;
2188        ptr ++;
2189        buf ++;
2190        phys ++;
2191        max --;
2192    }
2193}
2194
2195#endif /* !CONFIG_USER_ONLY */
2196
2197/* Complex FPU operations which may need stack space. */
2198
2199#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2200#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2201#define FLOAT_TWO32 make_float32(1 << 30)
2202#define FLOAT_TWO64 make_float64(1ULL << 62)
2203#define FLOAT_QNAN32 0x7fbfffff
2204#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2205#define FLOAT_SNAN32 0x7fffffff
2206#define FLOAT_SNAN64 0x7fffffffffffffffULL
2207
2208/* convert MIPS rounding mode in FCR31 to IEEE library */
2209static unsigned int ieee_rm[] = {
2210    float_round_nearest_even,
2211    float_round_to_zero,
2212    float_round_up,
2213    float_round_down
2214};
2215
2216#define RESTORE_ROUNDING_MODE \
2217    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2218
2219#define RESTORE_FLUSH_MODE \
2220    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2221
2222target_ulong helper_cfc1 (CPUMIPSState *env, uint32_t reg)
2223{
2224    target_ulong arg1;
2225
2226    switch (reg) {
2227    case 0:
2228        arg1 = (int32_t)env->active_fpu.fcr0;
2229        break;
2230    case 25:
2231        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2232        break;
2233    case 26:
2234        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2235        break;
2236    case 28:
2237        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2238        break;
2239    default:
2240        arg1 = (int32_t)env->active_fpu.fcr31;
2241        break;
2242    }
2243
2244    return arg1;
2245}
2246
2247void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t reg)
2248{
2249    switch(reg) {
2250    case 25:
2251        if (arg1 & 0xffffff00)
2252            return;
2253        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2254                     ((arg1 & 0x1) << 23);
2255        break;
2256    case 26:
2257        if (arg1 & 0x007c0000)
2258            return;
2259        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2260        break;
2261    case 28:
2262        if (arg1 & 0x007c0000)
2263            return;
2264        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2265                     ((arg1 & 0x4) << 22);
2266        break;
2267    case 31:
2268        if (arg1 & 0x007c0000)
2269            return;
2270        env->active_fpu.fcr31 = arg1;
2271        break;
2272    default:
2273        return;
2274    }
2275    /* set rounding mode */
2276    RESTORE_ROUNDING_MODE;
2277    /* set flush-to-zero mode */
2278    RESTORE_FLUSH_MODE;
2279    set_float_exception_flags(0, &env->active_fpu.fp_status);
2280    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2281        helper_raise_exception(env, EXCP_FPE);
2282}
2283
2284static inline char ieee_ex_to_mips(char xcpt)
2285{
2286    return (xcpt & float_flag_inexact) >> 5 |
2287           (xcpt & float_flag_underflow) >> 3 |
2288           (xcpt & float_flag_overflow) >> 1 |
2289           (xcpt & float_flag_divbyzero) << 1 |
2290           (xcpt & float_flag_invalid) << 4;
2291}
2292
2293static inline char mips_ex_to_ieee(char xcpt)
2294{
2295    return (xcpt & FP_INEXACT) << 5 |
2296           (xcpt & FP_UNDERFLOW) << 3 |
2297           (xcpt & FP_OVERFLOW) << 1 |
2298           (xcpt & FP_DIV0) >> 1 |
2299           (xcpt & FP_INVALID) >> 4;
2300}
2301
2302static inline void update_fcr31(CPUMIPSState *env)
2303{
2304    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2305
2306    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2307    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2308        helper_raise_exception(env, EXCP_FPE);
2309    else
2310        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2311}
2312
2313/* Float support.
2314   Single precition routines have a "s" suffix, double precision a
2315   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2316   paired single lower "pl", paired single upper "pu".  */
2317
2318/* unary operations, modifying fp status  */
2319uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
2320{
2321    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2322}
2323
2324uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
2325{
2326    return float32_sqrt(fst0, &env->active_fpu.fp_status);
2327}
2328
2329uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
2330{
2331    uint64_t fdt2;
2332
2333    set_float_exception_flags(0, &env->active_fpu.fp_status);
2334    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2335    update_fcr31(env);
2336    return fdt2;
2337}
2338
2339uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
2340{
2341    uint64_t fdt2;
2342
2343    set_float_exception_flags(0, &env->active_fpu.fp_status);
2344    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2345    update_fcr31(env);
2346    return fdt2;
2347}
2348
2349uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
2350{
2351    uint64_t fdt2;
2352
2353    set_float_exception_flags(0, &env->active_fpu.fp_status);
2354    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2355    update_fcr31(env);
2356    return fdt2;
2357}
2358
2359uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0)
2360{
2361    uint64_t dt2;
2362
2363    set_float_exception_flags(0, &env->active_fpu.fp_status);
2364    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2365    update_fcr31(env);
2366    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2367        dt2 = FLOAT_SNAN64;
2368    return dt2;
2369}
2370
2371uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0)
2372{
2373    uint64_t dt2;
2374
2375    set_float_exception_flags(0, &env->active_fpu.fp_status);
2376    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2377    update_fcr31(env);
2378    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2379        dt2 = FLOAT_SNAN64;
2380    return dt2;
2381}
2382
2383uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
2384{
2385    uint32_t fst2;
2386    uint32_t fsth2;
2387
2388    set_float_exception_flags(0, &env->active_fpu.fp_status);
2389    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2390    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2391    update_fcr31(env);
2392    return ((uint64_t)fsth2 << 32) | fst2;
2393}
2394
2395uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
2396{
2397    uint32_t wt2;
2398    uint32_t wth2;
2399
2400    set_float_exception_flags(0, &env->active_fpu.fp_status);
2401    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2402    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2403    update_fcr31(env);
2404    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2405        wt2 = FLOAT_SNAN32;
2406        wth2 = FLOAT_SNAN32;
2407    }
2408    return ((uint64_t)wth2 << 32) | wt2;
2409}
2410
2411uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
2412{
2413    uint32_t fst2;
2414
2415    set_float_exception_flags(0, &env->active_fpu.fp_status);
2416    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2417    update_fcr31(env);
2418    return fst2;
2419}
2420
2421uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
2422{
2423    uint32_t fst2;
2424
2425    set_float_exception_flags(0, &env->active_fpu.fp_status);
2426    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2427    update_fcr31(env);
2428    return fst2;
2429}
2430
2431uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
2432{
2433    uint32_t fst2;
2434
2435    set_float_exception_flags(0, &env->active_fpu.fp_status);
2436    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2437    update_fcr31(env);
2438    return fst2;
2439}
2440
2441uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
2442{
2443    uint32_t wt2;
2444
2445    set_float_exception_flags(0, &env->active_fpu.fp_status);
2446    wt2 = wt0;
2447    update_fcr31(env);
2448    return wt2;
2449}
2450
2451uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
2452{
2453    uint32_t wt2;
2454
2455    set_float_exception_flags(0, &env->active_fpu.fp_status);
2456    wt2 = wth0;
2457    update_fcr31(env);
2458    return wt2;
2459}
2460
2461uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0)
2462{
2463    uint32_t wt2;
2464
2465    set_float_exception_flags(0, &env->active_fpu.fp_status);
2466    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2467    update_fcr31(env);
2468    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2469        wt2 = FLOAT_SNAN32;
2470    return wt2;
2471}
2472
2473uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0)
2474{
2475    uint32_t wt2;
2476
2477    set_float_exception_flags(0, &env->active_fpu.fp_status);
2478    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2479    update_fcr31(env);
2480    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2481        wt2 = FLOAT_SNAN32;
2482    return wt2;
2483}
2484
2485uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0)
2486{
2487    uint64_t dt2;
2488
2489    set_float_exception_flags(0, &env->active_fpu.fp_status);
2490    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2491    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2492    RESTORE_ROUNDING_MODE;
2493    update_fcr31(env);
2494    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2495        dt2 = FLOAT_SNAN64;
2496    return dt2;
2497}
2498
2499uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0)
2500{
2501    uint64_t dt2;
2502
2503    set_float_exception_flags(0, &env->active_fpu.fp_status);
2504    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2505    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2506    RESTORE_ROUNDING_MODE;
2507    update_fcr31(env);
2508    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2509        dt2 = FLOAT_SNAN64;
2510    return dt2;
2511}
2512
2513uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0)
2514{
2515    uint32_t wt2;
2516
2517    set_float_exception_flags(0, &env->active_fpu.fp_status);
2518    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2519    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2520    RESTORE_ROUNDING_MODE;
2521    update_fcr31(env);
2522    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2523        wt2 = FLOAT_SNAN32;
2524    return wt2;
2525}
2526
2527uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0)
2528{
2529    uint32_t wt2;
2530
2531    set_float_exception_flags(0, &env->active_fpu.fp_status);
2532    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2533    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2534    RESTORE_ROUNDING_MODE;
2535    update_fcr31(env);
2536    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2537        wt2 = FLOAT_SNAN32;
2538    return wt2;
2539}
2540
2541uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0)
2542{
2543    uint64_t dt2;
2544
2545    set_float_exception_flags(0, &env->active_fpu.fp_status);
2546    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2547    update_fcr31(env);
2548    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2549        dt2 = FLOAT_SNAN64;
2550    return dt2;
2551}
2552
2553uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0)
2554{
2555    uint64_t dt2;
2556
2557    set_float_exception_flags(0, &env->active_fpu.fp_status);
2558    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2559    update_fcr31(env);
2560    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2561        dt2 = FLOAT_SNAN64;
2562    return dt2;
2563}
2564
2565uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0)
2566{
2567    uint32_t wt2;
2568
2569    set_float_exception_flags(0, &env->active_fpu.fp_status);
2570    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2571    update_fcr31(env);
2572    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2573        wt2 = FLOAT_SNAN32;
2574    return wt2;
2575}
2576
2577uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0)
2578{
2579    uint32_t wt2;
2580
2581    set_float_exception_flags(0, &env->active_fpu.fp_status);
2582    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2583    update_fcr31(env);
2584    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2585        wt2 = FLOAT_SNAN32;
2586    return wt2;
2587}
2588
2589uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0)
2590{
2591    uint64_t dt2;
2592
2593    set_float_exception_flags(0, &env->active_fpu.fp_status);
2594    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2595    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2596    RESTORE_ROUNDING_MODE;
2597    update_fcr31(env);
2598    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2599        dt2 = FLOAT_SNAN64;
2600    return dt2;
2601}
2602
2603uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0)
2604{
2605    uint64_t dt2;
2606
2607    set_float_exception_flags(0, &env->active_fpu.fp_status);
2608    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2609    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2610    RESTORE_ROUNDING_MODE;
2611    update_fcr31(env);
2612    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2613        dt2 = FLOAT_SNAN64;
2614    return dt2;
2615}
2616
2617uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0)
2618{
2619    uint32_t wt2;
2620
2621    set_float_exception_flags(0, &env->active_fpu.fp_status);
2622    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2623    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2624    RESTORE_ROUNDING_MODE;
2625    update_fcr31(env);
2626    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2627        wt2 = FLOAT_SNAN32;
2628    return wt2;
2629}
2630
2631uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0)
2632{
2633    uint32_t wt2;
2634
2635    set_float_exception_flags(0, &env->active_fpu.fp_status);
2636    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2637    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2638    RESTORE_ROUNDING_MODE;
2639    update_fcr31(env);
2640    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2641        wt2 = FLOAT_SNAN32;
2642    return wt2;
2643}
2644
2645uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0)
2646{
2647    uint64_t dt2;
2648
2649    set_float_exception_flags(0, &env->active_fpu.fp_status);
2650    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2651    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2652    RESTORE_ROUNDING_MODE;
2653    update_fcr31(env);
2654    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2655        dt2 = FLOAT_SNAN64;
2656    return dt2;
2657}
2658
2659uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0)
2660{
2661    uint64_t dt2;
2662
2663    set_float_exception_flags(0, &env->active_fpu.fp_status);
2664    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2665    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2666    RESTORE_ROUNDING_MODE;
2667    update_fcr31(env);
2668    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2669        dt2 = FLOAT_SNAN64;
2670    return dt2;
2671}
2672
2673uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0)
2674{
2675    uint32_t wt2;
2676
2677    set_float_exception_flags(0, &env->active_fpu.fp_status);
2678    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2679    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2680    RESTORE_ROUNDING_MODE;
2681    update_fcr31(env);
2682    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2683        wt2 = FLOAT_SNAN32;
2684    return wt2;
2685}
2686
2687uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0)
2688{
2689    uint32_t wt2;
2690
2691    set_float_exception_flags(0, &env->active_fpu.fp_status);
2692    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2693    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2694    RESTORE_ROUNDING_MODE;
2695    update_fcr31(env);
2696    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2697        wt2 = FLOAT_SNAN32;
2698    return wt2;
2699}
2700
2701/* unary operations, not modifying fp status  */
2702#define FLOAT_UNOP(name)                                       \
2703uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
2704{                                                              \
2705    return float64_ ## name(fdt0);                             \
2706}                                                              \
2707uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
2708{                                                              \
2709    return float32_ ## name(fst0);                             \
2710}                                                              \
2711uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
2712{                                                              \
2713    uint32_t wt0;                                              \
2714    uint32_t wth0;                                             \
2715                                                               \
2716    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
2717    wth0 = float32_ ## name(fdt0 >> 32);                       \
2718    return ((uint64_t)wth0 << 32) | wt0;                       \
2719}
2720FLOAT_UNOP(abs)
2721FLOAT_UNOP(chs)
2722#undef FLOAT_UNOP
2723
2724/* MIPS specific unary operations */
2725uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
2726{
2727    uint64_t fdt2;
2728
2729    set_float_exception_flags(0, &env->active_fpu.fp_status);
2730    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2731    update_fcr31(env);
2732    return fdt2;
2733}
2734
2735uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
2736{
2737    uint32_t fst2;
2738
2739    set_float_exception_flags(0, &env->active_fpu.fp_status);
2740    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2741    update_fcr31(env);
2742    return fst2;
2743}
2744
2745uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
2746{
2747    uint64_t fdt2;
2748
2749    set_float_exception_flags(0, &env->active_fpu.fp_status);
2750    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2751    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2752    update_fcr31(env);
2753    return fdt2;
2754}
2755
2756uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
2757{
2758    uint32_t fst2;
2759
2760    set_float_exception_flags(0, &env->active_fpu.fp_status);
2761    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2762    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2763    update_fcr31(env);
2764    return fst2;
2765}
2766
2767uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
2768{
2769    uint64_t fdt2;
2770
2771    set_float_exception_flags(0, &env->active_fpu.fp_status);
2772    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2773    update_fcr31(env);
2774    return fdt2;
2775}
2776
2777uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
2778{
2779    uint32_t fst2;
2780
2781    set_float_exception_flags(0, &env->active_fpu.fp_status);
2782    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2783    update_fcr31(env);
2784    return fst2;
2785}
2786
2787uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
2788{
2789    uint32_t fst2;
2790    uint32_t fsth2;
2791
2792    set_float_exception_flags(0, &env->active_fpu.fp_status);
2793    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2794    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2795    update_fcr31(env);
2796    return ((uint64_t)fsth2 << 32) | fst2;
2797}
2798
2799uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
2800{
2801    uint64_t fdt2;
2802
2803    set_float_exception_flags(0, &env->active_fpu.fp_status);
2804    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2805    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2806    update_fcr31(env);
2807    return fdt2;
2808}
2809
2810uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
2811{
2812    uint32_t fst2;
2813
2814    set_float_exception_flags(0, &env->active_fpu.fp_status);
2815    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2816    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2817    update_fcr31(env);
2818    return fst2;
2819}
2820
2821uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
2822{
2823    uint32_t fst2;
2824    uint32_t fsth2;
2825
2826    set_float_exception_flags(0, &env->active_fpu.fp_status);
2827    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2828    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2829    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2830    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2831    update_fcr31(env);
2832    return ((uint64_t)fsth2 << 32) | fst2;
2833}
2834
2835#define FLOAT_OP(name, p) void helper_float_##name##_##p(CPUMIPSState *env)
2836
2837/* binary operations */
2838#define FLOAT_BINOP(name)                                          \
2839uint64_t helper_float_ ## name ## _d(CPUMIPSState *env,            \
2840                                     uint64_t fdt0, uint64_t fdt1) \
2841{                                                                  \
2842    uint64_t dt2;                                                  \
2843                                                                   \
2844    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2845    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
2846    update_fcr31(env);                                                \
2847    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2848        dt2 = FLOAT_QNAN64;                                        \
2849    return dt2;                                                    \
2850}                                                                  \
2851                                                                   \
2852uint32_t helper_float_ ## name ## _s(CPUMIPSState *env,            \
2853                                     uint32_t fst0, uint32_t fst1) \
2854{                                                                  \
2855    uint32_t wt2;                                                  \
2856                                                                   \
2857    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2858    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2859    update_fcr31(env);                                                \
2860    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2861        wt2 = FLOAT_QNAN32;                                        \
2862    return wt2;                                                    \
2863}                                                                  \
2864                                                                   \
2865uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env,           \
2866                                      uint64_t fdt0,               \
2867                                      uint64_t fdt1)               \
2868{                                                                  \
2869    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
2870    uint32_t fsth0 = fdt0 >> 32;                                   \
2871    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
2872    uint32_t fsth1 = fdt1 >> 32;                                   \
2873    uint32_t wt2;                                                  \
2874    uint32_t wth2;                                                 \
2875                                                                   \
2876    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2877    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2878    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
2879    update_fcr31(env);                                                \
2880    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
2881        wt2 = FLOAT_QNAN32;                                        \
2882        wth2 = FLOAT_QNAN32;                                       \
2883    }                                                              \
2884    return ((uint64_t)wth2 << 32) | wt2;                           \
2885}
2886
2887FLOAT_BINOP(add)
2888FLOAT_BINOP(sub)
2889FLOAT_BINOP(mul)
2890FLOAT_BINOP(div)
2891#undef FLOAT_BINOP
2892
2893/* ternary operations */
2894#define FLOAT_TERNOP(name1, name2)                                        \
2895uint64_t helper_float_ ## name1 ## name2 ## _d(CPUMIPSState *env,         \
2896                                           uint64_t fdt0, uint64_t fdt1,  \
2897                                           uint64_t fdt2)                 \
2898{                                                                         \
2899    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2900    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2901}                                                                         \
2902                                                                          \
2903uint32_t helper_float_ ## name1 ## name2 ## _s(CPUMIPSState *env,         \
2904                                           uint32_t fst0, uint32_t fst1,  \
2905                                           uint32_t fst2)                 \
2906{                                                                         \
2907    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2908    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2909}                                                                         \
2910                                                                          \
2911uint64_t helper_float_ ## name1 ## name2 ## _ps(CPUMIPSState *env,        \
2912                                            uint64_t fdt0, uint64_t fdt1, \
2913                                            uint64_t fdt2)                \
2914{                                                                         \
2915    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2916    uint32_t fsth0 = fdt0 >> 32;                                          \
2917    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2918    uint32_t fsth1 = fdt1 >> 32;                                          \
2919    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2920    uint32_t fsth2 = fdt2 >> 32;                                          \
2921                                                                          \
2922    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2923    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2924    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2925    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2926    return ((uint64_t)fsth2 << 32) | fst2;                                \
2927}
2928
2929FLOAT_TERNOP(mul, add)
2930FLOAT_TERNOP(mul, sub)
2931#undef FLOAT_TERNOP
2932
2933/* negated ternary operations */
2934#define FLOAT_NTERNOP(name1, name2)                                       \
2935uint64_t helper_float_n ## name1 ## name2 ## _d(CPUMIPSState *env,        \
2936                                           uint64_t fdt0, uint64_t fdt1, \
2937                                           uint64_t fdt2)                 \
2938{                                                                         \
2939    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2940    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2941    return float64_chs(fdt2);                                             \
2942}                                                                         \
2943                                                                          \
2944uint32_t helper_float_n ## name1 ## name2 ## _s(CPUMIPSState *env,        \
2945                                           uint32_t fst0, uint32_t fst1, \
2946                                           uint32_t fst2)                 \
2947{                                                                         \
2948    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2949    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2950    return float32_chs(fst2);                                             \
2951}                                                                         \
2952                                                                          \
2953uint64_t helper_float_n ## name1 ## name2 ## _ps(CPUMIPSState *env,       \
2954                                           uint64_t fdt0, uint64_t fdt1,\
2955                                           uint64_t fdt2)                 \
2956{                                                                         \
2957    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2958    uint32_t fsth0 = fdt0 >> 32;                                          \
2959    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2960    uint32_t fsth1 = fdt1 >> 32;                                          \
2961    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2962    uint32_t fsth2 = fdt2 >> 32;                                          \
2963                                                                          \
2964    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2965    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2966    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2967    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2968    fst2 = float32_chs(fst2);                                             \
2969    fsth2 = float32_chs(fsth2);                                           \
2970    return ((uint64_t)fsth2 << 32) | fst2;                                \
2971}
2972
2973FLOAT_NTERNOP(mul, add)
2974FLOAT_NTERNOP(mul, sub)
2975#undef FLOAT_NTERNOP
2976
2977/* MIPS specific binary operations */
2978uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
2979{
2980    set_float_exception_flags(0, &env->active_fpu.fp_status);
2981    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2982    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2983    update_fcr31(env);
2984    return fdt2;
2985}
2986
2987uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
2988{
2989    set_float_exception_flags(0, &env->active_fpu.fp_status);
2990    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2991    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2992    update_fcr31(env);
2993    return fst2;
2994}
2995
2996uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
2997{
2998    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2999    uint32_t fsth0 = fdt0 >> 32;
3000    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3001    uint32_t fsth2 = fdt2 >> 32;
3002
3003    set_float_exception_flags(0, &env->active_fpu.fp_status);
3004    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3005    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3006    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3007    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3008    update_fcr31(env);
3009    return ((uint64_t)fsth2 << 32) | fst2;
3010}
3011
3012uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3013{
3014    set_float_exception_flags(0, &env->active_fpu.fp_status);
3015    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3016    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3017    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3018    update_fcr31(env);
3019    return fdt2;
3020}
3021
3022uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
3023{
3024    set_float_exception_flags(0, &env->active_fpu.fp_status);
3025    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3026    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3027    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3028    update_fcr31(env);
3029    return fst2;
3030}
3031
3032uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3033{
3034    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3035    uint32_t fsth0 = fdt0 >> 32;
3036    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3037    uint32_t fsth2 = fdt2 >> 32;
3038
3039    set_float_exception_flags(0, &env->active_fpu.fp_status);
3040    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3041    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3042    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3043    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3044    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3045    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3046    update_fcr31(env);
3047    return ((uint64_t)fsth2 << 32) | fst2;
3048}
3049
3050uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3051{
3052    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3053    uint32_t fsth0 = fdt0 >> 32;
3054    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3055    uint32_t fsth1 = fdt1 >> 32;
3056    uint32_t fst2;
3057    uint32_t fsth2;
3058
3059    set_float_exception_flags(0, &env->active_fpu.fp_status);
3060    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3061    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3062    update_fcr31(env);
3063    return ((uint64_t)fsth2 << 32) | fst2;
3064}
3065
3066uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3067{
3068    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3069    uint32_t fsth0 = fdt0 >> 32;
3070    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3071    uint32_t fsth1 = fdt1 >> 32;
3072    uint32_t fst2;
3073    uint32_t fsth2;
3074
3075    set_float_exception_flags(0, &env->active_fpu.fp_status);
3076    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3077    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3078    update_fcr31(env);
3079    return ((uint64_t)fsth2 << 32) | fst2;
3080}
3081
3082/* compare operations */
3083#define FOP_COND_D(op, cond)                                   \
3084void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
3085                         uint64_t fdt1, int cc)                \
3086{                                                              \
3087    int c = cond;                                              \
3088    update_fcr31(env);                                            \
3089    if (c)                                                     \
3090        SET_FP_COND(cc, env->active_fpu);                      \
3091    else                                                       \
3092        CLEAR_FP_COND(cc, env->active_fpu);                    \
3093}                                                              \
3094void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
3095                            uint64_t fdt1, int cc)             \
3096{                                                              \
3097    int c;                                                     \
3098    fdt0 = float64_abs(fdt0);                                  \
3099    fdt1 = float64_abs(fdt1);                                  \
3100    c = cond;                                                  \
3101    update_fcr31(env);                                            \
3102    if (c)                                                     \
3103        SET_FP_COND(cc, env->active_fpu);                      \
3104    else                                                       \
3105        CLEAR_FP_COND(cc, env->active_fpu);                    \
3106}
3107
3108static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
3109{
3110    if (float64_is_signaling_nan(a) ||
3111        float64_is_signaling_nan(b) ||
3112        (sig && (float64_is_any_nan(a) || float64_is_any_nan(b)))) {
3113        float_raise(float_flag_invalid, status);
3114        return 1;
3115    } else if (float64_is_any_nan(a) || float64_is_any_nan(b)) {
3116        return 1;
3117    } else {
3118        return 0;
3119    }
3120}
3121
3122/* NOTE: the comma operator will make "cond" to eval to false,
3123 * but float*_is_unordered() is still called. */
3124FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
3125FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
3126FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3127FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3128FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3129FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3130FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3131FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3132/* NOTE: the comma operator will make "cond" to eval to false,
3133 * but float*_is_unordered() is still called. */
3134FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
3135FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
3136FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3137FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3138FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3139FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3140FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3141FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3142
3143#define FOP_COND_S(op, cond)                                   \
3144void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0,     \
3145                         uint32_t fst1, int cc)                \
3146{                                                              \
3147    int c = cond;                                              \
3148    update_fcr31(env);                                            \
3149    if (c)                                                     \
3150        SET_FP_COND(cc, env->active_fpu);                      \
3151    else                                                       \
3152        CLEAR_FP_COND(cc, env->active_fpu);                    \
3153}                                                              \
3154void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0,  \
3155                            uint32_t fst1, int cc)             \
3156{                                                              \
3157    int c;                                                     \
3158    fst0 = float32_abs(fst0);                                  \
3159    fst1 = float32_abs(fst1);                                  \
3160    c = cond;                                                  \
3161    update_fcr31(env);                                            \
3162    if (c)                                                     \
3163        SET_FP_COND(cc, env->active_fpu);                      \
3164    else                                                       \
3165        CLEAR_FP_COND(cc, env->active_fpu);                    \
3166}
3167
3168static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
3169{
3170    if (float32_is_signaling_nan(a) ||
3171        float32_is_signaling_nan(b) ||
3172        (sig && (float32_is_any_nan(a) || float32_is_any_nan(b)))) {
3173        float_raise(float_flag_invalid, status);
3174        return 1;
3175    } else if (float32_is_any_nan(a) || float32_is_any_nan(b)) {
3176        return 1;
3177    } else {
3178        return 0;
3179    }
3180}
3181
3182/* NOTE: the comma operator will make "cond" to eval to false,
3183 * but float*_is_unordered() is still called. */
3184FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
3185FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
3186FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3187FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3188FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3189FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3190FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
3191FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3192/* NOTE: the comma operator will make "cond" to eval to false,
3193 * but float*_is_unordered() is still called. */
3194FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
3195FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
3196FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3197FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3198FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3199FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3200FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
3201FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3202
3203#define FOP_COND_PS(op, condl, condh)                           \
3204void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
3205                          uint64_t fdt1, int cc)                \
3206{                                                               \
3207    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
3208    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
3209    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
3210    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
3211    int cl = condl;                                             \
3212    int ch = condh;                                             \
3213                                                                \
3214    update_fcr31(env);                                             \
3215    if (cl)                                                     \
3216        SET_FP_COND(cc, env->active_fpu);                       \
3217    else                                                        \
3218        CLEAR_FP_COND(cc, env->active_fpu);                     \
3219    if (ch)                                                     \
3220        SET_FP_COND(cc + 1, env->active_fpu);                   \
3221    else                                                        \
3222        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3223}                                                               \
3224void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
3225                             uint64_t fdt1, int cc)             \
3226{                                                               \
3227    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
3228    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
3229    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
3230    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
3231    int cl = condl;                                             \
3232    int ch = condh;                                             \
3233                                                                \
3234    update_fcr31(env);                                             \
3235    if (cl)                                                     \
3236        SET_FP_COND(cc, env->active_fpu);                       \
3237    else                                                        \
3238        CLEAR_FP_COND(cc, env->active_fpu);                     \
3239    if (ch)                                                     \
3240        SET_FP_COND(cc + 1, env->active_fpu);                   \
3241    else                                                        \
3242        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3243}
3244
3245/* NOTE: the comma operator will make "cond" to eval to false,
3246 * but float*_is_unordered() is still called. */
3247FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
3248                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3249FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
3250                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
3251FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3252                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3253FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3254                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3255FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3256                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3257FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3258                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3259FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3260                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3261FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3262                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3263/* NOTE: the comma operator will make "cond" to eval to false,
3264 * but float*_is_unordered() is still called. */
3265FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
3266                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3267FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
3268                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
3269FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3270                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3271FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3272                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3273FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3274                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3275FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3276                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3277FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3278                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3279FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3280                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3281