1/*
2 *  MIPS emulation helpers for qemu.
3 *
4 *  Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include <stdlib.h>
20#include "exec.h"
21
22#include "host-utils.h"
23
24#include "helper.h"
25/*****************************************************************************/
26/* Exceptions processing helpers */
27
28void helper_raise_exception_err (uint32_t exception, int error_code)
29{
30#if 1
31    if (exception < 0x100)
32        qemu_log("%s: %d %d\n", __func__, exception, error_code);
33#endif
34    env->exception_index = exception;
35    env->error_code = error_code;
36    cpu_loop_exit();
37}
38
39void helper_raise_exception (uint32_t exception)
40{
41    helper_raise_exception_err(exception, 0);
42}
43
44void helper_interrupt_restart (void)
45{
46    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
47        !(env->CP0_Status & (1 << CP0St_ERL)) &&
48        !(env->hflags & MIPS_HFLAG_DM) &&
49        (env->CP0_Status & (1 << CP0St_IE)) &&
50        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
51        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
52        helper_raise_exception(EXCP_EXT_INTERRUPT);
53    }
54}
55
56#if !defined(CONFIG_USER_ONLY)
57static void do_restore_state (void *pc_ptr)
58{
59    TranslationBlock *tb;
60    unsigned long pc = (unsigned long) pc_ptr;
61
62    tb = tb_find_pc (pc);
63    if (tb) {
64        cpu_restore_state (tb, env, pc);
65    }
66}
67#endif
68
69#if defined(CONFIG_USER_ONLY)
70#define HELPER_LD(name, insn, type)                                     \
71static inline type do_##name(target_ulong addr, int mem_idx)            \
72{                                                                       \
73    return (type) insn##_raw(addr);                                     \
74}
75#else
76#define HELPER_LD(name, insn, type)                                     \
77static inline type do_##name(target_ulong addr, int mem_idx)            \
78{                                                                       \
79    switch (mem_idx)                                                    \
80    {                                                                   \
81    case 0: return (type) insn##_kernel(addr); break;                   \
82    case 1: return (type) insn##_super(addr); break;                    \
83    default:                                                            \
84    case 2: return (type) insn##_user(addr); break;                     \
85    }                                                                   \
86}
87#endif
88HELPER_LD(lbu, ldub, uint8_t)
89HELPER_LD(lw, ldl, int32_t)
90#ifdef TARGET_MIPS64
91HELPER_LD(ld, ldq, int64_t)
92#endif
93#undef HELPER_LD
94
95#if defined(CONFIG_USER_ONLY)
96#define HELPER_ST(name, insn, type)                                     \
97static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
98{                                                                       \
99    insn##_raw(addr, val);                                              \
100}
101#else
102#define HELPER_ST(name, insn, type)                                     \
103static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
104{                                                                       \
105    switch (mem_idx)                                                    \
106    {                                                                   \
107    case 0: insn##_kernel(addr, val); break;                            \
108    case 1: insn##_super(addr, val); break;                             \
109    default:                                                            \
110    case 2: insn##_user(addr, val); break;                              \
111    }                                                                   \
112}
113#endif
114HELPER_ST(sb, stb, uint8_t)
115HELPER_ST(sw, stl, uint32_t)
116#ifdef TARGET_MIPS64
117HELPER_ST(sd, stq, uint64_t)
118#endif
119#undef HELPER_ST
120
121target_ulong helper_clo (target_ulong arg1)
122{
123    return clo32(arg1);
124}
125
126target_ulong helper_clz (target_ulong arg1)
127{
128    return clz32(arg1);
129}
130
131#if defined(TARGET_MIPS64)
132target_ulong helper_dclo (target_ulong arg1)
133{
134    return clo64(arg1);
135}
136
137target_ulong helper_dclz (target_ulong arg1)
138{
139    return clz64(arg1);
140}
141#endif /* TARGET_MIPS64 */
142
143/* 64 bits arithmetic for 32 bits hosts */
144static inline uint64_t get_HILO (void)
145{
146    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
147}
148
149static inline void set_HILO (uint64_t HILO)
150{
151    env->active_tc.LO[0] = (int32_t)HILO;
152    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
153}
154
155static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
156{
157    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
158    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
159}
160
161static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
162{
163    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
164    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
165}
166
167/* Multiplication variants of the vr54xx. */
168target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
169{
170    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
171
172    return arg1;
173}
174
175target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
176{
177    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
178
179    return arg1;
180}
181
182target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
183{
184    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
185
186    return arg1;
187}
188
189target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
190{
191    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
192
193    return arg1;
194}
195
196target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
197{
198    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
199
200    return arg1;
201}
202
203target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
204{
205    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
206
207    return arg1;
208}
209
210target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
211{
212    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
213
214    return arg1;
215}
216
217target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
218{
219    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
220
221    return arg1;
222}
223
224target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
225{
226    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
227
228    return arg1;
229}
230
231target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
232{
233    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
234
235    return arg1;
236}
237
238target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
239{
240    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
241
242    return arg1;
243}
244
245target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
246{
247    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
248
249    return arg1;
250}
251
252target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
253{
254    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
255
256    return arg1;
257}
258
259target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
260{
261    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
262
263    return arg1;
264}
265
266#ifdef TARGET_MIPS64
267void helper_dmult (target_ulong arg1, target_ulong arg2)
268{
269    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
270}
271
272void helper_dmultu (target_ulong arg1, target_ulong arg2)
273{
274    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
275}
276#endif
277
278#ifndef CONFIG_USER_ONLY
279
280static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
281{
282    target_phys_addr_t lladdr;
283
284    lladdr = cpu_mips_translate_address(env, address, rw);
285
286    if (lladdr == -1LL) {
287        cpu_loop_exit();
288    } else {
289        return lladdr;
290    }
291}
292
293#define HELPER_LD_ATOMIC(name, insn)                                          \
294target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
295{                                                                             \
296    env->lladdr = do_translate_address(arg, 0);                               \
297    env->llval = do_##insn(arg, mem_idx);                                     \
298    return env->llval;                                                        \
299}
300HELPER_LD_ATOMIC(ll, lw)
301#ifdef TARGET_MIPS64
302HELPER_LD_ATOMIC(lld, ld)
303#endif
304#undef HELPER_LD_ATOMIC
305
306#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
307target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
308{                                                                             \
309    target_long tmp;                                                          \
310                                                                              \
311    if (arg2 & almask) {                                                      \
312        env->CP0_BadVAddr = arg2;                                             \
313        helper_raise_exception(EXCP_AdES);                                    \
314    }                                                                         \
315    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
316        tmp = do_##ld_insn(arg2, mem_idx);                                    \
317        if (tmp == env->llval) {                                              \
318            do_##st_insn(arg2, arg1, mem_idx);                                \
319            return 1;                                                         \
320        }                                                                     \
321    }                                                                         \
322    return 0;                                                                 \
323}
324HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
325#ifdef TARGET_MIPS64
326HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
327#endif
328#undef HELPER_ST_ATOMIC
329#endif
330
331#ifdef TARGET_WORDS_BIGENDIAN
332#define GET_LMASK(v) ((v) & 3)
333#define GET_OFFSET(addr, offset) (addr + (offset))
334#else
335#define GET_LMASK(v) (((v) & 3) ^ 3)
336#define GET_OFFSET(addr, offset) (addr - (offset))
337#endif
338
339target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
340{
341    target_ulong tmp;
342
343    tmp = do_lbu(arg2, mem_idx);
344    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
345
346    if (GET_LMASK(arg2) <= 2) {
347        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
348        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
349    }
350
351    if (GET_LMASK(arg2) <= 1) {
352        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
353        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
354    }
355
356    if (GET_LMASK(arg2) == 0) {
357        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
358        arg1 = (arg1 & 0xFFFFFF00) | tmp;
359    }
360    return (int32_t)arg1;
361}
362
363target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
364{
365    target_ulong tmp;
366
367    tmp = do_lbu(arg2, mem_idx);
368    arg1 = (arg1 & 0xFFFFFF00) | tmp;
369
370    if (GET_LMASK(arg2) >= 1) {
371        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
372        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
373    }
374
375    if (GET_LMASK(arg2) >= 2) {
376        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
377        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
378    }
379
380    if (GET_LMASK(arg2) == 3) {
381        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
382        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
383    }
384    return (int32_t)arg1;
385}
386
387void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
388{
389    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
390
391    if (GET_LMASK(arg2) <= 2)
392        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
393
394    if (GET_LMASK(arg2) <= 1)
395        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
396
397    if (GET_LMASK(arg2) == 0)
398        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
399}
400
401void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
402{
403    do_sb(arg2, (uint8_t)arg1, mem_idx);
404
405    if (GET_LMASK(arg2) >= 1)
406        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
407
408    if (GET_LMASK(arg2) >= 2)
409        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
410
411    if (GET_LMASK(arg2) == 3)
412        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
413}
414
415#if defined(TARGET_MIPS64)
416/* "half" load and stores.  We must do the memory access inline,
417   or fault handling won't work.  */
418
419#ifdef TARGET_WORDS_BIGENDIAN
420#define GET_LMASK64(v) ((v) & 7)
421#else
422#define GET_LMASK64(v) (((v) & 7) ^ 7)
423#endif
424
425target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
426{
427    uint64_t tmp;
428
429    tmp = do_lbu(arg2, mem_idx);
430    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
431
432    if (GET_LMASK64(arg2) <= 6) {
433        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
434        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
435    }
436
437    if (GET_LMASK64(arg2) <= 5) {
438        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
439        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
440    }
441
442    if (GET_LMASK64(arg2) <= 4) {
443        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
444        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
445    }
446
447    if (GET_LMASK64(arg2) <= 3) {
448        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
449        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
450    }
451
452    if (GET_LMASK64(arg2) <= 2) {
453        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
454        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
455    }
456
457    if (GET_LMASK64(arg2) <= 1) {
458        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
459        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
460    }
461
462    if (GET_LMASK64(arg2) == 0) {
463        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
464        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
465    }
466
467    return arg1;
468}
469
470target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
471{
472    uint64_t tmp;
473
474    tmp = do_lbu(arg2, mem_idx);
475    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
476
477    if (GET_LMASK64(arg2) >= 1) {
478        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
479        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
480    }
481
482    if (GET_LMASK64(arg2) >= 2) {
483        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
484        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
485    }
486
487    if (GET_LMASK64(arg2) >= 3) {
488        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
489        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
490    }
491
492    if (GET_LMASK64(arg2) >= 4) {
493        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
494        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
495    }
496
497    if (GET_LMASK64(arg2) >= 5) {
498        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
499        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
500    }
501
502    if (GET_LMASK64(arg2) >= 6) {
503        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
504        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
505    }
506
507    if (GET_LMASK64(arg2) == 7) {
508        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
509        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
510    }
511
512    return arg1;
513}
514
515void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
516{
517    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
518
519    if (GET_LMASK64(arg2) <= 6)
520        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
521
522    if (GET_LMASK64(arg2) <= 5)
523        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
524
525    if (GET_LMASK64(arg2) <= 4)
526        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
527
528    if (GET_LMASK64(arg2) <= 3)
529        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
530
531    if (GET_LMASK64(arg2) <= 2)
532        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
533
534    if (GET_LMASK64(arg2) <= 1)
535        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
536
537    if (GET_LMASK64(arg2) <= 0)
538        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
539}
540
541void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
542{
543    do_sb(arg2, (uint8_t)arg1, mem_idx);
544
545    if (GET_LMASK64(arg2) >= 1)
546        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
547
548    if (GET_LMASK64(arg2) >= 2)
549        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
550
551    if (GET_LMASK64(arg2) >= 3)
552        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
553
554    if (GET_LMASK64(arg2) >= 4)
555        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
556
557    if (GET_LMASK64(arg2) >= 5)
558        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
559
560    if (GET_LMASK64(arg2) >= 6)
561        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
562
563    if (GET_LMASK64(arg2) == 7)
564        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
565}
566#endif /* TARGET_MIPS64 */
567
568#ifndef CONFIG_USER_ONLY
569/* CP0 helpers */
570target_ulong helper_mfc0_mvpcontrol (void)
571{
572    return env->mvp->CP0_MVPControl;
573}
574
575target_ulong helper_mfc0_mvpconf0 (void)
576{
577    return env->mvp->CP0_MVPConf0;
578}
579
580target_ulong helper_mfc0_mvpconf1 (void)
581{
582    return env->mvp->CP0_MVPConf1;
583}
584
585target_ulong helper_mfc0_random (void)
586{
587    return (int32_t)cpu_mips_get_random(env);
588}
589
590target_ulong helper_mfc0_tcstatus (void)
591{
592    return env->active_tc.CP0_TCStatus;
593}
594
595target_ulong helper_mftc0_tcstatus(void)
596{
597    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
598
599    if (other_tc == env->current_tc)
600        return env->active_tc.CP0_TCStatus;
601    else
602        return env->tcs[other_tc].CP0_TCStatus;
603}
604
605target_ulong helper_mfc0_tcbind (void)
606{
607    return env->active_tc.CP0_TCBind;
608}
609
610target_ulong helper_mftc0_tcbind(void)
611{
612    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
613
614    if (other_tc == env->current_tc)
615        return env->active_tc.CP0_TCBind;
616    else
617        return env->tcs[other_tc].CP0_TCBind;
618}
619
620target_ulong helper_mfc0_tcrestart (void)
621{
622    return env->active_tc.PC;
623}
624
625target_ulong helper_mftc0_tcrestart(void)
626{
627    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
628
629    if (other_tc == env->current_tc)
630        return env->active_tc.PC;
631    else
632        return env->tcs[other_tc].PC;
633}
634
635target_ulong helper_mfc0_tchalt (void)
636{
637    return env->active_tc.CP0_TCHalt;
638}
639
640target_ulong helper_mftc0_tchalt(void)
641{
642    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
643
644    if (other_tc == env->current_tc)
645        return env->active_tc.CP0_TCHalt;
646    else
647        return env->tcs[other_tc].CP0_TCHalt;
648}
649
650target_ulong helper_mfc0_tccontext (void)
651{
652    return env->active_tc.CP0_TCContext;
653}
654
655target_ulong helper_mftc0_tccontext(void)
656{
657    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
658
659    if (other_tc == env->current_tc)
660        return env->active_tc.CP0_TCContext;
661    else
662        return env->tcs[other_tc].CP0_TCContext;
663}
664
665target_ulong helper_mfc0_tcschedule (void)
666{
667    return env->active_tc.CP0_TCSchedule;
668}
669
670target_ulong helper_mftc0_tcschedule(void)
671{
672    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
673
674    if (other_tc == env->current_tc)
675        return env->active_tc.CP0_TCSchedule;
676    else
677        return env->tcs[other_tc].CP0_TCSchedule;
678}
679
680target_ulong helper_mfc0_tcschefback (void)
681{
682    return env->active_tc.CP0_TCScheFBack;
683}
684
685target_ulong helper_mftc0_tcschefback(void)
686{
687    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
688
689    if (other_tc == env->current_tc)
690        return env->active_tc.CP0_TCScheFBack;
691    else
692        return env->tcs[other_tc].CP0_TCScheFBack;
693}
694
695target_ulong helper_mfc0_count (void)
696{
697    return (int32_t)cpu_mips_get_count(env);
698}
699
700target_ulong helper_mftc0_entryhi(void)
701{
702    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
703    int32_t tcstatus;
704
705    if (other_tc == env->current_tc)
706        tcstatus = env->active_tc.CP0_TCStatus;
707    else
708        tcstatus = env->tcs[other_tc].CP0_TCStatus;
709
710    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
711}
712
713target_ulong helper_mftc0_status(void)
714{
715    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
716    target_ulong t0;
717    int32_t tcstatus;
718
719    if (other_tc == env->current_tc)
720        tcstatus = env->active_tc.CP0_TCStatus;
721    else
722        tcstatus = env->tcs[other_tc].CP0_TCStatus;
723
724    t0 = env->CP0_Status & ~0xf1000018;
725    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
726    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
727    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
728
729    return t0;
730}
731
732target_ulong helper_mfc0_lladdr (void)
733{
734    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
735}
736
737target_ulong helper_mfc0_watchlo (uint32_t sel)
738{
739    return (int32_t)env->CP0_WatchLo[sel];
740}
741
742target_ulong helper_mfc0_watchhi (uint32_t sel)
743{
744    return env->CP0_WatchHi[sel];
745}
746
747target_ulong helper_mfc0_debug (void)
748{
749    target_ulong t0 = env->CP0_Debug;
750    if (env->hflags & MIPS_HFLAG_DM)
751        t0 |= 1 << CP0DB_DM;
752
753    return t0;
754}
755
756target_ulong helper_mftc0_debug(void)
757{
758    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
759    int32_t tcstatus;
760
761    if (other_tc == env->current_tc)
762        tcstatus = env->active_tc.CP0_Debug_tcstatus;
763    else
764        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
765
766    /* XXX: Might be wrong, check with EJTAG spec. */
767    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
768            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
769}
770
771#if defined(TARGET_MIPS64)
772target_ulong helper_dmfc0_tcrestart (void)
773{
774    return env->active_tc.PC;
775}
776
777target_ulong helper_dmfc0_tchalt (void)
778{
779    return env->active_tc.CP0_TCHalt;
780}
781
782target_ulong helper_dmfc0_tccontext (void)
783{
784    return env->active_tc.CP0_TCContext;
785}
786
787target_ulong helper_dmfc0_tcschedule (void)
788{
789    return env->active_tc.CP0_TCSchedule;
790}
791
792target_ulong helper_dmfc0_tcschefback (void)
793{
794    return env->active_tc.CP0_TCScheFBack;
795}
796
797target_ulong helper_dmfc0_lladdr (void)
798{
799    return env->lladdr >> env->CP0_LLAddr_shift;
800}
801
802target_ulong helper_dmfc0_watchlo (uint32_t sel)
803{
804    return env->CP0_WatchLo[sel];
805}
806#endif /* TARGET_MIPS64 */
807
808void helper_mtc0_index (target_ulong arg1)
809{
810    int num = 1;
811    unsigned int tmp = env->tlb->nb_tlb;
812
813    do {
814        tmp >>= 1;
815        num <<= 1;
816    } while (tmp);
817    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
818}
819
820void helper_mtc0_mvpcontrol (target_ulong arg1)
821{
822    uint32_t mask = 0;
823    uint32_t newval;
824
825    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
826        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
827                (1 << CP0MVPCo_EVP);
828    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
829        mask |= (1 << CP0MVPCo_STLB);
830    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
831
832    // TODO: Enable/disable shared TLB, enable/disable VPEs.
833
834    env->mvp->CP0_MVPControl = newval;
835}
836
837void helper_mtc0_vpecontrol (target_ulong arg1)
838{
839    uint32_t mask;
840    uint32_t newval;
841
842    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
843           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
844    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
845
846    /* Yield scheduler intercept not implemented. */
847    /* Gating storage scheduler intercept not implemented. */
848
849    // TODO: Enable/disable TCs.
850
851    env->CP0_VPEControl = newval;
852}
853
854void helper_mtc0_vpeconf0 (target_ulong arg1)
855{
856    uint32_t mask = 0;
857    uint32_t newval;
858
859    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
860        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
861            mask |= (0xff << CP0VPEC0_XTC);
862        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
863    }
864    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
865
866    // TODO: TC exclusive handling due to ERL/EXL.
867
868    env->CP0_VPEConf0 = newval;
869}
870
871void helper_mtc0_vpeconf1 (target_ulong arg1)
872{
873    uint32_t mask = 0;
874    uint32_t newval;
875
876    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
877        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
878                (0xff << CP0VPEC1_NCP1);
879    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
880
881    /* UDI not implemented. */
882    /* CP2 not implemented. */
883
884    // TODO: Handle FPU (CP1) binding.
885
886    env->CP0_VPEConf1 = newval;
887}
888
889void helper_mtc0_yqmask (target_ulong arg1)
890{
891    /* Yield qualifier inputs not implemented. */
892    env->CP0_YQMask = 0x00000000;
893}
894
895void helper_mtc0_vpeopt (target_ulong arg1)
896{
897    env->CP0_VPEOpt = arg1 & 0x0000ffff;
898}
899
900void helper_mtc0_entrylo0 (target_ulong arg1)
901{
902    /* Large physaddr (PABITS) not implemented */
903    /* 1k pages not implemented */
904    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
905}
906
907void helper_mtc0_tcstatus (target_ulong arg1)
908{
909    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
910    uint32_t newval;
911
912    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
913
914    // TODO: Sync with CP0_Status.
915
916    env->active_tc.CP0_TCStatus = newval;
917}
918
919void helper_mttc0_tcstatus (target_ulong arg1)
920{
921    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
922
923    // TODO: Sync with CP0_Status.
924
925    if (other_tc == env->current_tc)
926        env->active_tc.CP0_TCStatus = arg1;
927    else
928        env->tcs[other_tc].CP0_TCStatus = arg1;
929}
930
931void helper_mtc0_tcbind (target_ulong arg1)
932{
933    uint32_t mask = (1 << CP0TCBd_TBE);
934    uint32_t newval;
935
936    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
937        mask |= (1 << CP0TCBd_CurVPE);
938    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
939    env->active_tc.CP0_TCBind = newval;
940}
941
942void helper_mttc0_tcbind (target_ulong arg1)
943{
944    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
945    uint32_t mask = (1 << CP0TCBd_TBE);
946    uint32_t newval;
947
948    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
949        mask |= (1 << CP0TCBd_CurVPE);
950    if (other_tc == env->current_tc) {
951        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
952        env->active_tc.CP0_TCBind = newval;
953    } else {
954        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
955        env->tcs[other_tc].CP0_TCBind = newval;
956    }
957}
958
959void helper_mtc0_tcrestart (target_ulong arg1)
960{
961    env->active_tc.PC = arg1;
962    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
963    env->lladdr = 0ULL;
964    /* MIPS16 not implemented. */
965}
966
967void helper_mttc0_tcrestart (target_ulong arg1)
968{
969    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
970
971    if (other_tc == env->current_tc) {
972        env->active_tc.PC = arg1;
973        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
974        env->lladdr = 0ULL;
975        /* MIPS16 not implemented. */
976    } else {
977        env->tcs[other_tc].PC = arg1;
978        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
979        env->lladdr = 0ULL;
980        /* MIPS16 not implemented. */
981    }
982}
983
984void helper_mtc0_tchalt (target_ulong arg1)
985{
986    env->active_tc.CP0_TCHalt = arg1 & 0x1;
987
988    // TODO: Halt TC / Restart (if allocated+active) TC.
989}
990
991void helper_mttc0_tchalt (target_ulong arg1)
992{
993    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
994
995    // TODO: Halt TC / Restart (if allocated+active) TC.
996
997    if (other_tc == env->current_tc)
998        env->active_tc.CP0_TCHalt = arg1;
999    else
1000        env->tcs[other_tc].CP0_TCHalt = arg1;
1001}
1002
1003void helper_mtc0_tccontext (target_ulong arg1)
1004{
1005    env->active_tc.CP0_TCContext = arg1;
1006}
1007
1008void helper_mttc0_tccontext (target_ulong arg1)
1009{
1010    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1011
1012    if (other_tc == env->current_tc)
1013        env->active_tc.CP0_TCContext = arg1;
1014    else
1015        env->tcs[other_tc].CP0_TCContext = arg1;
1016}
1017
1018void helper_mtc0_tcschedule (target_ulong arg1)
1019{
1020    env->active_tc.CP0_TCSchedule = arg1;
1021}
1022
1023void helper_mttc0_tcschedule (target_ulong arg1)
1024{
1025    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1026
1027    if (other_tc == env->current_tc)
1028        env->active_tc.CP0_TCSchedule = arg1;
1029    else
1030        env->tcs[other_tc].CP0_TCSchedule = arg1;
1031}
1032
1033void helper_mtc0_tcschefback (target_ulong arg1)
1034{
1035    env->active_tc.CP0_TCScheFBack = arg1;
1036}
1037
1038void helper_mttc0_tcschefback (target_ulong arg1)
1039{
1040    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1041
1042    if (other_tc == env->current_tc)
1043        env->active_tc.CP0_TCScheFBack = arg1;
1044    else
1045        env->tcs[other_tc].CP0_TCScheFBack = arg1;
1046}
1047
1048void helper_mtc0_entrylo1 (target_ulong arg1)
1049{
1050    /* Large physaddr (PABITS) not implemented */
1051    /* 1k pages not implemented */
1052    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1053}
1054
1055void helper_mtc0_context (target_ulong arg1)
1056{
1057    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1058}
1059
1060void helper_mtc0_pagemask (target_ulong arg1)
1061{
1062    /* 1k pages not implemented */
1063    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1064}
1065
1066void helper_mtc0_pagegrain (target_ulong arg1)
1067{
1068    /* SmartMIPS not implemented */
1069    /* Large physaddr (PABITS) not implemented */
1070    /* 1k pages not implemented */
1071    env->CP0_PageGrain = 0;
1072}
1073
1074void helper_mtc0_wired (target_ulong arg1)
1075{
1076    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1077}
1078
1079void helper_mtc0_srsconf0 (target_ulong arg1)
1080{
1081    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1082}
1083
1084void helper_mtc0_srsconf1 (target_ulong arg1)
1085{
1086    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1087}
1088
1089void helper_mtc0_srsconf2 (target_ulong arg1)
1090{
1091    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1092}
1093
1094void helper_mtc0_srsconf3 (target_ulong arg1)
1095{
1096    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1097}
1098
1099void helper_mtc0_srsconf4 (target_ulong arg1)
1100{
1101    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1102}
1103
1104void helper_mtc0_hwrena (target_ulong arg1)
1105{
1106    env->CP0_HWREna = arg1 & 0x0000000F;
1107}
1108
1109void helper_mtc0_count (target_ulong arg1)
1110{
1111    cpu_mips_store_count(env, arg1);
1112}
1113
1114void helper_mtc0_entryhi (target_ulong arg1)
1115{
1116    target_ulong old, val;
1117
1118    /* 1k pages not implemented */
1119    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1120#if defined(TARGET_MIPS64)
1121    val &= env->SEGMask;
1122#endif
1123    old = env->CP0_EntryHi;
1124    env->CP0_EntryHi = val;
1125    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1126        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
1127        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
1128    }
1129    /* If the ASID changes, flush qemu's TLB.  */
1130    if ((old & 0xFF) != (val & 0xFF))
1131        cpu_mips_tlb_flush(env, 1);
1132}
1133
1134void helper_mttc0_entryhi(target_ulong arg1)
1135{
1136    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1137    int32_t tcstatus;
1138
1139    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
1140    if (other_tc == env->current_tc) {
1141        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1142        env->active_tc.CP0_TCStatus = tcstatus;
1143    } else {
1144        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1145        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1146    }
1147}
1148
1149void helper_mtc0_compare (target_ulong arg1)
1150{
1151    cpu_mips_store_compare(env, arg1);
1152}
1153
1154void helper_mtc0_status (target_ulong arg1)
1155{
1156    uint32_t val, old;
1157    uint32_t mask = env->CP0_Status_rw_bitmask;
1158
1159    val = arg1 & mask;
1160    old = env->CP0_Status;
1161    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1162    compute_hflags(env);
1163    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1164        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1165                old, old & env->CP0_Cause & CP0Ca_IP_mask,
1166                val, val & env->CP0_Cause & CP0Ca_IP_mask,
1167                env->CP0_Cause);
1168        switch (env->hflags & MIPS_HFLAG_KSU) {
1169        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1170        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1171        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1172        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1173        }
1174    }
1175    cpu_mips_update_irq(env);
1176}
1177
1178void helper_mttc0_status(target_ulong arg1)
1179{
1180    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1181    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1182
1183    env->CP0_Status = arg1 & ~0xf1000018;
1184    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
1185    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1186    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1187    if (other_tc == env->current_tc)
1188        env->active_tc.CP0_TCStatus = tcstatus;
1189    else
1190        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1191}
1192
1193void helper_mtc0_intctl (target_ulong arg1)
1194{
1195    /* vectored interrupts not implemented, no performance counters. */
1196    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1197}
1198
1199void helper_mtc0_srsctl (target_ulong arg1)
1200{
1201    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1202    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1203}
1204
1205void helper_mtc0_cause (target_ulong arg1)
1206{
1207    uint32_t mask = 0x00C00300;
1208    uint32_t old = env->CP0_Cause;
1209
1210    if (env->insn_flags & ISA_MIPS32R2)
1211        mask |= 1 << CP0Ca_DC;
1212
1213    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
1214
1215    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1216        if (env->CP0_Cause & (1 << CP0Ca_DC))
1217            cpu_mips_stop_count(env);
1218        else
1219            cpu_mips_start_count(env);
1220    }
1221
1222    /* Handle the software interrupt as an hardware one, as they
1223       are very similar */
1224    if (arg1 & CP0Ca_IP_mask) {
1225        cpu_mips_update_irq(env);
1226    }
1227}
1228
1229void helper_mtc0_ebase (target_ulong arg1)
1230{
1231    /* vectored interrupts not implemented */
1232    /* Multi-CPU not implemented */
1233    env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
1234}
1235
1236void helper_mtc0_config0 (target_ulong arg1)
1237{
1238    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1239}
1240
1241void helper_mtc0_config2 (target_ulong arg1)
1242{
1243    /* tertiary/secondary caches not implemented */
1244    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1245}
1246
1247void helper_mtc0_lladdr (target_ulong arg1)
1248{
1249    target_long mask = env->CP0_LLAddr_rw_bitmask;
1250    arg1 = arg1 << env->CP0_LLAddr_shift;
1251    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1252}
1253
1254void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1255{
1256    /* Watch exceptions for instructions, data loads, data stores
1257       not implemented. */
1258    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1259}
1260
1261void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1262{
1263    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1264    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1265}
1266
1267void helper_mtc0_xcontext (target_ulong arg1)
1268{
1269    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1270    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1271}
1272
1273void helper_mtc0_framemask (target_ulong arg1)
1274{
1275    env->CP0_Framemask = arg1; /* XXX */
1276}
1277
1278void helper_mtc0_debug (target_ulong arg1)
1279{
1280    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1281    if (arg1 & (1 << CP0DB_DM))
1282        env->hflags |= MIPS_HFLAG_DM;
1283    else
1284        env->hflags &= ~MIPS_HFLAG_DM;
1285}
1286
1287void helper_mttc0_debug(target_ulong arg1)
1288{
1289    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1290    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1291
1292    /* XXX: Might be wrong, check with EJTAG spec. */
1293    if (other_tc == env->current_tc)
1294        env->active_tc.CP0_Debug_tcstatus = val;
1295    else
1296        env->tcs[other_tc].CP0_Debug_tcstatus = val;
1297    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1298                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1299}
1300
1301void helper_mtc0_performance0 (target_ulong arg1)
1302{
1303    env->CP0_Performance0 = arg1 & 0x000007ff;
1304}
1305
1306void helper_mtc0_taglo (target_ulong arg1)
1307{
1308    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1309}
1310
1311void helper_mtc0_datalo (target_ulong arg1)
1312{
1313    env->CP0_DataLo = arg1; /* XXX */
1314}
1315
1316void helper_mtc0_taghi (target_ulong arg1)
1317{
1318    env->CP0_TagHi = arg1; /* XXX */
1319}
1320
1321void helper_mtc0_datahi (target_ulong arg1)
1322{
1323    env->CP0_DataHi = arg1; /* XXX */
1324}
1325
1326/* MIPS MT functions */
1327target_ulong helper_mftgpr(uint32_t sel)
1328{
1329    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1330
1331    if (other_tc == env->current_tc)
1332        return env->active_tc.gpr[sel];
1333    else
1334        return env->tcs[other_tc].gpr[sel];
1335}
1336
1337target_ulong helper_mftlo(uint32_t sel)
1338{
1339    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1340
1341    if (other_tc == env->current_tc)
1342        return env->active_tc.LO[sel];
1343    else
1344        return env->tcs[other_tc].LO[sel];
1345}
1346
1347target_ulong helper_mfthi(uint32_t sel)
1348{
1349    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1350
1351    if (other_tc == env->current_tc)
1352        return env->active_tc.HI[sel];
1353    else
1354        return env->tcs[other_tc].HI[sel];
1355}
1356
1357target_ulong helper_mftacx(uint32_t sel)
1358{
1359    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1360
1361    if (other_tc == env->current_tc)
1362        return env->active_tc.ACX[sel];
1363    else
1364        return env->tcs[other_tc].ACX[sel];
1365}
1366
1367target_ulong helper_mftdsp(void)
1368{
1369    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1370
1371    if (other_tc == env->current_tc)
1372        return env->active_tc.DSPControl;
1373    else
1374        return env->tcs[other_tc].DSPControl;
1375}
1376
1377void helper_mttgpr(target_ulong arg1, uint32_t sel)
1378{
1379    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1380
1381    if (other_tc == env->current_tc)
1382        env->active_tc.gpr[sel] = arg1;
1383    else
1384        env->tcs[other_tc].gpr[sel] = arg1;
1385}
1386
1387void helper_mttlo(target_ulong arg1, uint32_t sel)
1388{
1389    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1390
1391    if (other_tc == env->current_tc)
1392        env->active_tc.LO[sel] = arg1;
1393    else
1394        env->tcs[other_tc].LO[sel] = arg1;
1395}
1396
1397void helper_mtthi(target_ulong arg1, uint32_t sel)
1398{
1399    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1400
1401    if (other_tc == env->current_tc)
1402        env->active_tc.HI[sel] = arg1;
1403    else
1404        env->tcs[other_tc].HI[sel] = arg1;
1405}
1406
1407void helper_mttacx(target_ulong arg1, uint32_t sel)
1408{
1409    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1410
1411    if (other_tc == env->current_tc)
1412        env->active_tc.ACX[sel] = arg1;
1413    else
1414        env->tcs[other_tc].ACX[sel] = arg1;
1415}
1416
1417void helper_mttdsp(target_ulong arg1)
1418{
1419    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1420
1421    if (other_tc == env->current_tc)
1422        env->active_tc.DSPControl = arg1;
1423    else
1424        env->tcs[other_tc].DSPControl = arg1;
1425}
1426
1427/* MIPS MT functions */
1428target_ulong helper_dmt(target_ulong arg1)
1429{
1430    // TODO
1431    arg1 = 0;
1432    // rt = arg1
1433
1434    return arg1;
1435}
1436
1437target_ulong helper_emt(target_ulong arg1)
1438{
1439    // TODO
1440    arg1 = 0;
1441    // rt = arg1
1442
1443    return arg1;
1444}
1445
1446target_ulong helper_dvpe(target_ulong arg1)
1447{
1448    // TODO
1449    arg1 = 0;
1450    // rt = arg1
1451
1452    return arg1;
1453}
1454
1455target_ulong helper_evpe(target_ulong arg1)
1456{
1457    // TODO
1458    arg1 = 0;
1459    // rt = arg1
1460
1461    return arg1;
1462}
1463#endif /* !CONFIG_USER_ONLY */
1464
1465void helper_fork(target_ulong arg1, target_ulong arg2)
1466{
1467    // arg1 = rt, arg2 = rs
1468    arg1 = 0;
1469    // TODO: store to TC register
1470}
1471
1472target_ulong helper_yield(target_ulong arg1)
1473{
1474    if (arg1 < 0) {
1475        /* No scheduling policy implemented. */
1476        if (arg1 != -2) {
1477            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1478                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1479                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1480                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1481                helper_raise_exception(EXCP_THREAD);
1482            }
1483        }
1484    } else if (arg1 == 0) {
1485        if (0 /* TODO: TC underflow */) {
1486            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1487            helper_raise_exception(EXCP_THREAD);
1488        } else {
1489            // TODO: Deallocate TC
1490        }
1491    } else if (arg1 > 0) {
1492        /* Yield qualifier inputs not implemented. */
1493        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1494        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1495        helper_raise_exception(EXCP_THREAD);
1496    }
1497    return env->CP0_YQMask;
1498}
1499
1500#ifndef CONFIG_USER_ONLY
1501static void inline r4k_invalidate_tlb_shadow (CPUState *env, int idx)
1502{
1503    r4k_tlb_t *tlb;
1504    uint8_t ASID = env->CP0_EntryHi & 0xFF;
1505
1506    tlb = &env->tlb->mmu.r4k.tlb[idx];
1507    /* The qemu TLB is flushed when the ASID changes, so no need to
1508    flush these entries again.  */
1509    if (tlb->G == 0 && tlb->ASID != ASID) {
1510        return;
1511    }
1512}
1513
1514static void inline r4k_invalidate_tlb (CPUState *env, int idx)
1515{
1516    r4k_tlb_t *tlb;
1517    target_ulong addr;
1518    target_ulong end;
1519    uint8_t ASID = env->CP0_EntryHi & 0xFF;
1520    target_ulong mask;
1521
1522    tlb = &env->tlb->mmu.r4k.tlb[idx];
1523    /* The qemu TLB is flushed when the ASID changes, so no need to
1524    flush these entries again.  */
1525    if (tlb->G == 0 && tlb->ASID != ASID) {
1526        return;
1527    }
1528
1529    /* 1k pages are not supported. */
1530    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1531    if (tlb->V0) {
1532        addr = tlb->VPN & ~mask;
1533#if defined(TARGET_MIPS64)
1534        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1535            addr |= 0x3FFFFF0000000000ULL;
1536        }
1537#endif
1538        end = addr | (mask >> 1);
1539        while (addr < end) {
1540            tlb_flush_page (env, addr);
1541            addr += TARGET_PAGE_SIZE;
1542        }
1543    }
1544    if (tlb->V1) {
1545        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
1546#if defined(TARGET_MIPS64)
1547        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1548            addr |= 0x3FFFFF0000000000ULL;
1549        }
1550#endif
1551        end = addr | mask;
1552        while (addr - 1 < end) {
1553            tlb_flush_page (env, addr);
1554            addr += TARGET_PAGE_SIZE;
1555        }
1556    }
1557}
1558
1559/* TLB management */
1560void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1561{
1562    /* Flush qemu's TLB and discard all shadowed entries.  */
1563    tlb_flush (env, flush_global);
1564}
1565
1566static void r4k_fill_tlb (int idx)
1567{
1568    r4k_tlb_t *tlb;
1569
1570    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1571    tlb = &env->tlb->mmu.r4k.tlb[idx];
1572    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1573#if defined(TARGET_MIPS64)
1574    tlb->VPN &= env->SEGMask;
1575#endif
1576    tlb->ASID = env->CP0_EntryHi & 0xFF;
1577    tlb->PageMask = env->CP0_PageMask;
1578    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1579    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1580    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1581    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1582    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1583    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1584    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1585    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1586    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1587}
1588
1589void r4k_helper_ptw_tlbrefill(CPUState *target_env)
1590{
1591   CPUState *saved_env;
1592
1593   /* Save current 'env' value */
1594   saved_env = env;
1595   env = target_env;
1596
1597   /* Do TLB load on behalf of Page Table Walk */
1598    int r = cpu_mips_get_random(env);
1599    r4k_invalidate_tlb_shadow(env, r);
1600    r4k_fill_tlb(r);
1601
1602   /* Restore 'env' value */
1603   env = saved_env;
1604}
1605
1606void r4k_helper_tlbwi (void)
1607{
1608    r4k_tlb_t *tlb;
1609    target_ulong tag;
1610    target_ulong VPN;
1611    target_ulong mask;
1612
1613    /* If tlbwi is trying to upgrading access permissions on current entry,
1614     * we do not need to flush tlb hash table.
1615     */
1616    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1617    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1618    tag = env->CP0_EntryHi & ~mask;
1619    VPN = tlb->VPN & ~mask;
1620    if (VPN == tag)
1621    {
1622        if (tlb->ASID == (env->CP0_EntryHi & 0xFF))
1623        {
1624            tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1625            tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1626            tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1627            tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1628            tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1629            tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1630            tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1631            tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1632            return;
1633        }
1634    }
1635
1636    /*flush all the tlb cache */
1637    cpu_mips_tlb_flush (env, 1);
1638
1639    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb);
1640    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
1641}
1642
1643void r4k_helper_tlbwr (void)
1644{
1645    int r = cpu_mips_get_random(env);
1646
1647    r4k_invalidate_tlb_shadow(env, r);
1648    r4k_fill_tlb(r);
1649}
1650
1651void r4k_helper_tlbp (void)
1652{
1653    r4k_tlb_t *tlb;
1654    target_ulong mask;
1655    target_ulong tag;
1656    target_ulong VPN;
1657    uint8_t ASID;
1658    int i;
1659    target_ulong addr;
1660    target_ulong end;
1661
1662    ASID = env->CP0_EntryHi & 0xFF;
1663    for (i = 0; i < env->tlb->nb_tlb; i++) {
1664        tlb = &env->tlb->mmu.r4k.tlb[i];
1665        /* 1k pages are not supported. */
1666        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1667        tag = env->CP0_EntryHi & ~mask;
1668        VPN = tlb->VPN & ~mask;
1669        /* Check ASID, virtual page number & size */
1670        if (unlikely((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag)) {
1671            /* TLB match */
1672            env->CP0_Index = i;
1673            break;
1674        }
1675    }
1676    if (i == env->tlb->nb_tlb) {
1677        /* No match.  Discard any shadow entries, if any of them match. */
1678        int index = ((env->CP0_EntryHi>>5)&0x1ff00) | ASID;
1679        index |= (env->CP0_EntryHi>>13)&0x20000;
1680        env->CP0_Index |= 0x80000000;
1681    }
1682}
1683
1684void r4k_helper_tlbr (void)
1685{
1686    r4k_tlb_t *tlb;
1687    uint8_t ASID;
1688
1689    ASID = env->CP0_EntryHi & 0xFF;
1690    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1691
1692    /* If this will change the current ASID, flush qemu's TLB.  */
1693    if (ASID != tlb->ASID)
1694        cpu_mips_tlb_flush (env, 1);
1695
1696    /*flush all the tlb cache */
1697    cpu_mips_tlb_flush (env, 1);
1698
1699    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1700    env->CP0_PageMask = tlb->PageMask;
1701    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1702                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1703    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1704                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1705}
1706
1707void helper_tlbwi(void)
1708{
1709    env->tlb->helper_tlbwi();
1710}
1711
1712void helper_tlbwr(void)
1713{
1714    env->tlb->helper_tlbwr();
1715}
1716
1717void helper_tlbp(void)
1718{
1719    env->tlb->helper_tlbp();
1720}
1721
1722void helper_tlbr(void)
1723{
1724    env->tlb->helper_tlbr();
1725}
1726
1727/* Specials */
1728target_ulong helper_di (void)
1729{
1730    target_ulong t0 = env->CP0_Status;
1731
1732    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1733    cpu_mips_update_irq(env);
1734
1735    return t0;
1736}
1737
1738target_ulong helper_ei (void)
1739{
1740    target_ulong t0 = env->CP0_Status;
1741
1742    env->CP0_Status = t0 | (1 << CP0St_IE);
1743    cpu_mips_update_irq(env);
1744
1745    return t0;
1746}
1747
1748static void debug_pre_eret (void)
1749{
1750    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1751        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1752                env->active_tc.PC, env->CP0_EPC);
1753        if (env->CP0_Status & (1 << CP0St_ERL))
1754            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1755        if (env->hflags & MIPS_HFLAG_DM)
1756            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1757        qemu_log("\n");
1758    }
1759}
1760
1761static void debug_post_eret (void)
1762{
1763    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1764        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1765                env->active_tc.PC, env->CP0_EPC);
1766        if (env->CP0_Status & (1 << CP0St_ERL))
1767            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1768        if (env->hflags & MIPS_HFLAG_DM)
1769            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1770        switch (env->hflags & MIPS_HFLAG_KSU) {
1771        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1772        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1773        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1774        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1775        }
1776    }
1777}
1778
1779void helper_eret (void)
1780{
1781    debug_pre_eret();
1782    if (env->CP0_Status & (1 << CP0St_ERL)) {
1783        env->active_tc.PC = env->CP0_ErrorEPC;
1784        env->CP0_Status &= ~(1 << CP0St_ERL);
1785    } else {
1786        env->active_tc.PC = env->CP0_EPC;
1787        env->CP0_Status &= ~(1 << CP0St_EXL);
1788    }
1789    compute_hflags(env);
1790    debug_post_eret();
1791    env->lladdr = 1;
1792}
1793
1794void helper_deret (void)
1795{
1796    debug_pre_eret();
1797    env->active_tc.PC = env->CP0_DEPC;
1798    env->hflags &= MIPS_HFLAG_DM;
1799    compute_hflags(env);
1800    debug_post_eret();
1801    env->lladdr = 1;
1802}
1803#endif /* !CONFIG_USER_ONLY */
1804
1805target_ulong helper_rdhwr_cpunum(void)
1806{
1807    if ((env->hflags & MIPS_HFLAG_CP0) ||
1808        (env->CP0_HWREna & (1 << 0)))
1809        return env->CP0_EBase & 0x3ff;
1810    else
1811        helper_raise_exception(EXCP_RI);
1812
1813    return 0;
1814}
1815
1816target_ulong helper_rdhwr_synci_step(void)
1817{
1818    if ((env->hflags & MIPS_HFLAG_CP0) ||
1819        (env->CP0_HWREna & (1 << 1)))
1820        return env->SYNCI_Step;
1821    else
1822        helper_raise_exception(EXCP_RI);
1823
1824    return 0;
1825}
1826
1827target_ulong helper_rdhwr_cc(void)
1828{
1829    if ((env->hflags & MIPS_HFLAG_CP0) ||
1830        (env->CP0_HWREna & (1 << 2)))
1831        return env->CP0_Count;
1832    else
1833        helper_raise_exception(EXCP_RI);
1834
1835    return 0;
1836}
1837
1838target_ulong helper_rdhwr_ccres(void)
1839{
1840    if ((env->hflags & MIPS_HFLAG_CP0) ||
1841        (env->CP0_HWREna & (1 << 3)))
1842        return env->CCRes;
1843    else
1844        helper_raise_exception(EXCP_RI);
1845
1846    return 0;
1847}
1848
1849void helper_pmon (int function)
1850{
1851    function /= 2;
1852    switch (function) {
1853    case 2: /* TODO: char inbyte(int waitflag); */
1854        if (env->active_tc.gpr[4] == 0)
1855            env->active_tc.gpr[2] = -1;
1856        /* Fall through */
1857    case 11: /* TODO: char inbyte (void); */
1858        env->active_tc.gpr[2] = -1;
1859        break;
1860    case 3:
1861    case 12:
1862        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1863        break;
1864    case 17:
1865        break;
1866    case 158:
1867        {
1868            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1869            printf("%s", fmt);
1870        }
1871        break;
1872    }
1873}
1874
1875void helper_wait (void)
1876{
1877    env->halted = 1;
1878    helper_raise_exception(EXCP_HLT);
1879}
1880
1881#if !defined(CONFIG_USER_ONLY)
1882
1883static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1884
1885#define MMUSUFFIX _mmu
1886#define ALIGNED_ONLY
1887
1888#define SHIFT 0
1889#include "softmmu_template.h"
1890
1891#define SHIFT 1
1892#include "softmmu_template.h"
1893
1894#define SHIFT 2
1895#include "softmmu_template.h"
1896
1897#define SHIFT 3
1898#include "softmmu_template.h"
1899
1900static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1901{
1902    env->CP0_BadVAddr = addr;
1903    do_restore_state (retaddr);
1904    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1905}
1906
1907void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1908{
1909    TranslationBlock *tb;
1910    CPUState *saved_env;
1911    unsigned long pc;
1912    int ret;
1913
1914    /* XXX: hack to restore env in all cases, even if not called from
1915       generated code */
1916    saved_env = env;
1917    env = cpu_single_env;
1918    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1919    if (ret) {
1920        if (retaddr) {
1921            /* now we have a real cpu fault */
1922            pc = (unsigned long)retaddr;
1923            tb = tb_find_pc(pc);
1924            if (tb) {
1925                /* the PC is inside the translated code. It means that we have
1926                   a virtual CPU fault */
1927                cpu_restore_state(tb, env, pc);
1928            }
1929        }
1930        helper_raise_exception_err(env->exception_index, env->error_code);
1931    }
1932    env = saved_env;
1933}
1934
1935void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
1936                          int unused, int size)
1937{
1938    if (is_exec)
1939        helper_raise_exception(EXCP_IBE);
1940    else
1941        helper_raise_exception(EXCP_DBE);
1942}
1943/*
1944 * The following functions are address translation helper functions
1945 * for fast memory access in QEMU.
1946 */
1947static unsigned long v2p_mmu(target_ulong addr, int is_user)
1948{
1949    int index;
1950    target_ulong tlb_addr;
1951    unsigned long physaddr;
1952    void *retaddr;
1953
1954    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1955redo:
1956    tlb_addr = env->tlb_table[is_user][index].addr_read;
1957    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1958        physaddr = addr + env->tlb_table[is_user][index].addend;
1959    } else {
1960        /* the page is not in the TLB : fill it */
1961        retaddr = GETPC();
1962        tlb_fill(addr, 0, is_user, retaddr);
1963        goto redo;
1964    }
1965    return physaddr;
1966}
1967
1968/*
1969 * translation from virtual address of simulated OS
1970 * to the address of simulation host (not the physical
1971 * address of simulated OS.
1972 */
1973unsigned long v2p(target_ulong ptr, int is_user)
1974{
1975    CPUState *saved_env;
1976    int index;
1977    target_ulong addr;
1978    unsigned long physaddr;
1979
1980    saved_env = env;
1981    env = cpu_single_env;
1982    addr = ptr;
1983    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1984    if (__builtin_expect(env->tlb_table[is_user][index].addr_read !=
1985                (addr & TARGET_PAGE_MASK), 0)) {
1986        physaddr = v2p_mmu(addr, is_user);
1987    } else {
1988        physaddr = addr + env->tlb_table[is_user][index].addend;
1989    }
1990    env = saved_env;
1991    return physaddr;
1992}
1993
1994/* copy a string from the simulated virtual space to a buffer in QEMU */
1995void vstrcpy(target_ulong ptr, char *buf, int max)
1996{
1997    char *phys = 0;
1998    unsigned long page = 0;
1999
2000    if (buf == NULL) return;
2001
2002    while (max) {
2003        if ((ptr & TARGET_PAGE_MASK) != page) {
2004            phys = (char *)v2p(ptr, 0);
2005            page = ptr & TARGET_PAGE_MASK;
2006        }
2007        *buf = *phys;
2008        if (*phys == '\0')
2009            return;
2010        ptr ++;
2011        buf ++;
2012        phys ++;
2013        max --;
2014    }
2015}
2016
2017#endif /* !CONFIG_USER_ONLY */
2018
2019/* Complex FPU operations which may need stack space. */
2020
2021#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2022#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2023#define FLOAT_TWO32 make_float32(1 << 30)
2024#define FLOAT_TWO64 make_float64(1ULL << 62)
2025#define FLOAT_QNAN32 0x7fbfffff
2026#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2027#define FLOAT_SNAN32 0x7fffffff
2028#define FLOAT_SNAN64 0x7fffffffffffffffULL
2029
2030/* convert MIPS rounding mode in FCR31 to IEEE library */
2031static unsigned int ieee_rm[] = {
2032    float_round_nearest_even,
2033    float_round_to_zero,
2034    float_round_up,
2035    float_round_down
2036};
2037
2038#define RESTORE_ROUNDING_MODE \
2039    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2040
2041#define RESTORE_FLUSH_MODE \
2042    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2043
2044target_ulong helper_cfc1 (uint32_t reg)
2045{
2046    target_ulong arg1;
2047
2048    switch (reg) {
2049    case 0:
2050        arg1 = (int32_t)env->active_fpu.fcr0;
2051        break;
2052    case 25:
2053        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2054        break;
2055    case 26:
2056        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2057        break;
2058    case 28:
2059        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2060        break;
2061    default:
2062        arg1 = (int32_t)env->active_fpu.fcr31;
2063        break;
2064    }
2065
2066    return arg1;
2067}
2068
2069void helper_ctc1 (target_ulong arg1, uint32_t reg)
2070{
2071    switch(reg) {
2072    case 25:
2073        if (arg1 & 0xffffff00)
2074            return;
2075        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2076                     ((arg1 & 0x1) << 23);
2077        break;
2078    case 26:
2079        if (arg1 & 0x007c0000)
2080            return;
2081        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2082        break;
2083    case 28:
2084        if (arg1 & 0x007c0000)
2085            return;
2086        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2087                     ((arg1 & 0x4) << 22);
2088        break;
2089    case 31:
2090        if (arg1 & 0x007c0000)
2091            return;
2092        env->active_fpu.fcr31 = arg1;
2093        break;
2094    default:
2095        return;
2096    }
2097    /* set rounding mode */
2098    RESTORE_ROUNDING_MODE;
2099    /* set flush-to-zero mode */
2100    RESTORE_FLUSH_MODE;
2101    set_float_exception_flags(0, &env->active_fpu.fp_status);
2102    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2103        helper_raise_exception(EXCP_FPE);
2104}
2105
2106static inline char ieee_ex_to_mips(char xcpt)
2107{
2108    return (xcpt & float_flag_inexact) >> 5 |
2109           (xcpt & float_flag_underflow) >> 3 |
2110           (xcpt & float_flag_overflow) >> 1 |
2111           (xcpt & float_flag_divbyzero) << 1 |
2112           (xcpt & float_flag_invalid) << 4;
2113}
2114
2115static inline char mips_ex_to_ieee(char xcpt)
2116{
2117    return (xcpt & FP_INEXACT) << 5 |
2118           (xcpt & FP_UNDERFLOW) << 3 |
2119           (xcpt & FP_OVERFLOW) << 1 |
2120           (xcpt & FP_DIV0) >> 1 |
2121           (xcpt & FP_INVALID) >> 4;
2122}
2123
2124static inline void update_fcr31(void)
2125{
2126    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2127
2128    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2129    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2130        helper_raise_exception(EXCP_FPE);
2131    else
2132        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2133}
2134
2135/* Float support.
2136   Single precition routines have a "s" suffix, double precision a
2137   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2138   paired single lower "pl", paired single upper "pu".  */
2139
2140/* unary operations, modifying fp status  */
2141uint64_t helper_float_sqrt_d(uint64_t fdt0)
2142{
2143    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2144}
2145
2146uint32_t helper_float_sqrt_s(uint32_t fst0)
2147{
2148    return float32_sqrt(fst0, &env->active_fpu.fp_status);
2149}
2150
2151uint64_t helper_float_cvtd_s(uint32_t fst0)
2152{
2153    uint64_t fdt2;
2154
2155    set_float_exception_flags(0, &env->active_fpu.fp_status);
2156    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2157    update_fcr31();
2158    return fdt2;
2159}
2160
2161uint64_t helper_float_cvtd_w(uint32_t wt0)
2162{
2163    uint64_t fdt2;
2164
2165    set_float_exception_flags(0, &env->active_fpu.fp_status);
2166    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2167    update_fcr31();
2168    return fdt2;
2169}
2170
2171uint64_t helper_float_cvtd_l(uint64_t dt0)
2172{
2173    uint64_t fdt2;
2174
2175    set_float_exception_flags(0, &env->active_fpu.fp_status);
2176    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2177    update_fcr31();
2178    return fdt2;
2179}
2180
2181uint64_t helper_float_cvtl_d(uint64_t fdt0)
2182{
2183    uint64_t dt2;
2184
2185    set_float_exception_flags(0, &env->active_fpu.fp_status);
2186    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2187    update_fcr31();
2188    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2189        dt2 = FLOAT_SNAN64;
2190    return dt2;
2191}
2192
2193uint64_t helper_float_cvtl_s(uint32_t fst0)
2194{
2195    uint64_t dt2;
2196
2197    set_float_exception_flags(0, &env->active_fpu.fp_status);
2198    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2199    update_fcr31();
2200    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2201        dt2 = FLOAT_SNAN64;
2202    return dt2;
2203}
2204
2205uint64_t helper_float_cvtps_pw(uint64_t dt0)
2206{
2207    uint32_t fst2;
2208    uint32_t fsth2;
2209
2210    set_float_exception_flags(0, &env->active_fpu.fp_status);
2211    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2212    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2213    update_fcr31();
2214    return ((uint64_t)fsth2 << 32) | fst2;
2215}
2216
2217uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2218{
2219    uint32_t wt2;
2220    uint32_t wth2;
2221
2222    set_float_exception_flags(0, &env->active_fpu.fp_status);
2223    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2224    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2225    update_fcr31();
2226    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2227        wt2 = FLOAT_SNAN32;
2228        wth2 = FLOAT_SNAN32;
2229    }
2230    return ((uint64_t)wth2 << 32) | wt2;
2231}
2232
2233uint32_t helper_float_cvts_d(uint64_t fdt0)
2234{
2235    uint32_t fst2;
2236
2237    set_float_exception_flags(0, &env->active_fpu.fp_status);
2238    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2239    update_fcr31();
2240    return fst2;
2241}
2242
2243uint32_t helper_float_cvts_w(uint32_t wt0)
2244{
2245    uint32_t fst2;
2246
2247    set_float_exception_flags(0, &env->active_fpu.fp_status);
2248    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2249    update_fcr31();
2250    return fst2;
2251}
2252
2253uint32_t helper_float_cvts_l(uint64_t dt0)
2254{
2255    uint32_t fst2;
2256
2257    set_float_exception_flags(0, &env->active_fpu.fp_status);
2258    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2259    update_fcr31();
2260    return fst2;
2261}
2262
2263uint32_t helper_float_cvts_pl(uint32_t wt0)
2264{
2265    uint32_t wt2;
2266
2267    set_float_exception_flags(0, &env->active_fpu.fp_status);
2268    wt2 = wt0;
2269    update_fcr31();
2270    return wt2;
2271}
2272
2273uint32_t helper_float_cvts_pu(uint32_t wth0)
2274{
2275    uint32_t wt2;
2276
2277    set_float_exception_flags(0, &env->active_fpu.fp_status);
2278    wt2 = wth0;
2279    update_fcr31();
2280    return wt2;
2281}
2282
2283uint32_t helper_float_cvtw_s(uint32_t fst0)
2284{
2285    uint32_t wt2;
2286
2287    set_float_exception_flags(0, &env->active_fpu.fp_status);
2288    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2289    update_fcr31();
2290    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2291        wt2 = FLOAT_SNAN32;
2292    return wt2;
2293}
2294
2295uint32_t helper_float_cvtw_d(uint64_t fdt0)
2296{
2297    uint32_t wt2;
2298
2299    set_float_exception_flags(0, &env->active_fpu.fp_status);
2300    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2301    update_fcr31();
2302    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2303        wt2 = FLOAT_SNAN32;
2304    return wt2;
2305}
2306
2307uint64_t helper_float_roundl_d(uint64_t fdt0)
2308{
2309    uint64_t dt2;
2310
2311    set_float_exception_flags(0, &env->active_fpu.fp_status);
2312    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2313    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2314    RESTORE_ROUNDING_MODE;
2315    update_fcr31();
2316    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2317        dt2 = FLOAT_SNAN64;
2318    return dt2;
2319}
2320
2321uint64_t helper_float_roundl_s(uint32_t fst0)
2322{
2323    uint64_t dt2;
2324
2325    set_float_exception_flags(0, &env->active_fpu.fp_status);
2326    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2327    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2328    RESTORE_ROUNDING_MODE;
2329    update_fcr31();
2330    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2331        dt2 = FLOAT_SNAN64;
2332    return dt2;
2333}
2334
2335uint32_t helper_float_roundw_d(uint64_t fdt0)
2336{
2337    uint32_t wt2;
2338
2339    set_float_exception_flags(0, &env->active_fpu.fp_status);
2340    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2341    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2342    RESTORE_ROUNDING_MODE;
2343    update_fcr31();
2344    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2345        wt2 = FLOAT_SNAN32;
2346    return wt2;
2347}
2348
2349uint32_t helper_float_roundw_s(uint32_t fst0)
2350{
2351    uint32_t wt2;
2352
2353    set_float_exception_flags(0, &env->active_fpu.fp_status);
2354    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2355    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2356    RESTORE_ROUNDING_MODE;
2357    update_fcr31();
2358    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2359        wt2 = FLOAT_SNAN32;
2360    return wt2;
2361}
2362
2363uint64_t helper_float_truncl_d(uint64_t fdt0)
2364{
2365    uint64_t dt2;
2366
2367    set_float_exception_flags(0, &env->active_fpu.fp_status);
2368    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2369    update_fcr31();
2370    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2371        dt2 = FLOAT_SNAN64;
2372    return dt2;
2373}
2374
2375uint64_t helper_float_truncl_s(uint32_t fst0)
2376{
2377    uint64_t dt2;
2378
2379    set_float_exception_flags(0, &env->active_fpu.fp_status);
2380    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2381    update_fcr31();
2382    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2383        dt2 = FLOAT_SNAN64;
2384    return dt2;
2385}
2386
2387uint32_t helper_float_truncw_d(uint64_t fdt0)
2388{
2389    uint32_t wt2;
2390
2391    set_float_exception_flags(0, &env->active_fpu.fp_status);
2392    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2393    update_fcr31();
2394    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2395        wt2 = FLOAT_SNAN32;
2396    return wt2;
2397}
2398
2399uint32_t helper_float_truncw_s(uint32_t fst0)
2400{
2401    uint32_t wt2;
2402
2403    set_float_exception_flags(0, &env->active_fpu.fp_status);
2404    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2405    update_fcr31();
2406    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2407        wt2 = FLOAT_SNAN32;
2408    return wt2;
2409}
2410
2411uint64_t helper_float_ceill_d(uint64_t fdt0)
2412{
2413    uint64_t dt2;
2414
2415    set_float_exception_flags(0, &env->active_fpu.fp_status);
2416    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2417    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2418    RESTORE_ROUNDING_MODE;
2419    update_fcr31();
2420    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2421        dt2 = FLOAT_SNAN64;
2422    return dt2;
2423}
2424
2425uint64_t helper_float_ceill_s(uint32_t fst0)
2426{
2427    uint64_t dt2;
2428
2429    set_float_exception_flags(0, &env->active_fpu.fp_status);
2430    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2431    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2432    RESTORE_ROUNDING_MODE;
2433    update_fcr31();
2434    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2435        dt2 = FLOAT_SNAN64;
2436    return dt2;
2437}
2438
2439uint32_t helper_float_ceilw_d(uint64_t fdt0)
2440{
2441    uint32_t wt2;
2442
2443    set_float_exception_flags(0, &env->active_fpu.fp_status);
2444    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2445    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2446    RESTORE_ROUNDING_MODE;
2447    update_fcr31();
2448    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2449        wt2 = FLOAT_SNAN32;
2450    return wt2;
2451}
2452
2453uint32_t helper_float_ceilw_s(uint32_t fst0)
2454{
2455    uint32_t wt2;
2456
2457    set_float_exception_flags(0, &env->active_fpu.fp_status);
2458    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2459    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2460    RESTORE_ROUNDING_MODE;
2461    update_fcr31();
2462    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2463        wt2 = FLOAT_SNAN32;
2464    return wt2;
2465}
2466
2467uint64_t helper_float_floorl_d(uint64_t fdt0)
2468{
2469    uint64_t dt2;
2470
2471    set_float_exception_flags(0, &env->active_fpu.fp_status);
2472    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2473    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2474    RESTORE_ROUNDING_MODE;
2475    update_fcr31();
2476    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2477        dt2 = FLOAT_SNAN64;
2478    return dt2;
2479}
2480
2481uint64_t helper_float_floorl_s(uint32_t fst0)
2482{
2483    uint64_t dt2;
2484
2485    set_float_exception_flags(0, &env->active_fpu.fp_status);
2486    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2487    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2488    RESTORE_ROUNDING_MODE;
2489    update_fcr31();
2490    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2491        dt2 = FLOAT_SNAN64;
2492    return dt2;
2493}
2494
2495uint32_t helper_float_floorw_d(uint64_t fdt0)
2496{
2497    uint32_t wt2;
2498
2499    set_float_exception_flags(0, &env->active_fpu.fp_status);
2500    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2501    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2502    RESTORE_ROUNDING_MODE;
2503    update_fcr31();
2504    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2505        wt2 = FLOAT_SNAN32;
2506    return wt2;
2507}
2508
2509uint32_t helper_float_floorw_s(uint32_t fst0)
2510{
2511    uint32_t wt2;
2512
2513    set_float_exception_flags(0, &env->active_fpu.fp_status);
2514    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2515    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2516    RESTORE_ROUNDING_MODE;
2517    update_fcr31();
2518    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2519        wt2 = FLOAT_SNAN32;
2520    return wt2;
2521}
2522
2523/* unary operations, not modifying fp status  */
2524#define FLOAT_UNOP(name)                                       \
2525uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
2526{                                                              \
2527    return float64_ ## name(fdt0);                             \
2528}                                                              \
2529uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
2530{                                                              \
2531    return float32_ ## name(fst0);                             \
2532}                                                              \
2533uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
2534{                                                              \
2535    uint32_t wt0;                                              \
2536    uint32_t wth0;                                             \
2537                                                               \
2538    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
2539    wth0 = float32_ ## name(fdt0 >> 32);                       \
2540    return ((uint64_t)wth0 << 32) | wt0;                       \
2541}
2542FLOAT_UNOP(abs)
2543FLOAT_UNOP(chs)
2544#undef FLOAT_UNOP
2545
2546/* MIPS specific unary operations */
2547uint64_t helper_float_recip_d(uint64_t fdt0)
2548{
2549    uint64_t fdt2;
2550
2551    set_float_exception_flags(0, &env->active_fpu.fp_status);
2552    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2553    update_fcr31();
2554    return fdt2;
2555}
2556
2557uint32_t helper_float_recip_s(uint32_t fst0)
2558{
2559    uint32_t fst2;
2560
2561    set_float_exception_flags(0, &env->active_fpu.fp_status);
2562    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2563    update_fcr31();
2564    return fst2;
2565}
2566
2567uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2568{
2569    uint64_t fdt2;
2570
2571    set_float_exception_flags(0, &env->active_fpu.fp_status);
2572    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2573    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2574    update_fcr31();
2575    return fdt2;
2576}
2577
2578uint32_t helper_float_rsqrt_s(uint32_t fst0)
2579{
2580    uint32_t fst2;
2581
2582    set_float_exception_flags(0, &env->active_fpu.fp_status);
2583    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2584    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2585    update_fcr31();
2586    return fst2;
2587}
2588
2589uint64_t helper_float_recip1_d(uint64_t fdt0)
2590{
2591    uint64_t fdt2;
2592
2593    set_float_exception_flags(0, &env->active_fpu.fp_status);
2594    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2595    update_fcr31();
2596    return fdt2;
2597}
2598
2599uint32_t helper_float_recip1_s(uint32_t fst0)
2600{
2601    uint32_t fst2;
2602
2603    set_float_exception_flags(0, &env->active_fpu.fp_status);
2604    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2605    update_fcr31();
2606    return fst2;
2607}
2608
2609uint64_t helper_float_recip1_ps(uint64_t fdt0)
2610{
2611    uint32_t fst2;
2612    uint32_t fsth2;
2613
2614    set_float_exception_flags(0, &env->active_fpu.fp_status);
2615    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2616    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2617    update_fcr31();
2618    return ((uint64_t)fsth2 << 32) | fst2;
2619}
2620
2621uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2622{
2623    uint64_t fdt2;
2624
2625    set_float_exception_flags(0, &env->active_fpu.fp_status);
2626    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2627    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2628    update_fcr31();
2629    return fdt2;
2630}
2631
2632uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2633{
2634    uint32_t fst2;
2635
2636    set_float_exception_flags(0, &env->active_fpu.fp_status);
2637    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2638    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2639    update_fcr31();
2640    return fst2;
2641}
2642
2643uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2644{
2645    uint32_t fst2;
2646    uint32_t fsth2;
2647
2648    set_float_exception_flags(0, &env->active_fpu.fp_status);
2649    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2650    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2651    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2652    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2653    update_fcr31();
2654    return ((uint64_t)fsth2 << 32) | fst2;
2655}
2656
2657#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2658
2659/* binary operations */
2660#define FLOAT_BINOP(name)                                          \
2661uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
2662{                                                                  \
2663    uint64_t dt2;                                                  \
2664                                                                   \
2665    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2666    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
2667    update_fcr31();                                                \
2668    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2669        dt2 = FLOAT_QNAN64;                                        \
2670    return dt2;                                                    \
2671}                                                                  \
2672                                                                   \
2673uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
2674{                                                                  \
2675    uint32_t wt2;                                                  \
2676                                                                   \
2677    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2678    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2679    update_fcr31();                                                \
2680    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2681        wt2 = FLOAT_QNAN32;                                        \
2682    return wt2;                                                    \
2683}                                                                  \
2684                                                                   \
2685uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
2686{                                                                  \
2687    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
2688    uint32_t fsth0 = fdt0 >> 32;                                   \
2689    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
2690    uint32_t fsth1 = fdt1 >> 32;                                   \
2691    uint32_t wt2;                                                  \
2692    uint32_t wth2;                                                 \
2693                                                                   \
2694    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2695    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2696    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
2697    update_fcr31();                                                \
2698    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
2699        wt2 = FLOAT_QNAN32;                                        \
2700        wth2 = FLOAT_QNAN32;                                       \
2701    }                                                              \
2702    return ((uint64_t)wth2 << 32) | wt2;                           \
2703}
2704
2705FLOAT_BINOP(add)
2706FLOAT_BINOP(sub)
2707FLOAT_BINOP(mul)
2708FLOAT_BINOP(div)
2709#undef FLOAT_BINOP
2710
2711/* ternary operations */
2712#define FLOAT_TERNOP(name1, name2)                                        \
2713uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
2714                                           uint64_t fdt2)                 \
2715{                                                                         \
2716    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2717    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2718}                                                                         \
2719                                                                          \
2720uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
2721                                           uint32_t fst2)                 \
2722{                                                                         \
2723    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2724    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2725}                                                                         \
2726                                                                          \
2727uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
2728                                            uint64_t fdt2)                \
2729{                                                                         \
2730    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2731    uint32_t fsth0 = fdt0 >> 32;                                          \
2732    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2733    uint32_t fsth1 = fdt1 >> 32;                                          \
2734    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2735    uint32_t fsth2 = fdt2 >> 32;                                          \
2736                                                                          \
2737    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2738    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2739    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2740    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2741    return ((uint64_t)fsth2 << 32) | fst2;                                \
2742}
2743
2744FLOAT_TERNOP(mul, add)
2745FLOAT_TERNOP(mul, sub)
2746#undef FLOAT_TERNOP
2747
2748/* negated ternary operations */
2749#define FLOAT_NTERNOP(name1, name2)                                       \
2750uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
2751                                           uint64_t fdt2)                 \
2752{                                                                         \
2753    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2754    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2755    return float64_chs(fdt2);                                             \
2756}                                                                         \
2757                                                                          \
2758uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
2759                                           uint32_t fst2)                 \
2760{                                                                         \
2761    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2762    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2763    return float32_chs(fst2);                                             \
2764}                                                                         \
2765                                                                          \
2766uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
2767                                           uint64_t fdt2)                 \
2768{                                                                         \
2769    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2770    uint32_t fsth0 = fdt0 >> 32;                                          \
2771    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2772    uint32_t fsth1 = fdt1 >> 32;                                          \
2773    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2774    uint32_t fsth2 = fdt2 >> 32;                                          \
2775                                                                          \
2776    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2777    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2778    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2779    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2780    fst2 = float32_chs(fst2);                                             \
2781    fsth2 = float32_chs(fsth2);                                           \
2782    return ((uint64_t)fsth2 << 32) | fst2;                                \
2783}
2784
2785FLOAT_NTERNOP(mul, add)
2786FLOAT_NTERNOP(mul, sub)
2787#undef FLOAT_NTERNOP
2788
2789/* MIPS specific binary operations */
2790uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2791{
2792    set_float_exception_flags(0, &env->active_fpu.fp_status);
2793    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2794    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2795    update_fcr31();
2796    return fdt2;
2797}
2798
2799uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2800{
2801    set_float_exception_flags(0, &env->active_fpu.fp_status);
2802    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2803    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2804    update_fcr31();
2805    return fst2;
2806}
2807
2808uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2809{
2810    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2811    uint32_t fsth0 = fdt0 >> 32;
2812    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2813    uint32_t fsth2 = fdt2 >> 32;
2814
2815    set_float_exception_flags(0, &env->active_fpu.fp_status);
2816    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2817    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2818    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2819    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2820    update_fcr31();
2821    return ((uint64_t)fsth2 << 32) | fst2;
2822}
2823
2824uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2825{
2826    set_float_exception_flags(0, &env->active_fpu.fp_status);
2827    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2828    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2829    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2830    update_fcr31();
2831    return fdt2;
2832}
2833
2834uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2835{
2836    set_float_exception_flags(0, &env->active_fpu.fp_status);
2837    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2838    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2839    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2840    update_fcr31();
2841    return fst2;
2842}
2843
2844uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
2845{
2846    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2847    uint32_t fsth0 = fdt0 >> 32;
2848    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2849    uint32_t fsth2 = fdt2 >> 32;
2850
2851    set_float_exception_flags(0, &env->active_fpu.fp_status);
2852    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2853    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2854    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2855    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
2856    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2857    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
2858    update_fcr31();
2859    return ((uint64_t)fsth2 << 32) | fst2;
2860}
2861
2862uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
2863{
2864    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2865    uint32_t fsth0 = fdt0 >> 32;
2866    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2867    uint32_t fsth1 = fdt1 >> 32;
2868    uint32_t fst2;
2869    uint32_t fsth2;
2870
2871    set_float_exception_flags(0, &env->active_fpu.fp_status);
2872    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
2873    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
2874    update_fcr31();
2875    return ((uint64_t)fsth2 << 32) | fst2;
2876}
2877
2878uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
2879{
2880    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2881    uint32_t fsth0 = fdt0 >> 32;
2882    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2883    uint32_t fsth1 = fdt1 >> 32;
2884    uint32_t fst2;
2885    uint32_t fsth2;
2886
2887    set_float_exception_flags(0, &env->active_fpu.fp_status);
2888    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
2889    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
2890    update_fcr31();
2891    return ((uint64_t)fsth2 << 32) | fst2;
2892}
2893
2894/* compare operations */
2895#define FOP_COND_D(op, cond)                                   \
2896void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
2897{                                                              \
2898    int c = cond;                                              \
2899    update_fcr31();                                            \
2900    if (c)                                                     \
2901        SET_FP_COND(cc, env->active_fpu);                      \
2902    else                                                       \
2903        CLEAR_FP_COND(cc, env->active_fpu);                    \
2904}                                                              \
2905void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
2906{                                                              \
2907    int c;                                                     \
2908    fdt0 = float64_abs(fdt0);                                  \
2909    fdt1 = float64_abs(fdt1);                                  \
2910    c = cond;                                                  \
2911    update_fcr31();                                            \
2912    if (c)                                                     \
2913        SET_FP_COND(cc, env->active_fpu);                      \
2914    else                                                       \
2915        CLEAR_FP_COND(cc, env->active_fpu);                    \
2916}
2917
2918static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
2919{
2920    if (float64_is_signaling_nan(a) ||
2921        float64_is_signaling_nan(b) ||
2922        (sig && (float64_is_any_nan(a) || float64_is_any_nan(b)))) {
2923        float_raise(float_flag_invalid, status);
2924        return 1;
2925    } else if (float64_is_any_nan(a) || float64_is_any_nan(b)) {
2926        return 1;
2927    } else {
2928        return 0;
2929    }
2930}
2931
2932/* NOTE: the comma operator will make "cond" to eval to false,
2933 * but float*_is_unordered() is still called. */
2934FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
2935FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
2936FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2937FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2938FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2939FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2940FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2941FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2942/* NOTE: the comma operator will make "cond" to eval to false,
2943 * but float*_is_unordered() is still called. */
2944FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
2945FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
2946FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2947FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2948FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2949FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2950FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2951FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2952
2953#define FOP_COND_S(op, cond)                                   \
2954void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
2955{                                                              \
2956    int c = cond;                                              \
2957    update_fcr31();                                            \
2958    if (c)                                                     \
2959        SET_FP_COND(cc, env->active_fpu);                      \
2960    else                                                       \
2961        CLEAR_FP_COND(cc, env->active_fpu);                    \
2962}                                                              \
2963void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
2964{                                                              \
2965    int c;                                                     \
2966    fst0 = float32_abs(fst0);                                  \
2967    fst1 = float32_abs(fst1);                                  \
2968    c = cond;                                                  \
2969    update_fcr31();                                            \
2970    if (c)                                                     \
2971        SET_FP_COND(cc, env->active_fpu);                      \
2972    else                                                       \
2973        CLEAR_FP_COND(cc, env->active_fpu);                    \
2974}
2975
2976static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
2977{
2978    if (float32_is_signaling_nan(a) ||
2979        float32_is_signaling_nan(b) ||
2980        (sig && (float32_is_any_nan(a) || float32_is_any_nan(b)))) {
2981        float_raise(float_flag_invalid, status);
2982        return 1;
2983    } else if (float32_is_any_nan(a) || float32_is_any_nan(b)) {
2984        return 1;
2985    } else {
2986        return 0;
2987    }
2988}
2989
2990/* NOTE: the comma operator will make "cond" to eval to false,
2991 * but float*_is_unordered() is still called. */
2992FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
2993FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
2994FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2995FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2996FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2997FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2998FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
2999FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3000/* NOTE: the comma operator will make "cond" to eval to false,
3001 * but float*_is_unordered() is still called. */
3002FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
3003FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
3004FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3005FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3006FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3007FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3008FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
3009FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3010
3011#define FOP_COND_PS(op, condl, condh)                           \
3012void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
3013{                                                               \
3014    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
3015    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
3016    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
3017    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
3018    int cl = condl;                                             \
3019    int ch = condh;                                             \
3020                                                                \
3021    update_fcr31();                                             \
3022    if (cl)                                                     \
3023        SET_FP_COND(cc, env->active_fpu);                       \
3024    else                                                        \
3025        CLEAR_FP_COND(cc, env->active_fpu);                     \
3026    if (ch)                                                     \
3027        SET_FP_COND(cc + 1, env->active_fpu);                   \
3028    else                                                        \
3029        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3030}                                                               \
3031void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3032{                                                               \
3033    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
3034    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
3035    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
3036    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
3037    int cl = condl;                                             \
3038    int ch = condh;                                             \
3039                                                                \
3040    update_fcr31();                                             \
3041    if (cl)                                                     \
3042        SET_FP_COND(cc, env->active_fpu);                       \
3043    else                                                        \
3044        CLEAR_FP_COND(cc, env->active_fpu);                     \
3045    if (ch)                                                     \
3046        SET_FP_COND(cc + 1, env->active_fpu);                   \
3047    else                                                        \
3048        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3049}
3050
3051/* NOTE: the comma operator will make "cond" to eval to false,
3052 * but float*_is_unordered() is still called. */
3053FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
3054                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3055FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
3056                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
3057FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3058                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3059FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3060                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3061FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3062                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3063FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3064                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3065FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3066                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3067FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3068                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3069/* NOTE: the comma operator will make "cond" to eval to false,
3070 * but float*_is_unordered() is still called. */
3071FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
3072                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3073FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
3074                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
3075FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3076                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3077FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3078                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3079FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3080                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3081FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3082                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3083FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3084                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3085FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3086                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3087