op_helper.c revision 6d1afd3bf737fe15c9ba2a23c2f957ae8ff2e663
1/*
2 *  MIPS emulation helpers for qemu.
3 *
4 *  Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include <stdlib.h>
20#include "exec.h"
21
22#include "qemu/host-utils.h"
23
24#include "helper.h"
25/*****************************************************************************/
26/* Exceptions processing helpers */
27
28void helper_raise_exception_err (uint32_t exception, int error_code)
29{
30#if 1
31    if (exception < 0x100)
32        qemu_log("%s: %d %d\n", __func__, exception, error_code);
33#endif
34    env->exception_index = exception;
35    env->error_code = error_code;
36    cpu_loop_exit(env);
37}
38
39void helper_raise_exception (uint32_t exception)
40{
41    helper_raise_exception_err(exception, 0);
42}
43
44void helper_interrupt_restart (void)
45{
46    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
47        !(env->CP0_Status & (1 << CP0St_ERL)) &&
48        !(env->hflags & MIPS_HFLAG_DM) &&
49        (env->CP0_Status & (1 << CP0St_IE)) &&
50        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
51        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
52        helper_raise_exception(EXCP_EXT_INTERRUPT);
53    }
54}
55
56#if !defined(CONFIG_USER_ONLY)
57static void do_restore_state (void *pc_ptr)
58{
59    TranslationBlock *tb;
60    unsigned long pc = (unsigned long) pc_ptr;
61
62    tb = tb_find_pc (pc);
63    if (tb) {
64        cpu_restore_state (env, pc);
65    }
66}
67#endif
68
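/* Memory access wrappers used by the unaligned and atomic helpers
   below.  mem_idx selects the softmmu address space for the access:
   0 = kernel, 1 = supervisor, anything else = user.  In user-only
   builds there is a single flat address space, so the index is ignored
   and the _raw accessors are used instead.  */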
69#if defined(CONFIG_USER_ONLY)
70#define HELPER_LD(name, insn, type)                                     \
71static inline type do_##name(target_ulong addr, int mem_idx)            \
72{                                                                       \
73    return (type) insn##_raw(addr);                                     \
74}
75#else
76#define HELPER_LD(name, insn, type)                                     \
77static inline type do_##name(target_ulong addr, int mem_idx)            \
78{                                                                       \
79    switch (mem_idx)                                                    \
80    {                                                                   \
81    case 0: return (type) insn##_kernel(addr); break;                   \
82    case 1: return (type) insn##_super(addr); break;                    \
83    default:                                                            \
84    case 2: return (type) insn##_user(addr); break;                     \
85    }                                                                   \
86}
87#endif
88HELPER_LD(lbu, ldub, uint8_t)
89HELPER_LD(lw, ldl, int32_t)
90#ifdef TARGET_MIPS64
91HELPER_LD(ld, ldq, int64_t)
92#endif
93#undef HELPER_LD
94
95#if defined(CONFIG_USER_ONLY)
96#define HELPER_ST(name, insn, type)                                     \
97static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
98{                                                                       \
99    insn##_raw(addr, val);                                              \
100}
101#else
102#define HELPER_ST(name, insn, type)                                     \
103static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
104{                                                                       \
105    switch (mem_idx)                                                    \
106    {                                                                   \
107    case 0: insn##_kernel(addr, val); break;                            \
108    case 1: insn##_super(addr, val); break;                             \
109    default:                                                            \
110    case 2: insn##_user(addr, val); break;                              \
111    }                                                                   \
112}
113#endif
114HELPER_ST(sb, stb, uint8_t)
115HELPER_ST(sw, stl, uint32_t)
116#ifdef TARGET_MIPS64
117HELPER_ST(sd, stq, uint64_t)
118#endif
119#undef HELPER_ST
120
121target_ulong helper_clo (target_ulong arg1)
122{
123    return clo32(arg1);
124}
125
126target_ulong helper_clz (target_ulong arg1)
127{
128    return clz32(arg1);
129}
130
131#if defined(TARGET_MIPS64)
132target_ulong helper_dclo (target_ulong arg1)
133{
134    return clo64(arg1);
135}
136
137target_ulong helper_dclz (target_ulong arg1)
138{
139    return clz64(arg1);
140}
141#endif /* TARGET_MIPS64 */
142
/* 64-bit arithmetic for 32-bit hosts. */
144static inline uint64_t get_HILO (void)
145{
146    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
147}
148
149static inline void set_HILO (uint64_t HILO)
150{
151    env->active_tc.LO[0] = (int32_t)HILO;
152    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
153}
154
/* These must be macros rather than inline functions: the assignment to
   arg1 has to be visible in the calling helper, and a by-value function
   parameter would silently drop it.  */
#define set_HIT0_LO(arg1, HILO) do {                                    \
    uint64_t hilo_ = (HILO);                                            \
    env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);               \
    arg1 = env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);               \
} while (0)

#define set_HI_LOT0(arg1, HILO) do {                                    \
    uint64_t hilo_ = (HILO);                                            \
    arg1 = env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);        \
    env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);                      \
} while (0)
166
167/* Multiplication variants of the vr54xx. */
168target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
169{
170    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
171
172    return arg1;
173}
174
175target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
176{
177    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
178
179    return arg1;
180}
181
182target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
183{
184    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
185
186    return arg1;
187}
188
189target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
190{
191    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
192
193    return arg1;
194}
195
196target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
197{
198    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
199
200    return arg1;
201}
202
203target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
204{
205    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
206
207    return arg1;
208}
209
210target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
211{
212    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
213
214    return arg1;
215}
216
217target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
218{
219    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
220
221    return arg1;
222}
223
224target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
225{
226    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
227
228    return arg1;
229}
230
231target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
232{
233    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
234
235    return arg1;
236}
237
238target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
239{
240    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
241
242    return arg1;
243}
244
245target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
246{
247    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
248
249    return arg1;
250}
251
252target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
253{
254    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
255
256    return arg1;
257}
258
259target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
260{
261    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
262
263    return arg1;
264}
265
266#ifdef TARGET_MIPS64
267void helper_dmult (target_ulong arg1, target_ulong arg2)
268{
269    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
270}
271
272void helper_dmultu (target_ulong arg1, target_ulong arg2)
273{
274    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
275}
276#endif
277
278#ifndef CONFIG_USER_ONLY
279
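/* Translate a guest virtual address to a physical address for the
   LL/SC helpers.  cpu_mips_translate_address() returns -1 when the
   translation fails; in that case we exit back to the main loop so
   that any exception recorded during translation can be delivered.  */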
280static inline hwaddr do_translate_address(target_ulong address, int rw)
281{
282    hwaddr lladdr;
283
284    lladdr = cpu_mips_translate_address(env, address, rw);
285
286    if (lladdr == (hwaddr)-1LL) {
287        cpu_loop_exit(env);
288    } else {
289        return lladdr;
290    }
291}
292
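/* LL/SC emulation: LL records the translated address in env->lladdr
   and the loaded value in env->llval.  SC stores and returns 1 only if
   the store address still translates to lladdr and memory still
   contains llval; otherwise it returns 0 without storing (a misaligned
   address raises an address error first).  */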
293#define HELPER_LD_ATOMIC(name, insn)                                          \
294target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
295{                                                                             \
296    env->lladdr = do_translate_address(arg, 0);                               \
297    env->llval = do_##insn(arg, mem_idx);                                     \
298    return env->llval;                                                        \
299}
300HELPER_LD_ATOMIC(ll, lw)
301#ifdef TARGET_MIPS64
302HELPER_LD_ATOMIC(lld, ld)
303#endif
304#undef HELPER_LD_ATOMIC
305
306#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
307target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
308{                                                                             \
309    target_long tmp;                                                          \
310                                                                              \
311    if (arg2 & almask) {                                                      \
312        env->CP0_BadVAddr = arg2;                                             \
313        helper_raise_exception(EXCP_AdES);                                    \
314    }                                                                         \
315    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
316        tmp = do_##ld_insn(arg2, mem_idx);                                    \
317        if (tmp == env->llval) {                                              \
318            do_##st_insn(arg2, arg1, mem_idx);                                \
319            return 1;                                                         \
320        }                                                                     \
321    }                                                                         \
322    return 0;                                                                 \
323}
324HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
325#ifdef TARGET_MIPS64
326HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
327#endif
328#undef HELPER_ST_ATOMIC
329#endif
330
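/* Helpers for the unaligned LWL/LWR/SWL/SWR instructions.  GET_LMASK
   yields the offset of the addressed byte within its 32-bit word,
   counted from the most-significant byte, and GET_OFFSET steps towards
   the remaining bytes of the word in the direction dictated by the
   target endianness.  */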
331#ifdef TARGET_WORDS_BIGENDIAN
332#define GET_LMASK(v) ((v) & 3)
333#define GET_OFFSET(addr, offset) (addr + (offset))
334#else
335#define GET_LMASK(v) (((v) & 3) ^ 3)
336#define GET_OFFSET(addr, offset) (addr - (offset))
337#endif
338
339target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
340{
341    target_ulong tmp;
342
343    tmp = do_lbu(arg2, mem_idx);
344    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
345
346    if (GET_LMASK(arg2) <= 2) {
347        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
348        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
349    }
350
351    if (GET_LMASK(arg2) <= 1) {
352        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
353        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
354    }
355
356    if (GET_LMASK(arg2) == 0) {
357        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
358        arg1 = (arg1 & 0xFFFFFF00) | tmp;
359    }
360    return (int32_t)arg1;
361}
362
363target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
364{
365    target_ulong tmp;
366
367    tmp = do_lbu(arg2, mem_idx);
368    arg1 = (arg1 & 0xFFFFFF00) | tmp;
369
370    if (GET_LMASK(arg2) >= 1) {
371        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
372        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
373    }
374
375    if (GET_LMASK(arg2) >= 2) {
376        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
377        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
378    }
379
380    if (GET_LMASK(arg2) == 3) {
381        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
382        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
383    }
384    return (int32_t)arg1;
385}
386
387void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
388{
389    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
390
391    if (GET_LMASK(arg2) <= 2)
392        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
393
394    if (GET_LMASK(arg2) <= 1)
395        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
396
397    if (GET_LMASK(arg2) == 0)
398        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
399}
400
401void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
402{
403    do_sb(arg2, (uint8_t)arg1, mem_idx);
404
405    if (GET_LMASK(arg2) >= 1)
406        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
407
408    if (GET_LMASK(arg2) >= 2)
409        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
410
411    if (GET_LMASK(arg2) == 3)
412        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
413}
414
415#if defined(TARGET_MIPS64)
/* "Left"/"right" partial doubleword loads and stores (LDL/LDR/SDL/SDR).
   We must do the memory access inline, or fault handling won't work.  */
418
419#ifdef TARGET_WORDS_BIGENDIAN
420#define GET_LMASK64(v) ((v) & 7)
421#else
422#define GET_LMASK64(v) (((v) & 7) ^ 7)
423#endif
424
425target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
426{
427    uint64_t tmp;
428
429    tmp = do_lbu(arg2, mem_idx);
430    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
431
432    if (GET_LMASK64(arg2) <= 6) {
433        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
434        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
435    }
436
437    if (GET_LMASK64(arg2) <= 5) {
438        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
439        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
440    }
441
442    if (GET_LMASK64(arg2) <= 4) {
443        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
444        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
445    }
446
447    if (GET_LMASK64(arg2) <= 3) {
448        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
449        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
450    }
451
452    if (GET_LMASK64(arg2) <= 2) {
453        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
454        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
455    }
456
457    if (GET_LMASK64(arg2) <= 1) {
458        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
459        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
460    }
461
462    if (GET_LMASK64(arg2) == 0) {
463        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
464        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
465    }
466
467    return arg1;
468}
469
470target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
471{
472    uint64_t tmp;
473
474    tmp = do_lbu(arg2, mem_idx);
475    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
476
477    if (GET_LMASK64(arg2) >= 1) {
478        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
479        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
480    }
481
482    if (GET_LMASK64(arg2) >= 2) {
483        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
484        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
485    }
486
487    if (GET_LMASK64(arg2) >= 3) {
488        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
489        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
490    }
491
492    if (GET_LMASK64(arg2) >= 4) {
493        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
494        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
495    }
496
497    if (GET_LMASK64(arg2) >= 5) {
498        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
499        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
500    }
501
502    if (GET_LMASK64(arg2) >= 6) {
503        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
504        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
505    }
506
507    if (GET_LMASK64(arg2) == 7) {
508        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
509        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
510    }
511
512    return arg1;
513}
514
515void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
516{
517    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
518
519    if (GET_LMASK64(arg2) <= 6)
520        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
521
522    if (GET_LMASK64(arg2) <= 5)
523        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
524
525    if (GET_LMASK64(arg2) <= 4)
526        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
527
528    if (GET_LMASK64(arg2) <= 3)
529        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
530
531    if (GET_LMASK64(arg2) <= 2)
532        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
533
534    if (GET_LMASK64(arg2) <= 1)
535        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
536
537    if (GET_LMASK64(arg2) <= 0)
538        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
539}
540
541void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
542{
543    do_sb(arg2, (uint8_t)arg1, mem_idx);
544
545    if (GET_LMASK64(arg2) >= 1)
546        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
547
548    if (GET_LMASK64(arg2) >= 2)
549        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
550
551    if (GET_LMASK64(arg2) >= 3)
552        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
553
554    if (GET_LMASK64(arg2) >= 4)
555        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
556
557    if (GET_LMASK64(arg2) >= 5)
558        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
559
560    if (GET_LMASK64(arg2) >= 6)
561        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
562
563    if (GET_LMASK64(arg2) == 7)
564        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
565}
566#endif /* TARGET_MIPS64 */
567
568#ifndef CONFIG_USER_ONLY
569/* CP0 helpers */
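/* The mftc0/mttc0 helpers operate on the TC selected by the TargTC
   field of CP0_VPEControl: when it names the currently running TC the
   active register set is used, otherwise the saved state of the target
   TC in env->tcs[] is accessed.  */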
570target_ulong helper_mfc0_mvpcontrol (void)
571{
572    return env->mvp->CP0_MVPControl;
573}
574
575target_ulong helper_mfc0_mvpconf0 (void)
576{
577    return env->mvp->CP0_MVPConf0;
578}
579
580target_ulong helper_mfc0_mvpconf1 (void)
581{
582    return env->mvp->CP0_MVPConf1;
583}
584
585target_ulong helper_mfc0_random (void)
586{
587    return (int32_t)cpu_mips_get_random(env);
588}
589
590target_ulong helper_mfc0_tcstatus (void)
591{
592    return env->active_tc.CP0_TCStatus;
593}
594
595target_ulong helper_mftc0_tcstatus(void)
596{
597    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
598
599    if (other_tc == env->current_tc)
600        return env->active_tc.CP0_TCStatus;
601    else
602        return env->tcs[other_tc].CP0_TCStatus;
603}
604
605target_ulong helper_mfc0_tcbind (void)
606{
607    return env->active_tc.CP0_TCBind;
608}
609
610target_ulong helper_mftc0_tcbind(void)
611{
612    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
613
614    if (other_tc == env->current_tc)
615        return env->active_tc.CP0_TCBind;
616    else
617        return env->tcs[other_tc].CP0_TCBind;
618}
619
620target_ulong helper_mfc0_tcrestart (void)
621{
622    return env->active_tc.PC;
623}
624
625target_ulong helper_mftc0_tcrestart(void)
626{
627    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
628
629    if (other_tc == env->current_tc)
630        return env->active_tc.PC;
631    else
632        return env->tcs[other_tc].PC;
633}
634
635target_ulong helper_mfc0_tchalt (void)
636{
637    return env->active_tc.CP0_TCHalt;
638}
639
640target_ulong helper_mftc0_tchalt(void)
641{
642    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
643
644    if (other_tc == env->current_tc)
645        return env->active_tc.CP0_TCHalt;
646    else
647        return env->tcs[other_tc].CP0_TCHalt;
648}
649
650target_ulong helper_mfc0_tccontext (void)
651{
652    return env->active_tc.CP0_TCContext;
653}
654
655target_ulong helper_mftc0_tccontext(void)
656{
657    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
658
659    if (other_tc == env->current_tc)
660        return env->active_tc.CP0_TCContext;
661    else
662        return env->tcs[other_tc].CP0_TCContext;
663}
664
665target_ulong helper_mfc0_tcschedule (void)
666{
667    return env->active_tc.CP0_TCSchedule;
668}
669
670target_ulong helper_mftc0_tcschedule(void)
671{
672    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
673
674    if (other_tc == env->current_tc)
675        return env->active_tc.CP0_TCSchedule;
676    else
677        return env->tcs[other_tc].CP0_TCSchedule;
678}
679
680target_ulong helper_mfc0_tcschefback (void)
681{
682    return env->active_tc.CP0_TCScheFBack;
683}
684
685target_ulong helper_mftc0_tcschefback(void)
686{
687    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
688
689    if (other_tc == env->current_tc)
690        return env->active_tc.CP0_TCScheFBack;
691    else
692        return env->tcs[other_tc].CP0_TCScheFBack;
693}
694
695target_ulong helper_mfc0_count (void)
696{
697    return (int32_t)cpu_mips_get_count(env);
698}
699
700target_ulong helper_mftc0_entryhi(void)
701{
702    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
703    int32_t tcstatus;
704
705    if (other_tc == env->current_tc)
706        tcstatus = env->active_tc.CP0_TCStatus;
707    else
708        tcstatus = env->tcs[other_tc].CP0_TCStatus;
709
710    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
711}
712
713target_ulong helper_mftc0_status(void)
714{
715    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
716    target_ulong t0;
717    int32_t tcstatus;
718
719    if (other_tc == env->current_tc)
720        tcstatus = env->active_tc.CP0_TCStatus;
721    else
722        tcstatus = env->tcs[other_tc].CP0_TCStatus;
723
724    t0 = env->CP0_Status & ~0xf1000018;
725    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
726    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
727    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
728
729    return t0;
730}
731
732target_ulong helper_mfc0_lladdr (void)
733{
734    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
735}
736
737target_ulong helper_mfc0_watchlo (uint32_t sel)
738{
739    return (int32_t)env->CP0_WatchLo[sel];
740}
741
742target_ulong helper_mfc0_watchhi (uint32_t sel)
743{
744    return env->CP0_WatchHi[sel];
745}
746
747target_ulong helper_mfc0_debug (void)
748{
749    target_ulong t0 = env->CP0_Debug;
750    if (env->hflags & MIPS_HFLAG_DM)
751        t0 |= 1 << CP0DB_DM;
752
753    return t0;
754}
755
756target_ulong helper_mftc0_debug(void)
757{
758    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
759    int32_t tcstatus;
760
761    if (other_tc == env->current_tc)
762        tcstatus = env->active_tc.CP0_Debug_tcstatus;
763    else
764        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
765
766    /* XXX: Might be wrong, check with EJTAG spec. */
767    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
768            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
769}
770
771#if defined(TARGET_MIPS64)
772target_ulong helper_dmfc0_tcrestart (void)
773{
774    return env->active_tc.PC;
775}
776
777target_ulong helper_dmfc0_tchalt (void)
778{
779    return env->active_tc.CP0_TCHalt;
780}
781
782target_ulong helper_dmfc0_tccontext (void)
783{
784    return env->active_tc.CP0_TCContext;
785}
786
787target_ulong helper_dmfc0_tcschedule (void)
788{
789    return env->active_tc.CP0_TCSchedule;
790}
791
792target_ulong helper_dmfc0_tcschefback (void)
793{
794    return env->active_tc.CP0_TCScheFBack;
795}
796
797target_ulong helper_dmfc0_lladdr (void)
798{
799    return env->lladdr >> env->CP0_LLAddr_shift;
800}
801
802target_ulong helper_dmfc0_watchlo (uint32_t sel)
803{
804    return env->CP0_WatchLo[sel];
805}
806#endif /* TARGET_MIPS64 */
807
808void helper_mtc0_index (target_ulong arg1)
809{
810    int num = 1;
811    unsigned int tmp = env->tlb->nb_tlb;
812
813    do {
814        tmp >>= 1;
815        num <<= 1;
816    } while (tmp);
817    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
818}
819
820void helper_mtc0_mvpcontrol (target_ulong arg1)
821{
822    uint32_t mask = 0;
823    uint32_t newval;
824
825    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
826        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
827                (1 << CP0MVPCo_EVP);
828    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
829        mask |= (1 << CP0MVPCo_STLB);
830    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
831
832    // TODO: Enable/disable shared TLB, enable/disable VPEs.
833
834    env->mvp->CP0_MVPControl = newval;
835}
836
837void helper_mtc0_vpecontrol (target_ulong arg1)
838{
839    uint32_t mask;
840    uint32_t newval;
841
842    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
843           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
844    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
845
846    /* Yield scheduler intercept not implemented. */
847    /* Gating storage scheduler intercept not implemented. */
848
849    // TODO: Enable/disable TCs.
850
851    env->CP0_VPEControl = newval;
852}
853
854void helper_mtc0_vpeconf0 (target_ulong arg1)
855{
856    uint32_t mask = 0;
857    uint32_t newval;
858
859    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
860        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
861            mask |= (0xff << CP0VPEC0_XTC);
862        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
863    }
864    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
865
866    // TODO: TC exclusive handling due to ERL/EXL.
867
868    env->CP0_VPEConf0 = newval;
869}
870
871void helper_mtc0_vpeconf1 (target_ulong arg1)
872{
873    uint32_t mask = 0;
874    uint32_t newval;
875
876    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
877        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
878                (0xff << CP0VPEC1_NCP1);
879    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
880
881    /* UDI not implemented. */
882    /* CP2 not implemented. */
883
884    // TODO: Handle FPU (CP1) binding.
885
886    env->CP0_VPEConf1 = newval;
887}
888
889void helper_mtc0_yqmask (target_ulong arg1)
890{
891    /* Yield qualifier inputs not implemented. */
892    env->CP0_YQMask = 0x00000000;
893}
894
895void helper_mtc0_vpeopt (target_ulong arg1)
896{
897    env->CP0_VPEOpt = arg1 & 0x0000ffff;
898}
899
900void helper_mtc0_entrylo0 (target_ulong arg1)
901{
902    /* Large physaddr (PABITS) not implemented */
903    /* 1k pages not implemented */
904    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
905}
906
907void helper_mtc0_tcstatus (target_ulong arg1)
908{
909    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
910    uint32_t newval;
911
912    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
913
914    // TODO: Sync with CP0_Status.
915
916    env->active_tc.CP0_TCStatus = newval;
917}
918
919void helper_mttc0_tcstatus (target_ulong arg1)
920{
921    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
922
923    // TODO: Sync with CP0_Status.
924
925    if (other_tc == env->current_tc)
926        env->active_tc.CP0_TCStatus = arg1;
927    else
928        env->tcs[other_tc].CP0_TCStatus = arg1;
929}
930
931void helper_mtc0_tcbind (target_ulong arg1)
932{
933    uint32_t mask = (1 << CP0TCBd_TBE);
934    uint32_t newval;
935
936    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
937        mask |= (1 << CP0TCBd_CurVPE);
938    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
939    env->active_tc.CP0_TCBind = newval;
940}
941
942void helper_mttc0_tcbind (target_ulong arg1)
943{
944    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
945    uint32_t mask = (1 << CP0TCBd_TBE);
946    uint32_t newval;
947
948    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
949        mask |= (1 << CP0TCBd_CurVPE);
950    if (other_tc == env->current_tc) {
951        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
952        env->active_tc.CP0_TCBind = newval;
953    } else {
954        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
955        env->tcs[other_tc].CP0_TCBind = newval;
956    }
957}
958
959void helper_mtc0_tcrestart (target_ulong arg1)
960{
961    env->active_tc.PC = arg1;
962    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
963    env->lladdr = 0ULL;
964    /* MIPS16 not implemented. */
965}
966
967void helper_mttc0_tcrestart (target_ulong arg1)
968{
969    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
970
971    if (other_tc == env->current_tc) {
972        env->active_tc.PC = arg1;
973        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
974        env->lladdr = 0ULL;
975        /* MIPS16 not implemented. */
976    } else {
977        env->tcs[other_tc].PC = arg1;
978        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
979        env->lladdr = 0ULL;
980        /* MIPS16 not implemented. */
981    }
982}
983
984void helper_mtc0_tchalt (target_ulong arg1)
985{
986    env->active_tc.CP0_TCHalt = arg1 & 0x1;
987
988    // TODO: Halt TC / Restart (if allocated+active) TC.
989}
990
991void helper_mttc0_tchalt (target_ulong arg1)
992{
993    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
994
995    // TODO: Halt TC / Restart (if allocated+active) TC.
996
997    if (other_tc == env->current_tc)
998        env->active_tc.CP0_TCHalt = arg1;
999    else
1000        env->tcs[other_tc].CP0_TCHalt = arg1;
1001}
1002
1003void helper_mtc0_tccontext (target_ulong arg1)
1004{
1005    env->active_tc.CP0_TCContext = arg1;
1006}
1007
1008void helper_mttc0_tccontext (target_ulong arg1)
1009{
1010    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1011
1012    if (other_tc == env->current_tc)
1013        env->active_tc.CP0_TCContext = arg1;
1014    else
1015        env->tcs[other_tc].CP0_TCContext = arg1;
1016}
1017
1018void helper_mtc0_tcschedule (target_ulong arg1)
1019{
1020    env->active_tc.CP0_TCSchedule = arg1;
1021}
1022
1023void helper_mttc0_tcschedule (target_ulong arg1)
1024{
1025    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1026
1027    if (other_tc == env->current_tc)
1028        env->active_tc.CP0_TCSchedule = arg1;
1029    else
1030        env->tcs[other_tc].CP0_TCSchedule = arg1;
1031}
1032
1033void helper_mtc0_tcschefback (target_ulong arg1)
1034{
1035    env->active_tc.CP0_TCScheFBack = arg1;
1036}
1037
1038void helper_mttc0_tcschefback (target_ulong arg1)
1039{
1040    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1041
1042    if (other_tc == env->current_tc)
1043        env->active_tc.CP0_TCScheFBack = arg1;
1044    else
1045        env->tcs[other_tc].CP0_TCScheFBack = arg1;
1046}
1047
1048void helper_mtc0_entrylo1 (target_ulong arg1)
1049{
1050    /* Large physaddr (PABITS) not implemented */
1051    /* 1k pages not implemented */
1052    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1053}
1054
1055void helper_mtc0_context (target_ulong arg1)
1056{
1057    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1058}
1059
1060void helper_mtc0_pagemask (target_ulong arg1)
1061{
1062    /* 1k pages not implemented */
1063    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1064}
1065
1066void helper_mtc0_pagegrain (target_ulong arg1)
1067{
1068    /* SmartMIPS not implemented */
1069    /* Large physaddr (PABITS) not implemented */
1070    /* 1k pages not implemented */
1071    env->CP0_PageGrain = 0;
1072}
1073
1074void helper_mtc0_wired (target_ulong arg1)
1075{
1076    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1077}
1078
1079void helper_mtc0_srsconf0 (target_ulong arg1)
1080{
1081    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1082}
1083
1084void helper_mtc0_srsconf1 (target_ulong arg1)
1085{
1086    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1087}
1088
1089void helper_mtc0_srsconf2 (target_ulong arg1)
1090{
1091    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1092}
1093
1094void helper_mtc0_srsconf3 (target_ulong arg1)
1095{
1096    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1097}
1098
1099void helper_mtc0_srsconf4 (target_ulong arg1)
1100{
1101    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1102}
1103
1104void helper_mtc0_hwrena (target_ulong arg1)
1105{
1106    env->CP0_HWREna = arg1 & 0x0000000F;
1107}
1108
1109void helper_mtc0_count (target_ulong arg1)
1110{
1111    cpu_mips_store_count(env, arg1);
1112}
1113
1114void helper_mtc0_entryhi (target_ulong arg1)
1115{
1116    target_ulong old, val;
1117
1118    /* 1k pages not implemented */
1119    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1120#if defined(TARGET_MIPS64)
1121    val &= env->SEGMask;
1122#endif
1123    old = env->CP0_EntryHi;
1124    env->CP0_EntryHi = val;
1125    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1126        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
1127        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
1128    }
1129    /* If the ASID changes, flush qemu's TLB.  */
1130    if ((old & 0xFF) != (val & 0xFF))
1131        cpu_mips_tlb_flush(env, 1);
1132}
1133
1134void helper_mttc0_entryhi(target_ulong arg1)
1135{
1136    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1137    int32_t tcstatus;
1138
1139    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
1140    if (other_tc == env->current_tc) {
1141        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1142        env->active_tc.CP0_TCStatus = tcstatus;
1143    } else {
1144        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1145        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1146    }
1147}
1148
1149void helper_mtc0_compare (target_ulong arg1)
1150{
1151    cpu_mips_store_compare(env, arg1);
1152}
1153
1154void helper_mtc0_status (target_ulong arg1)
1155{
1156    uint32_t val, old;
1157    uint32_t mask = env->CP0_Status_rw_bitmask;
1158
1159    val = arg1 & mask;
1160    old = env->CP0_Status;
1161    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1162    compute_hflags(env);
1163    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1164        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1165                old, old & env->CP0_Cause & CP0Ca_IP_mask,
1166                val, val & env->CP0_Cause & CP0Ca_IP_mask,
1167                env->CP0_Cause);
1168        switch (env->hflags & MIPS_HFLAG_KSU) {
1169        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1170        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1171        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1172        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1173        }
1174    }
1175    cpu_mips_update_irq(env);
1176}
1177
1178void helper_mttc0_status(target_ulong arg1)
1179{
1180    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1181    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1182
1183    env->CP0_Status = arg1 & ~0xf1000018;
1184    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
1185    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1186    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1187    if (other_tc == env->current_tc)
1188        env->active_tc.CP0_TCStatus = tcstatus;
1189    else
1190        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1191}
1192
1193void helper_mtc0_intctl (target_ulong arg1)
1194{
1195    /* vectored interrupts not implemented, no performance counters. */
1196    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1197}
1198
1199void helper_mtc0_srsctl (target_ulong arg1)
1200{
1201    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1202    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1203}
1204
1205void helper_mtc0_cause (target_ulong arg1)
1206{
1207    uint32_t mask = 0x00C00300;
1208    uint32_t old = env->CP0_Cause;
1209
1210    if (env->insn_flags & ISA_MIPS32R2)
1211        mask |= 1 << CP0Ca_DC;
1212
1213    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
1214
1215    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1216        if (env->CP0_Cause & (1 << CP0Ca_DC))
1217            cpu_mips_stop_count(env);
1218        else
1219            cpu_mips_start_count(env);
1220    }
1221
    /* Handle the software interrupt as a hardware one, as they
       are very similar.  */
1224    if (arg1 & CP0Ca_IP_mask) {
1225        cpu_mips_update_irq(env);
1226    }
1227}
1228
1229void helper_mtc0_ebase (target_ulong arg1)
1230{
1231    /* vectored interrupts not implemented */
1232    /* Multi-CPU not implemented */
1233    env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
1234}
1235
1236void helper_mtc0_config0 (target_ulong arg1)
1237{
1238    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1239}
1240
1241void helper_mtc0_config2 (target_ulong arg1)
1242{
1243    /* tertiary/secondary caches not implemented */
1244    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1245}
1246
1247void helper_mtc0_lladdr (target_ulong arg1)
1248{
1249    target_long mask = env->CP0_LLAddr_rw_bitmask;
1250    arg1 = arg1 << env->CP0_LLAddr_shift;
1251    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1252}
1253
1254void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1255{
1256    /* Watch exceptions for instructions, data loads, data stores
1257       not implemented. */
1258    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1259}
1260
1261void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1262{
1263    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1264    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1265}
1266
1267void helper_mtc0_xcontext (target_ulong arg1)
1268{
1269    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1270    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1271}
1272
1273void helper_mtc0_framemask (target_ulong arg1)
1274{
1275    env->CP0_Framemask = arg1; /* XXX */
1276}
1277
1278void helper_mtc0_debug (target_ulong arg1)
1279{
1280    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1281    if (arg1 & (1 << CP0DB_DM))
1282        env->hflags |= MIPS_HFLAG_DM;
1283    else
1284        env->hflags &= ~MIPS_HFLAG_DM;
1285}
1286
1287void helper_mttc0_debug(target_ulong arg1)
1288{
1289    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1290    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1291
1292    /* XXX: Might be wrong, check with EJTAG spec. */
1293    if (other_tc == env->current_tc)
1294        env->active_tc.CP0_Debug_tcstatus = val;
1295    else
1296        env->tcs[other_tc].CP0_Debug_tcstatus = val;
1297    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1298                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1299}
1300
1301void helper_mtc0_performance0 (target_ulong arg1)
1302{
1303    env->CP0_Performance0 = arg1 & 0x000007ff;
1304}
1305
1306void helper_mtc0_taglo (target_ulong arg1)
1307{
1308    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1309}
1310
1311void helper_mtc0_datalo (target_ulong arg1)
1312{
1313    env->CP0_DataLo = arg1; /* XXX */
1314}
1315
1316void helper_mtc0_taghi (target_ulong arg1)
1317{
1318    env->CP0_TagHi = arg1; /* XXX */
1319}
1320
1321void helper_mtc0_datahi (target_ulong arg1)
1322{
1323    env->CP0_DataHi = arg1; /* XXX */
1324}
1325
1326/* MIPS MT functions */
1327target_ulong helper_mftgpr(uint32_t sel)
1328{
1329    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1330
1331    if (other_tc == env->current_tc)
1332        return env->active_tc.gpr[sel];
1333    else
1334        return env->tcs[other_tc].gpr[sel];
1335}
1336
1337target_ulong helper_mftlo(uint32_t sel)
1338{
1339    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1340
1341    if (other_tc == env->current_tc)
1342        return env->active_tc.LO[sel];
1343    else
1344        return env->tcs[other_tc].LO[sel];
1345}
1346
1347target_ulong helper_mfthi(uint32_t sel)
1348{
1349    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1350
1351    if (other_tc == env->current_tc)
1352        return env->active_tc.HI[sel];
1353    else
1354        return env->tcs[other_tc].HI[sel];
1355}
1356
1357target_ulong helper_mftacx(uint32_t sel)
1358{
1359    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1360
1361    if (other_tc == env->current_tc)
1362        return env->active_tc.ACX[sel];
1363    else
1364        return env->tcs[other_tc].ACX[sel];
1365}
1366
1367target_ulong helper_mftdsp(void)
1368{
1369    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1370
1371    if (other_tc == env->current_tc)
1372        return env->active_tc.DSPControl;
1373    else
1374        return env->tcs[other_tc].DSPControl;
1375}
1376
1377void helper_mttgpr(target_ulong arg1, uint32_t sel)
1378{
1379    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1380
1381    if (other_tc == env->current_tc)
1382        env->active_tc.gpr[sel] = arg1;
1383    else
1384        env->tcs[other_tc].gpr[sel] = arg1;
1385}
1386
1387void helper_mttlo(target_ulong arg1, uint32_t sel)
1388{
1389    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1390
1391    if (other_tc == env->current_tc)
1392        env->active_tc.LO[sel] = arg1;
1393    else
1394        env->tcs[other_tc].LO[sel] = arg1;
1395}
1396
1397void helper_mtthi(target_ulong arg1, uint32_t sel)
1398{
1399    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1400
1401    if (other_tc == env->current_tc)
1402        env->active_tc.HI[sel] = arg1;
1403    else
1404        env->tcs[other_tc].HI[sel] = arg1;
1405}
1406
1407void helper_mttacx(target_ulong arg1, uint32_t sel)
1408{
1409    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1410
1411    if (other_tc == env->current_tc)
1412        env->active_tc.ACX[sel] = arg1;
1413    else
1414        env->tcs[other_tc].ACX[sel] = arg1;
1415}
1416
1417void helper_mttdsp(target_ulong arg1)
1418{
1419    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1420
1421    if (other_tc == env->current_tc)
1422        env->active_tc.DSPControl = arg1;
1423    else
1424        env->tcs[other_tc].DSPControl = arg1;
1425}
1426
1427/* MIPS MT functions */
1428target_ulong helper_dmt(target_ulong arg1)
1429{
1430    // TODO
1431    arg1 = 0;
1432    // rt = arg1
1433
1434    return arg1;
1435}
1436
1437target_ulong helper_emt(target_ulong arg1)
1438{
1439    // TODO
1440    arg1 = 0;
1441    // rt = arg1
1442
1443    return arg1;
1444}
1445
1446target_ulong helper_dvpe(target_ulong arg1)
1447{
1448    // TODO
1449    arg1 = 0;
1450    // rt = arg1
1451
1452    return arg1;
1453}
1454
1455target_ulong helper_evpe(target_ulong arg1)
1456{
1457    // TODO
1458    arg1 = 0;
1459    // rt = arg1
1460
1461    return arg1;
1462}
1463#endif /* !CONFIG_USER_ONLY */
1464
1465void helper_fork(target_ulong arg1, target_ulong arg2)
1466{
1467    // arg1 = rt, arg2 = rs
1468    arg1 = 0;
1469    // TODO: store to TC register
1470}
1471
1472target_ulong helper_yield(target_ulong arg1)
1473{
    /* Negative rs values request special yield behaviour, so compare
       the argument as a signed value.  */
    if ((target_long)arg1 < 0) {
1475        /* No scheduling policy implemented. */
1476        if (arg1 != -2) {
1477            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1478                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1479                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1480                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1481                helper_raise_exception(EXCP_THREAD);
1482            }
1483        }
1484    } else if (arg1 == 0) {
1485        if (0 /* TODO: TC underflow */) {
1486            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1487            helper_raise_exception(EXCP_THREAD);
1488        } else {
1489            // TODO: Deallocate TC
1490        }
1491    } else if (arg1 > 0) {
1492        /* Yield qualifier inputs not implemented. */
1493        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1494        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1495        helper_raise_exception(EXCP_THREAD);
1496    }
1497    return env->CP0_YQMask;
1498}
1499
1500#ifndef CONFIG_USER_ONLY
static inline void r4k_invalidate_tlb_shadow (CPUMIPSState *env, int idx)
1502{
1503    r4k_tlb_t *tlb;
1504    uint8_t ASID = env->CP0_EntryHi & 0xFF;
1505
1506    tlb = &env->tlb->mmu.r4k.tlb[idx];
1507    /* The qemu TLB is flushed when the ASID changes, so no need to
1508    flush these entries again.  */
1509    if (tlb->G == 0 && tlb->ASID != ASID) {
1510        return;
1511    }
1512}
1513
static inline void r4k_invalidate_tlb (CPUMIPSState *env, int idx)
1515{
1516    r4k_tlb_t *tlb;
1517    target_ulong addr;
1518    target_ulong end;
1519    uint8_t ASID = env->CP0_EntryHi & 0xFF;
1520    target_ulong mask;
1521
1522    tlb = &env->tlb->mmu.r4k.tlb[idx];
1523    /* The qemu TLB is flushed when the ASID changes, so no need to
1524    flush these entries again.  */
1525    if (tlb->G == 0 && tlb->ASID != ASID) {
1526        return;
1527    }
1528
1529    /* 1k pages are not supported. */
1530    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1531    if (tlb->V0) {
1532        addr = tlb->VPN & ~mask;
1533#if defined(TARGET_MIPS64)
1534        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1535            addr |= 0x3FFFFF0000000000ULL;
1536        }
1537#endif
1538        end = addr | (mask >> 1);
1539        while (addr < end) {
1540            tlb_flush_page (env, addr);
1541            addr += TARGET_PAGE_SIZE;
1542        }
1543    }
1544    if (tlb->V1) {
1545        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
1546#if defined(TARGET_MIPS64)
1547        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1548            addr |= 0x3FFFFF0000000000ULL;
1549        }
1550#endif
1551        end = addr | mask;
1552        while (addr - 1 < end) {
1553            tlb_flush_page (env, addr);
1554            addr += TARGET_PAGE_SIZE;
1555        }
1556    }
1557}
1558
1559/* TLB management */
1560void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
1561{
1562    /* Flush qemu's TLB and discard all shadowed entries.  */
1563    tlb_flush (env, flush_global);
1564}
1565
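/* Fill TLB entry 'idx' from the CP0 EntryHi/PageMask/EntryLo registers.
   In each EntryLo register, bit 0 is the Global bit (the entry is
   global only if it is set in both EntryLo0 and EntryLo1), bit 1 is
   Valid, bit 2 is Dirty, bits 5:3 hold the cache coherency attribute
   and bits 29:6 the PFN.  */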
1566static void r4k_fill_tlb (int idx)
1567{
1568    r4k_tlb_t *tlb;
1569
1570    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1571    tlb = &env->tlb->mmu.r4k.tlb[idx];
1572    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1573#if defined(TARGET_MIPS64)
1574    tlb->VPN &= env->SEGMask;
1575#endif
1576    tlb->ASID = env->CP0_EntryHi & 0xFF;
1577    tlb->PageMask = env->CP0_PageMask;
1578    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1579    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1580    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1581    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1582    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1583    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1584    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1585    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1586    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1587}
1588
void r4k_helper_ptw_tlbrefill(CPUMIPSState *target_env)
{
    CPUMIPSState *saved_env;
    int r;

    /* Save current 'env' value */
    saved_env = env;
    env = target_env;

    /* Do TLB load on behalf of Page Table Walk */
    r = cpu_mips_get_random(env);
    r4k_invalidate_tlb_shadow(env, r);
    r4k_fill_tlb(r);

    /* Restore 'env' value */
    env = saved_env;
}
1605
1606void r4k_helper_tlbwi (void)
1607{
1608    r4k_tlb_t *tlb;
1609    target_ulong tag;
1610    target_ulong VPN;
1611    target_ulong mask;
1612
    /* If tlbwi is only upgrading the access permissions of the current
     * entry, there is no need to flush the qemu TLB hash table.
     */
1616    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1617    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1618    tag = env->CP0_EntryHi & ~mask;
1619    VPN = tlb->VPN & ~mask;
1620    if (VPN == tag)
1621    {
1622        if (tlb->ASID == (env->CP0_EntryHi & 0xFF))
1623        {
1624            tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1625            tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1626            tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1627            tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1628            tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1629            tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1630            tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1631            tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1632            return;
1633        }
1634    }
1635
    /* Flush the whole qemu TLB cache. */
1637    cpu_mips_tlb_flush (env, 1);
1638
1639    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb);
1640    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
1641}
1642
1643void r4k_helper_tlbwr (void)
1644{
1645    int r = cpu_mips_get_random(env);
1646
1647    r4k_invalidate_tlb_shadow(env, r);
1648    r4k_fill_tlb(r);
1649}
1650
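/* TLBP: probe the TLB for an entry matching EntryHi.  An entry matches
   when its VPN (masked by its page size) equals the VPN in EntryHi and
   it is either global or tagged with the current ASID.  On a hit the
   entry number is written to CP0_Index; on a miss the P bit (bit 31)
   of CP0_Index is set instead.  */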
1651void r4k_helper_tlbp (void)
1652{
1653    r4k_tlb_t *tlb;
1654    target_ulong mask;
1655    target_ulong tag;
1656    target_ulong VPN;
1657    uint8_t ASID;
1658    int i;
1659
1660    ASID = env->CP0_EntryHi & 0xFF;
1661    for (i = 0; i < env->tlb->nb_tlb; i++) {
1662        tlb = &env->tlb->mmu.r4k.tlb[i];
1663        /* 1k pages are not supported. */
1664        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1665        tag = env->CP0_EntryHi & ~mask;
1666        VPN = tlb->VPN & ~mask;
1667        /* Check ASID, virtual page number & size */
1668        if (unlikely((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag)) {
1669            /* TLB match */
1670            env->CP0_Index = i;
1671            break;
1672        }
1673    }
1674    if (i == env->tlb->nb_tlb) {
1675        /* No match.  Discard any shadow entries, if any of them match. */
1676        int index = ((env->CP0_EntryHi>>5)&0x1ff00) | ASID;
1677        index |= (env->CP0_EntryHi>>13)&0x20000;
1678        env->CP0_Index |= 0x80000000;
1679    }
1680}
1681
1682void r4k_helper_tlbr (void)
1683{
1684    r4k_tlb_t *tlb;
1685    uint8_t ASID;
1686
1687    ASID = env->CP0_EntryHi & 0xFF;
1688    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1689
1690    /* If this will change the current ASID, flush qemu's TLB.  */
1691    if (ASID != tlb->ASID)
1692        cpu_mips_tlb_flush (env, 1);
1693
    /* Flush the whole qemu TLB cache. */
1695    cpu_mips_tlb_flush (env, 1);
1696
1697    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1698    env->CP0_PageMask = tlb->PageMask;
1699    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1700                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1701    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1702                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1703}
1704
1705void helper_tlbwi(void)
1706{
1707    env->tlb->helper_tlbwi();
1708}
1709
1710void helper_tlbwr(void)
1711{
1712    env->tlb->helper_tlbwr();
1713}
1714
1715void helper_tlbp(void)
1716{
1717    env->tlb->helper_tlbp();
1718}
1719
1720void helper_tlbr(void)
1721{
1722    env->tlb->helper_tlbr();
1723}
1724
1725/* Specials */
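/* DI/EI return the previous value of CP0_Status and clear or set its
   IE bit, then re-evaluate pending interrupts.  */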
1726target_ulong helper_di (void)
1727{
1728    target_ulong t0 = env->CP0_Status;
1729
1730    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1731    cpu_mips_update_irq(env);
1732
1733    return t0;
1734}
1735
1736target_ulong helper_ei (void)
1737{
1738    target_ulong t0 = env->CP0_Status;
1739
1740    env->CP0_Status = t0 | (1 << CP0St_IE);
1741    cpu_mips_update_irq(env);
1742
1743    return t0;
1744}
1745
1746static void debug_pre_eret (void)
1747{
1748    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1749        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1750                env->active_tc.PC, env->CP0_EPC);
1751        if (env->CP0_Status & (1 << CP0St_ERL))
1752            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1753        if (env->hflags & MIPS_HFLAG_DM)
1754            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1755        qemu_log("\n");
1756    }
1757}
1758
1759static void debug_post_eret (void)
1760{
1761    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1762        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1763                env->active_tc.PC, env->CP0_EPC);
1764        if (env->CP0_Status & (1 << CP0St_ERL))
1765            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1766        if (env->hflags & MIPS_HFLAG_DM)
1767            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1768        switch (env->hflags & MIPS_HFLAG_KSU) {
1769        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1770        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1771        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1772        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1773        }
1774    }
1775}
1776
1777void helper_eret (void)
1778{
1779    debug_pre_eret();
1780    if (env->CP0_Status & (1 << CP0St_ERL)) {
1781        env->active_tc.PC = env->CP0_ErrorEPC;
1782        env->CP0_Status &= ~(1 << CP0St_ERL);
1783    } else {
1784        env->active_tc.PC = env->CP0_EPC;
1785        env->CP0_Status &= ~(1 << CP0St_EXL);
1786    }
1787    compute_hflags(env);
1788    debug_post_eret();
1789    env->lladdr = 1;
1790}
1791
1792void helper_deret (void)
1793{
1794    debug_pre_eret();
1795    env->active_tc.PC = env->CP0_DEPC;
    /* DERET leaves debug mode: clear the DM flag before recomputing
       the remaining hflags.  */
    env->hflags &= ~MIPS_HFLAG_DM;
1797    compute_hflags(env);
1798    debug_post_eret();
1799    env->lladdr = 1;
1800}
1801#endif /* !CONFIG_USER_ONLY */
1802
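/* RDHWR: each hardware register is readable from user mode only when
   the corresponding bit of CP0_HWREna is set (or when the TC has CP0
   access); otherwise a Reserved Instruction exception is raised.  */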
1803target_ulong helper_rdhwr_cpunum(void)
1804{
1805    if ((env->hflags & MIPS_HFLAG_CP0) ||
1806        (env->CP0_HWREna & (1 << 0)))
1807        return env->CP0_EBase & 0x3ff;
1808    else
1809        helper_raise_exception(EXCP_RI);
1810
1811    return 0;
1812}
1813
1814target_ulong helper_rdhwr_synci_step(void)
1815{
1816    if ((env->hflags & MIPS_HFLAG_CP0) ||
1817        (env->CP0_HWREna & (1 << 1)))
1818        return env->SYNCI_Step;
1819    else
1820        helper_raise_exception(EXCP_RI);
1821
1822    return 0;
1823}
1824
1825target_ulong helper_rdhwr_cc(void)
1826{
1827    if ((env->hflags & MIPS_HFLAG_CP0) ||
1828        (env->CP0_HWREna & (1 << 2)))
1829        return env->CP0_Count;
1830    else
1831        helper_raise_exception(EXCP_RI);
1832
1833    return 0;
1834}
1835
1836target_ulong helper_rdhwr_ccres(void)
1837{
1838    if ((env->hflags & MIPS_HFLAG_CP0) ||
1839        (env->CP0_HWREna & (1 << 3)))
1840        return env->CCRes;
1841    else
1842        helper_raise_exception(EXCP_RI);
1843
1844    return 0;
1845}
1846
1847void helper_pmon (int function)
1848{
1849    function /= 2;
1850    switch (function) {
1851    case 2: /* TODO: char inbyte(int waitflag); */
1852        if (env->active_tc.gpr[4] == 0)
1853            env->active_tc.gpr[2] = -1;
1854        /* Fall through */
1855    case 11: /* TODO: char inbyte (void); */
1856        env->active_tc.gpr[2] = -1;
1857        break;
1858    case 3:
1859    case 12:
1860        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1861        break;
1862    case 17:
1863        break;
1864    case 158:
1865        {
1866            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1867            printf("%s", fmt);
1868        }
1869        break;
1870    }
1871}
1872
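/* WAIT instruction: mark the CPU halted and leave the execution loop with
 * EXCP_HLT; the CPU is typically woken again when an interrupt is delivered. */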
1873void helper_wait (void)
1874{
1875    env->halted = 1;
1876    helper_raise_exception(EXCP_HLT);
1877}
1878
1879#if !defined(CONFIG_USER_ONLY)
1880
1881static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1882
1883#define MMUSUFFIX _mmu
1884#define ALIGNED_ONLY
1885
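/* Instantiate the softmmu load/store helpers for 1-, 2-, 4- and 8-byte
 * accesses (the access size is 1 << SHIFT bytes).  ALIGNED_ONLY makes the
 * generated helpers call do_unaligned_access() below on misaligned addresses,
 * which turns them into AdEL/AdES exceptions. */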
1886#define SHIFT 0
1887#include "exec/softmmu_template.h"
1888
1889#define SHIFT 1
1890#include "exec/softmmu_template.h"
1891
1892#define SHIFT 2
1893#include "exec/softmmu_template.h"
1894
1895#define SHIFT 3
1896#include "exec/softmmu_template.h"
1897
1898static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1899{
1900    env->CP0_BadVAddr = addr;
1901    do_restore_state (retaddr);
1902    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1903}
1904
1905void tlb_fill (CPUMIPSState* env1, target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1906{
1907    TranslationBlock *tb;
1908    CPUMIPSState *saved_env;
1909    unsigned long pc;
1910    int ret;
1911
1912    /* XXX: hack to restore env in all cases, even if not called from
1913       generated code */
1914    saved_env = env;
1915    env = env1;
1916    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1917    if (ret) {
1918        if (retaddr) {
1919            /* now we have a real cpu fault */
1920            pc = (unsigned long)retaddr;
1921            tb = tb_find_pc(pc);
1922            if (tb) {
1923                /* the PC is inside the translated code. It means that we have
1924                   a virtual CPU fault */
1925                cpu_restore_state(env, pc);
1926            }
1927        }
1928        helper_raise_exception_err(env->exception_index, env->error_code);
1929    }
1930    env = saved_env;
1931}
1932
1933void do_unassigned_access(hwaddr addr, int is_write, int is_exec,
1934                          int unused, int size)
1935{
1936    if (is_exec)
1937        helper_raise_exception(EXCP_IBE);
1938    else
1939        helper_raise_exception(EXCP_DBE);
1940}
1941/*
1942 * The following functions are address translation helper functions
1943 * for fast memory access in QEMU.
1944 */
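/* v2p() below first probes the softmmu TLB directly; on a hit the host
 * address is the guest address plus the per-entry addend.  On a miss,
 * v2p_mmu() calls tlb_fill() to refill the entry and retries. */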
1945static unsigned long v2p_mmu(target_ulong addr, int is_user)
1946{
1947    int index;
1948    target_ulong tlb_addr;
1949    unsigned long physaddr;
1950    void *retaddr;
1951
1952    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1953redo:
1954    tlb_addr = env->tlb_table[is_user][index].addr_read;
1955    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1956        physaddr = addr + env->tlb_table[is_user][index].addend;
1957    } else {
1958        /* the page is not in the TLB : fill it */
1959        retaddr = GETPC();
1960        tlb_fill(env, addr, 0, is_user, retaddr);
1961        goto redo;
1962    }
1963    return physaddr;
1964}
1965
1966/*
1967 * Translate a virtual address of the simulated OS into the
1968 * corresponding address on the simulation host (not the physical
1969 * address of the simulated OS).
1970 */
1971unsigned long v2p(target_ulong ptr, int is_user)
1972{
1973    CPUMIPSState *saved_env;
1974    int index;
1975    target_ulong addr;
1976    unsigned long physaddr;
1977
1978    saved_env = env;
1979    env = cpu_single_env;
1980    addr = ptr;
1981    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1982    if (__builtin_expect(env->tlb_table[is_user][index].addr_read !=
1983                (addr & TARGET_PAGE_MASK), 0)) {
1984        physaddr = v2p_mmu(addr, is_user);
1985    } else {
1986        physaddr = addr + env->tlb_table[is_user][index].addend;
1987    }
1988    env = saved_env;
1989    return physaddr;
1990}
1991
1992/* copy a string from the simulated virtual space to a buffer in QEMU */
1993void vstrcpy(target_ulong ptr, char *buf, int max)
1994{
1995    char *phys = 0;
1996    unsigned long page = 0;
1997
1998    if (buf == NULL) return;
1999
2000    while (max) {
2001        if ((ptr & TARGET_PAGE_MASK) != page) {
2002            phys = (char *)v2p(ptr, 0);
2003            page = ptr & TARGET_PAGE_MASK;
2004        }
2005        *buf = *phys;
2006        if (*phys == '\0')
2007            return;
2008        ptr ++;
2009        buf ++;
2010        phys ++;
2011        max --;
2012    }
2013}
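/* Hypothetical usage (not taken from this file): copying a guest string
 * argument of a monitor call into a host buffer:
 *
 *     char path[256];
 *     vstrcpy(env->active_tc.gpr[4], path, sizeof(path));
 */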
2014
2015#endif /* !CONFIG_USER_ONLY */
2016
2017/* Complex FPU operations which may need stack space. */
2018
2019#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2020#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2021#define FLOAT_TWO32 make_float32(1 << 30)
2022#define FLOAT_TWO64 make_float64(1ULL << 62)
2023#define FLOAT_QNAN32 0x7fbfffff
2024#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2025#define FLOAT_SNAN32 0x7fffffff
2026#define FLOAT_SNAN64 0x7fffffffffffffffULL
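/* Bit patterns: FLOAT_ONE32 = 0x3f800000 (1.0f), FLOAT_TWO32 = 0x40000000
 * (2.0f), FLOAT_ONE64 = 0x3ff0000000000000 (1.0), FLOAT_TWO64 =
 * 0x4000000000000000 (2.0).  The QNAN/SNAN values are the fixed bit patterns
 * substituted for invalid conversion and arithmetic results below. */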
2027
2028/* convert MIPS rounding mode in FCR31 to IEEE library */
2029static unsigned int ieee_rm[] = {
2030    float_round_nearest_even,
2031    float_round_to_zero,
2032    float_round_up,
2033    float_round_down
2034};
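/* FCR31.RM encoding indexing the table above: 0 = round to nearest (even),
 * 1 = toward zero, 2 = toward +infinity, 3 = toward -infinity. */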
2035
2036#define RESTORE_ROUNDING_MODE \
2037    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2038
2039#define RESTORE_FLUSH_MODE \
2040    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2041
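/* CFC1/CTC1: moves between GPRs and the FPU control registers.  Register 0
 * is FIR (the read-only implementation register), 25/26/28 are the FCCR,
 * FEXR and FENR views of FCSR, and 31 is FCSR itself. */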
2042target_ulong helper_cfc1 (uint32_t reg)
2043{
2044    target_ulong arg1;
2045
2046    switch (reg) {
2047    case 0:
2048        arg1 = (int32_t)env->active_fpu.fcr0;
2049        break;
2050    case 25:
2051        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2052        break;
2053    case 26:
2054        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2055        break;
2056    case 28:
2057        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2058        break;
2059    default:
2060        arg1 = (int32_t)env->active_fpu.fcr31;
2061        break;
2062    }
2063
2064    return arg1;
2065}
2066
2067void helper_ctc1 (target_ulong arg1, uint32_t reg)
2068{
2069    switch(reg) {
2070    case 25:
2071        if (arg1 & 0xffffff00)
2072            return;
2073        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2074                     ((arg1 & 0x1) << 23);
2075        break;
2076    case 26:
2077        if (arg1 & 0x007c0000)
2078            return;
2079        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2080        break;
2081    case 28:
2082        if (arg1 & 0x007c0000)
2083            return;
2084        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2085                     ((arg1 & 0x4) << 22);
2086        break;
2087    case 31:
2088        if (arg1 & 0x007c0000)
2089            return;
2090        env->active_fpu.fcr31 = arg1;
2091        break;
2092    default:
2093        return;
2094    }
2095    /* set rounding mode */
2096    RESTORE_ROUNDING_MODE;
2097    /* set flush-to-zero mode */
2098    RESTORE_FLUSH_MODE;
2099    set_float_exception_flags(0, &env->active_fpu.fp_status);
2100    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2101        helper_raise_exception(EXCP_FPE);
2102}
2103
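/* Translate between the softfloat exception flag bits and the MIPS FCSR
 * flag order (Inexact, Underflow, Overflow, Divide-by-zero, Invalid); the
 * shift counts encode the distance between the two bit layouts. */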
2104static inline char ieee_ex_to_mips(char xcpt)
2105{
2106    return (xcpt & float_flag_inexact) >> 5 |
2107           (xcpt & float_flag_underflow) >> 3 |
2108           (xcpt & float_flag_overflow) >> 1 |
2109           (xcpt & float_flag_divbyzero) << 1 |
2110           (xcpt & float_flag_invalid) << 4;
2111}
2112
2113static inline char mips_ex_to_ieee(char xcpt)
2114{
2115    return (xcpt & FP_INEXACT) << 5 |
2116           (xcpt & FP_UNDERFLOW) << 3 |
2117           (xcpt & FP_OVERFLOW) << 1 |
2118           (xcpt & FP_DIV0) >> 1 |
2119           (xcpt & FP_INVALID) >> 4;
2120}
2121
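/* Fold the accumulated softfloat flags into FCR31: set the Cause field and
 * raise an FP exception if a matching Enable bit is set, otherwise
 * accumulate the bits into the Flags field. */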
2122static inline void update_fcr31(void)
2123{
2124    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2125
2126    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2127    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2128        helper_raise_exception(EXCP_FPE);
2129    else
2130        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2131}
2132
2133/* Float support.
2134   Single precision routines have an "s" suffix, double precision a
2135   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2136   paired single lower "pl", paired single upper "pu".  */
2137
2138/* unary operations, modifying fp status  */
2139uint64_t helper_float_sqrt_d(uint64_t fdt0)
2140{
2141    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2142}
2143
2144uint32_t helper_float_sqrt_s(uint32_t fst0)
2145{
2146    return float32_sqrt(fst0, &env->active_fpu.fp_status);
2147}
2148
2149uint64_t helper_float_cvtd_s(uint32_t fst0)
2150{
2151    uint64_t fdt2;
2152
2153    set_float_exception_flags(0, &env->active_fpu.fp_status);
2154    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2155    update_fcr31();
2156    return fdt2;
2157}
2158
2159uint64_t helper_float_cvtd_w(uint32_t wt0)
2160{
2161    uint64_t fdt2;
2162
2163    set_float_exception_flags(0, &env->active_fpu.fp_status);
2164    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2165    update_fcr31();
2166    return fdt2;
2167}
2168
2169uint64_t helper_float_cvtd_l(uint64_t dt0)
2170{
2171    uint64_t fdt2;
2172
2173    set_float_exception_flags(0, &env->active_fpu.fp_status);
2174    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2175    update_fcr31();
2176    return fdt2;
2177}
2178
2179uint64_t helper_float_cvtl_d(uint64_t fdt0)
2180{
2181    uint64_t dt2;
2182
2183    set_float_exception_flags(0, &env->active_fpu.fp_status);
2184    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2185    update_fcr31();
2186    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2187        dt2 = FLOAT_SNAN64;
2188    return dt2;
2189}
2190
2191uint64_t helper_float_cvtl_s(uint32_t fst0)
2192{
2193    uint64_t dt2;
2194
2195    set_float_exception_flags(0, &env->active_fpu.fp_status);
2196    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2197    update_fcr31();
2198    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2199        dt2 = FLOAT_SNAN64;
2200    return dt2;
2201}
2202
2203uint64_t helper_float_cvtps_pw(uint64_t dt0)
2204{
2205    uint32_t fst2;
2206    uint32_t fsth2;
2207
2208    set_float_exception_flags(0, &env->active_fpu.fp_status);
2209    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2210    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2211    update_fcr31();
2212    return ((uint64_t)fsth2 << 32) | fst2;
2213}
2214
2215uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2216{
2217    uint32_t wt2;
2218    uint32_t wth2;
2219
2220    set_float_exception_flags(0, &env->active_fpu.fp_status);
2221    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2222    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2223    update_fcr31();
2224    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2225        wt2 = FLOAT_SNAN32;
2226        wth2 = FLOAT_SNAN32;
2227    }
2228    return ((uint64_t)wth2 << 32) | wt2;
2229}
2230
2231uint32_t helper_float_cvts_d(uint64_t fdt0)
2232{
2233    uint32_t fst2;
2234
2235    set_float_exception_flags(0, &env->active_fpu.fp_status);
2236    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2237    update_fcr31();
2238    return fst2;
2239}
2240
2241uint32_t helper_float_cvts_w(uint32_t wt0)
2242{
2243    uint32_t fst2;
2244
2245    set_float_exception_flags(0, &env->active_fpu.fp_status);
2246    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2247    update_fcr31();
2248    return fst2;
2249}
2250
2251uint32_t helper_float_cvts_l(uint64_t dt0)
2252{
2253    uint32_t fst2;
2254
2255    set_float_exception_flags(0, &env->active_fpu.fp_status);
2256    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2257    update_fcr31();
2258    return fst2;
2259}
2260
2261uint32_t helper_float_cvts_pl(uint32_t wt0)
2262{
2263    uint32_t wt2;
2264
2265    set_float_exception_flags(0, &env->active_fpu.fp_status);
2266    wt2 = wt0;
2267    update_fcr31();
2268    return wt2;
2269}
2270
2271uint32_t helper_float_cvts_pu(uint32_t wth0)
2272{
2273    uint32_t wt2;
2274
2275    set_float_exception_flags(0, &env->active_fpu.fp_status);
2276    wt2 = wth0;
2277    update_fcr31();
2278    return wt2;
2279}
2280
2281uint32_t helper_float_cvtw_s(uint32_t fst0)
2282{
2283    uint32_t wt2;
2284
2285    set_float_exception_flags(0, &env->active_fpu.fp_status);
2286    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2287    update_fcr31();
2288    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2289        wt2 = FLOAT_SNAN32;
2290    return wt2;
2291}
2292
2293uint32_t helper_float_cvtw_d(uint64_t fdt0)
2294{
2295    uint32_t wt2;
2296
2297    set_float_exception_flags(0, &env->active_fpu.fp_status);
2298    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2299    update_fcr31();
2300    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2301        wt2 = FLOAT_SNAN32;
2302    return wt2;
2303}
2304
2305uint64_t helper_float_roundl_d(uint64_t fdt0)
2306{
2307    uint64_t dt2;
2308
2309    set_float_exception_flags(0, &env->active_fpu.fp_status);
2310    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2311    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2312    RESTORE_ROUNDING_MODE;
2313    update_fcr31();
2314    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2315        dt2 = FLOAT_SNAN64;
2316    return dt2;
2317}
2318
2319uint64_t helper_float_roundl_s(uint32_t fst0)
2320{
2321    uint64_t dt2;
2322
2323    set_float_exception_flags(0, &env->active_fpu.fp_status);
2324    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2325    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2326    RESTORE_ROUNDING_MODE;
2327    update_fcr31();
2328    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2329        dt2 = FLOAT_SNAN64;
2330    return dt2;
2331}
2332
2333uint32_t helper_float_roundw_d(uint64_t fdt0)
2334{
2335    uint32_t wt2;
2336
2337    set_float_exception_flags(0, &env->active_fpu.fp_status);
2338    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2339    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2340    RESTORE_ROUNDING_MODE;
2341    update_fcr31();
2342    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2343        wt2 = FLOAT_SNAN32;
2344    return wt2;
2345}
2346
2347uint32_t helper_float_roundw_s(uint32_t fst0)
2348{
2349    uint32_t wt2;
2350
2351    set_float_exception_flags(0, &env->active_fpu.fp_status);
2352    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2353    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2354    RESTORE_ROUNDING_MODE;
2355    update_fcr31();
2356    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2357        wt2 = FLOAT_SNAN32;
2358    return wt2;
2359}
2360
2361uint64_t helper_float_truncl_d(uint64_t fdt0)
2362{
2363    uint64_t dt2;
2364
2365    set_float_exception_flags(0, &env->active_fpu.fp_status);
2366    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2367    update_fcr31();
2368    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2369        dt2 = FLOAT_SNAN64;
2370    return dt2;
2371}
2372
2373uint64_t helper_float_truncl_s(uint32_t fst0)
2374{
2375    uint64_t dt2;
2376
2377    set_float_exception_flags(0, &env->active_fpu.fp_status);
2378    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2379    update_fcr31();
2380    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2381        dt2 = FLOAT_SNAN64;
2382    return dt2;
2383}
2384
2385uint32_t helper_float_truncw_d(uint64_t fdt0)
2386{
2387    uint32_t wt2;
2388
2389    set_float_exception_flags(0, &env->active_fpu.fp_status);
2390    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2391    update_fcr31();
2392    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2393        wt2 = FLOAT_SNAN32;
2394    return wt2;
2395}
2396
2397uint32_t helper_float_truncw_s(uint32_t fst0)
2398{
2399    uint32_t wt2;
2400
2401    set_float_exception_flags(0, &env->active_fpu.fp_status);
2402    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2403    update_fcr31();
2404    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2405        wt2 = FLOAT_SNAN32;
2406    return wt2;
2407}
2408
2409uint64_t helper_float_ceill_d(uint64_t fdt0)
2410{
2411    uint64_t dt2;
2412
2413    set_float_exception_flags(0, &env->active_fpu.fp_status);
2414    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2415    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2416    RESTORE_ROUNDING_MODE;
2417    update_fcr31();
2418    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2419        dt2 = FLOAT_SNAN64;
2420    return dt2;
2421}
2422
2423uint64_t helper_float_ceill_s(uint32_t fst0)
2424{
2425    uint64_t dt2;
2426
2427    set_float_exception_flags(0, &env->active_fpu.fp_status);
2428    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2429    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2430    RESTORE_ROUNDING_MODE;
2431    update_fcr31();
2432    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2433        dt2 = FLOAT_SNAN64;
2434    return dt2;
2435}
2436
2437uint32_t helper_float_ceilw_d(uint64_t fdt0)
2438{
2439    uint32_t wt2;
2440
2441    set_float_exception_flags(0, &env->active_fpu.fp_status);
2442    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2443    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2444    RESTORE_ROUNDING_MODE;
2445    update_fcr31();
2446    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2447        wt2 = FLOAT_SNAN32;
2448    return wt2;
2449}
2450
2451uint32_t helper_float_ceilw_s(uint32_t fst0)
2452{
2453    uint32_t wt2;
2454
2455    set_float_exception_flags(0, &env->active_fpu.fp_status);
2456    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2457    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2458    RESTORE_ROUNDING_MODE;
2459    update_fcr31();
2460    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2461        wt2 = FLOAT_SNAN32;
2462    return wt2;
2463}
2464
2465uint64_t helper_float_floorl_d(uint64_t fdt0)
2466{
2467    uint64_t dt2;
2468
2469    set_float_exception_flags(0, &env->active_fpu.fp_status);
2470    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2471    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2472    RESTORE_ROUNDING_MODE;
2473    update_fcr31();
2474    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2475        dt2 = FLOAT_SNAN64;
2476    return dt2;
2477}
2478
2479uint64_t helper_float_floorl_s(uint32_t fst0)
2480{
2481    uint64_t dt2;
2482
2483    set_float_exception_flags(0, &env->active_fpu.fp_status);
2484    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2485    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2486    RESTORE_ROUNDING_MODE;
2487    update_fcr31();
2488    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2489        dt2 = FLOAT_SNAN64;
2490    return dt2;
2491}
2492
2493uint32_t helper_float_floorw_d(uint64_t fdt0)
2494{
2495    uint32_t wt2;
2496
2497    set_float_exception_flags(0, &env->active_fpu.fp_status);
2498    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2499    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2500    RESTORE_ROUNDING_MODE;
2501    update_fcr31();
2502    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2503        wt2 = FLOAT_SNAN32;
2504    return wt2;
2505}
2506
2507uint32_t helper_float_floorw_s(uint32_t fst0)
2508{
2509    uint32_t wt2;
2510
2511    set_float_exception_flags(0, &env->active_fpu.fp_status);
2512    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2513    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2514    RESTORE_ROUNDING_MODE;
2515    update_fcr31();
2516    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2517        wt2 = FLOAT_SNAN32;
2518    return wt2;
2519}
2520
2521/* unary operations, not modifying fp status  */
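/* (abs and chs only manipulate the sign bit here, so they cannot raise IEEE
 * exceptions and do not need to touch fp_status.) */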
2522#define FLOAT_UNOP(name)                                       \
2523uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
2524{                                                              \
2525    return float64_ ## name(fdt0);                             \
2526}                                                              \
2527uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
2528{                                                              \
2529    return float32_ ## name(fst0);                             \
2530}                                                              \
2531uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
2532{                                                              \
2533    uint32_t wt0;                                              \
2534    uint32_t wth0;                                             \
2535                                                               \
2536    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
2537    wth0 = float32_ ## name(fdt0 >> 32);                       \
2538    return ((uint64_t)wth0 << 32) | wt0;                       \
2539}
2540FLOAT_UNOP(abs)
2541FLOAT_UNOP(chs)
2542#undef FLOAT_UNOP
2543
2544/* MIPS specific unary operations */
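/* RECIP and RSQRT are emulated with a full-precision divide (and square
 * root); the architecture permits reduced accuracy for these instructions. */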
2545uint64_t helper_float_recip_d(uint64_t fdt0)
2546{
2547    uint64_t fdt2;
2548
2549    set_float_exception_flags(0, &env->active_fpu.fp_status);
2550    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2551    update_fcr31();
2552    return fdt2;
2553}
2554
2555uint32_t helper_float_recip_s(uint32_t fst0)
2556{
2557    uint32_t fst2;
2558
2559    set_float_exception_flags(0, &env->active_fpu.fp_status);
2560    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2561    update_fcr31();
2562    return fst2;
2563}
2564
2565uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2566{
2567    uint64_t fdt2;
2568
2569    set_float_exception_flags(0, &env->active_fpu.fp_status);
2570    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2571    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2572    update_fcr31();
2573    return fdt2;
2574}
2575
2576uint32_t helper_float_rsqrt_s(uint32_t fst0)
2577{
2578    uint32_t fst2;
2579
2580    set_float_exception_flags(0, &env->active_fpu.fp_status);
2581    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2582    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2583    update_fcr31();
2584    return fst2;
2585}
2586
2587uint64_t helper_float_recip1_d(uint64_t fdt0)
2588{
2589    uint64_t fdt2;
2590
2591    set_float_exception_flags(0, &env->active_fpu.fp_status);
2592    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2593    update_fcr31();
2594    return fdt2;
2595}
2596
2597uint32_t helper_float_recip1_s(uint32_t fst0)
2598{
2599    uint32_t fst2;
2600
2601    set_float_exception_flags(0, &env->active_fpu.fp_status);
2602    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2603    update_fcr31();
2604    return fst2;
2605}
2606
2607uint64_t helper_float_recip1_ps(uint64_t fdt0)
2608{
2609    uint32_t fst2;
2610    uint32_t fsth2;
2611
2612    set_float_exception_flags(0, &env->active_fpu.fp_status);
2613    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2614    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2615    update_fcr31();
2616    return ((uint64_t)fsth2 << 32) | fst2;
2617}
2618
2619uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2620{
2621    uint64_t fdt2;
2622
2623    set_float_exception_flags(0, &env->active_fpu.fp_status);
2624    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2625    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2626    update_fcr31();
2627    return fdt2;
2628}
2629
2630uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2631{
2632    uint32_t fst2;
2633
2634    set_float_exception_flags(0, &env->active_fpu.fp_status);
2635    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2636    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2637    update_fcr31();
2638    return fst2;
2639}
2640
2641uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2642{
2643    uint32_t fst2;
2644    uint32_t fsth2;
2645
2646    set_float_exception_flags(0, &env->active_fpu.fp_status);
2647    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2648    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2649    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2650    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2651    update_fcr31();
2652    return ((uint64_t)fsth2 << 32) | fst2;
2653}
2654
2655#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2656
2657/* binary operations */
2658#define FLOAT_BINOP(name)                                          \
2659uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
2660{                                                                  \
2661    uint64_t dt2;                                                  \
2662                                                                   \
2663    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2664    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
2665    update_fcr31();                                                \
2666    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2667        dt2 = FLOAT_QNAN64;                                        \
2668    return dt2;                                                    \
2669}                                                                  \
2670                                                                   \
2671uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
2672{                                                                  \
2673    uint32_t wt2;                                                  \
2674                                                                   \
2675    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2676    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2677    update_fcr31();                                                \
2678    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2679        wt2 = FLOAT_QNAN32;                                        \
2680    return wt2;                                                    \
2681}                                                                  \
2682                                                                   \
2683uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
2684{                                                                  \
2685    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
2686    uint32_t fsth0 = fdt0 >> 32;                                   \
2687    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
2688    uint32_t fsth1 = fdt1 >> 32;                                   \
2689    uint32_t wt2;                                                  \
2690    uint32_t wth2;                                                 \
2691                                                                   \
2692    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2693    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2694    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
2695    update_fcr31();                                                \
2696    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
2697        wt2 = FLOAT_QNAN32;                                        \
2698        wth2 = FLOAT_QNAN32;                                       \
2699    }                                                              \
2700    return ((uint64_t)wth2 << 32) | wt2;                           \
2701}
2702
2703FLOAT_BINOP(add)
2704FLOAT_BINOP(sub)
2705FLOAT_BINOP(mul)
2706FLOAT_BINOP(div)
2707#undef FLOAT_BINOP
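/* For reference, FLOAT_BINOP(add) expands in the single-precision case to
 * roughly the following helper (invalid results are replaced by the default
 * quiet NaN):
 *
 *     uint32_t helper_float_add_s(uint32_t fst0, uint32_t fst1)
 *     {
 *         uint32_t wt2;
 *
 *         set_float_exception_flags(0, &env->active_fpu.fp_status);
 *         wt2 = float32_add(fst0, fst1, &env->active_fpu.fp_status);
 *         update_fcr31();
 *         if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)
 *             wt2 = FLOAT_QNAN32;
 *         return wt2;
 *     }
 */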
2708
2709/* ternary operations */
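/* Note: the generated muladd/mulsub helpers round the intermediate product
 * (separate multiply then add/sub, not a fused operation) and do not call
 * update_fcr31(), so FCR31 is left untouched here. */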
2710#define FLOAT_TERNOP(name1, name2)                                        \
2711uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
2712                                           uint64_t fdt2)                 \
2713{                                                                         \
2714    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2715    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2716}                                                                         \
2717                                                                          \
2718uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
2719                                           uint32_t fst2)                 \
2720{                                                                         \
2721    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2722    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2723}                                                                         \
2724                                                                          \
2725uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
2726                                            uint64_t fdt2)                \
2727{                                                                         \
2728    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2729    uint32_t fsth0 = fdt0 >> 32;                                          \
2730    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2731    uint32_t fsth1 = fdt1 >> 32;                                          \
2732    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2733    uint32_t fsth2 = fdt2 >> 32;                                          \
2734                                                                          \
2735    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2736    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2737    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2738    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2739    return ((uint64_t)fsth2 << 32) | fst2;                                \
2740}
2741
2742FLOAT_TERNOP(mul, add)
2743FLOAT_TERNOP(mul, sub)
2744#undef FLOAT_TERNOP
2745
2746/* negated ternary operations */
2747#define FLOAT_NTERNOP(name1, name2)                                       \
2748uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
2749                                           uint64_t fdt2)                 \
2750{                                                                         \
2751    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2752    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2753    return float64_chs(fdt2);                                             \
2754}                                                                         \
2755                                                                          \
2756uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
2757                                           uint32_t fst2)                 \
2758{                                                                         \
2759    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2760    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2761    return float32_chs(fst2);                                             \
2762}                                                                         \
2763                                                                          \
2764uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
2765                                           uint64_t fdt2)                 \
2766{                                                                         \
2767    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2768    uint32_t fsth0 = fdt0 >> 32;                                          \
2769    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2770    uint32_t fsth1 = fdt1 >> 32;                                          \
2771    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2772    uint32_t fsth2 = fdt2 >> 32;                                          \
2773                                                                          \
2774    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2775    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2776    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2777    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2778    fst2 = float32_chs(fst2);                                             \
2779    fsth2 = float32_chs(fsth2);                                           \
2780    return ((uint64_t)fsth2 << 32) | fst2;                                \
2781}
2782
2783FLOAT_NTERNOP(mul, add)
2784FLOAT_NTERNOP(mul, sub)
2785#undef FLOAT_NTERNOP
2786
2787/* MIPS specific binary operations */
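/* RECIP2 and RSQRT2 (paired with RECIP1/RSQRT1 above) provide the
 * refinement step of the MIPS-3D Newton-Raphson sequences: recip2 computes
 * -(fs * ft - 1) and rsqrt2 computes -(fs * ft - 1) / 2. */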
2788uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2789{
2790    set_float_exception_flags(0, &env->active_fpu.fp_status);
2791    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2792    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2793    update_fcr31();
2794    return fdt2;
2795}
2796
2797uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2798{
2799    set_float_exception_flags(0, &env->active_fpu.fp_status);
2800    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2801    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2802    update_fcr31();
2803    return fst2;
2804}
2805
2806uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2807{
2808    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2809    uint32_t fsth0 = fdt0 >> 32;
2810    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2811    uint32_t fsth2 = fdt2 >> 32;
2812
2813    set_float_exception_flags(0, &env->active_fpu.fp_status);
2814    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2815    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2816    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2817    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2818    update_fcr31();
2819    return ((uint64_t)fsth2 << 32) | fst2;
2820}
2821
2822uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2823{
2824    set_float_exception_flags(0, &env->active_fpu.fp_status);
2825    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2826    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2827    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2828    update_fcr31();
2829    return fdt2;
2830}
2831
2832uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2833{
2834    set_float_exception_flags(0, &env->active_fpu.fp_status);
2835    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2836    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2837    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2838    update_fcr31();
2839    return fst2;
2840}
2841
2842uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
2843{
2844    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2845    uint32_t fsth0 = fdt0 >> 32;
2846    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2847    uint32_t fsth2 = fdt2 >> 32;
2848
2849    set_float_exception_flags(0, &env->active_fpu.fp_status);
2850    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2851    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2852    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2853    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
2854    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2855    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
2856    update_fcr31();
2857    return ((uint64_t)fsth2 << 32) | fst2;
2858}
2859
2860uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
2861{
2862    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2863    uint32_t fsth0 = fdt0 >> 32;
2864    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2865    uint32_t fsth1 = fdt1 >> 32;
2866    uint32_t fst2;
2867    uint32_t fsth2;
2868
2869    set_float_exception_flags(0, &env->active_fpu.fp_status);
2870    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
2871    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
2872    update_fcr31();
2873    return ((uint64_t)fsth2 << 32) | fst2;
2874}
2875
2876uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
2877{
2878    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2879    uint32_t fsth0 = fdt0 >> 32;
2880    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2881    uint32_t fsth1 = fdt1 >> 32;
2882    uint32_t fst2;
2883    uint32_t fsth2;
2884
2885    set_float_exception_flags(0, &env->active_fpu.fp_status);
2886    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
2887    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
2888    update_fcr31();
2889    return ((uint64_t)fsth2 << 32) | fst2;
2890}
2891
2892/* compare operations */
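/* C.cond.fmt: evaluate the predicate and set or clear FP condition code
 * "cc" accordingly; the cmpabs_* variants compare absolute values
 * (CABS.cond.fmt from the MIPS-3D ASE). */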
2893#define FOP_COND_D(op, cond)                                   \
2894void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
2895{                                                              \
2896    int c = cond;                                              \
2897    update_fcr31();                                            \
2898    if (c)                                                     \
2899        SET_FP_COND(cc, env->active_fpu);                      \
2900    else                                                       \
2901        CLEAR_FP_COND(cc, env->active_fpu);                    \
2902}                                                              \
2903void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
2904{                                                              \
2905    int c;                                                     \
2906    fdt0 = float64_abs(fdt0);                                  \
2907    fdt1 = float64_abs(fdt1);                                  \
2908    c = cond;                                                  \
2909    update_fcr31();                                            \
2910    if (c)                                                     \
2911        SET_FP_COND(cc, env->active_fpu);                      \
2912    else                                                       \
2913        CLEAR_FP_COND(cc, env->active_fpu);                    \
2914}
2915
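/* "sig" selects signaling-compare semantics: when set (used by the second
 * group of predicates below) any NaN operand raises the Invalid flag; when
 * clear, only a signaling NaN does.  Either way a NaN operand makes the
 * comparison unordered. */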
2916static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
2917{
2918    if (float64_is_signaling_nan(a) ||
2919        float64_is_signaling_nan(b) ||
2920        (sig && (float64_is_any_nan(a) || float64_is_any_nan(b)))) {
2921        float_raise(float_flag_invalid, status);
2922        return 1;
2923    } else if (float64_is_any_nan(a) || float64_is_any_nan(b)) {
2924        return 1;
2925    } else {
2926        return 0;
2927    }
2928}
2929
2930/* NOTE: the comma operator makes "cond" evaluate to false,
2931 * but float*_is_unordered() is still called. */
2932FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
2933FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
2934FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2935FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2936FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2937FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2938FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2939FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2940/* NOTE: the comma operator makes "cond" evaluate to false,
2941 * but float*_is_unordered() is still called. */
2942FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
2943FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
2944FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2945FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2946FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2947FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2948FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2949FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2950
2951#define FOP_COND_S(op, cond)                                   \
2952void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
2953{                                                              \
2954    int c = cond;                                              \
2955    update_fcr31();                                            \
2956    if (c)                                                     \
2957        SET_FP_COND(cc, env->active_fpu);                      \
2958    else                                                       \
2959        CLEAR_FP_COND(cc, env->active_fpu);                    \
2960}                                                              \
2961void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
2962{                                                              \
2963    int c;                                                     \
2964    fst0 = float32_abs(fst0);                                  \
2965    fst1 = float32_abs(fst1);                                  \
2966    c = cond;                                                  \
2967    update_fcr31();                                            \
2968    if (c)                                                     \
2969        SET_FP_COND(cc, env->active_fpu);                      \
2970    else                                                       \
2971        CLEAR_FP_COND(cc, env->active_fpu);                    \
2972}
2973
2974static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
2975{
2976    if (float32_is_signaling_nan(a) ||
2977        float32_is_signaling_nan(b) ||
2978        (sig && (float32_is_any_nan(a) || float32_is_any_nan(b)))) {
2979        float_raise(float_flag_invalid, status);
2980        return 1;
2981    } else if (float32_is_any_nan(a) || float32_is_any_nan(b)) {
2982        return 1;
2983    } else {
2984        return 0;
2985    }
2986}
2987
2988/* NOTE: the comma operator makes "cond" evaluate to false,
2989 * but float*_is_unordered() is still called. */
2990FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
2991FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
2992FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2993FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2994FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2995FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2996FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
2997FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
2998/* NOTE: the comma operator makes "cond" evaluate to false,
2999 * but float*_is_unordered() is still called. */
3000FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
3001FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
3002FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3003FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3004FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3005FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3006FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
3007FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3008
3009#define FOP_COND_PS(op, condl, condh)                           \
3010void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
3011{                                                               \
3012    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                          \
3013    uint32_t fsth0 = fdt0 >> 32;                                \
3014    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                          \
3015    uint32_t fsth1 = fdt1 >> 32;                                \
3016    int cl = condl;                                             \
3017    int ch = condh;                                             \
3018                                                                \
3019    update_fcr31();                                             \
3020    if (cl)                                                     \
3021        SET_FP_COND(cc, env->active_fpu);                       \
3022    else                                                        \
3023        CLEAR_FP_COND(cc, env->active_fpu);                     \
3024    if (ch)                                                     \
3025        SET_FP_COND(cc + 1, env->active_fpu);                   \
3026    else                                                        \
3027        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3028}                                                               \
3029void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3030{                                                               \
3031    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
3032    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
3033    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
3034    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
3035    int cl = condl;                                             \
3036    int ch = condh;                                             \
3037                                                                \
3038    update_fcr31();                                             \
3039    if (cl)                                                     \
3040        SET_FP_COND(cc, env->active_fpu);                       \
3041    else                                                        \
3042        CLEAR_FP_COND(cc, env->active_fpu);                     \
3043    if (ch)                                                     \
3044        SET_FP_COND(cc + 1, env->active_fpu);                   \
3045    else                                                        \
3046        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3047}
3048
3049/* NOTE: the comma operator makes "cond" evaluate to false,
3050 * but float*_is_unordered() is still called. */
3051FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
3052                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3053FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
3054                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
3055FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3056                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3057FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3058                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3059FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3060                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3061FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3062                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3063FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3064                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3065FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3066                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3067/* NOTE: the comma operator makes "cond" evaluate to false,
3068 * but float*_is_unordered() is still called. */
3069FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
3070                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3071FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
3072                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
3073FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3074                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3075FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3076                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3077FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3078                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3079FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3080                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3081FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3082                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3083FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3084                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3085