/*
 * Contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all
 * interrupts and faults that can result in a task-switch.
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

/* NOTE: This code handles signal recognition, which happens every time
 * after a timer interrupt and after each system call.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/blackfin.h>
#include <asm/errno.h>
#include <asm/fixed_code.h>
#include <asm/thread_info.h>  /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>

#include <asm/context.S>

#if defined(CONFIG_BFIN_SCRATCH_REG_RETN)
# define EX_SCRATCH_REG RETN
#elif defined(CONFIG_BFIN_SCRATCH_REG_RETE)
# define EX_SCRATCH_REG RETE
#else
# define EX_SCRATCH_REG CYCLES
#endif

#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
#else
.text
#endif

/* Slightly simplified and streamlined entry point for CPLB misses.
 * This one does not lower the level to IRQ5, and thus can be used to
 * patch up CPLB misses on the kernel stack.
 */
#if ANOMALY_05000261
#define _ex_dviol _ex_workaround_261
#define _ex_dmiss _ex_workaround_261
#define _ex_dmult _ex_workaround_261

ENTRY(_ex_workaround_261)
	/*
	 * Work around an anomaly: if we see a new DCPLB fault, return
	 * without doing anything.  Then, if we get the same fault again,
	 * handle it.
	 */
	P4 = R7;	/* Store EXCAUSE */

	GET_PDA(p5, r7);
	r7 = [p5 + PDA_LFRETX];
	r6 = retx;
	[p5 + PDA_LFRETX] = r6;
	cc = r6 == r7;
	if !cc jump _bfin_return_from_exception;
	/* fall through */
	R7 = P4;
	R6 = VEC_CPLB_M;	/* Data CPLB Miss */
	cc = R6 == R7;
	if cc jump _ex_dcplb_miss (BP);
#ifdef CONFIG_MPU
	R6 = VEC_CPLB_VL;	/* Data CPLB Violation */
	cc = R6 == R7;
	if cc jump _ex_dcplb_viol (BP);
#endif
	/* Handle Data CPLB Protection Violation
	 * and Data CPLB Multiple Hits - Linux Trap Zero
	 */
	jump _ex_trap_c;
ENDPROC(_ex_workaround_261)
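
/* Roughly, the workaround above behaves like this C sketch (an
 * illustrative sketch only, not generated from the assembly; the names
 * are descriptive, not real symbols):
 *
 *	old = pda->lfretx;
 *	pda->lfretx = retx;
 *	if (retx != old)		// first fault at this insn: just retry it
 *		return_from_exception();
 *	switch (excause) {		// same insn faulted twice: dispatch
 *	case VEC_CPLB_M:  ex_dcplb_miss(); break;
 *	case VEC_CPLB_VL: ex_dcplb_viol(); break;	// CONFIG_MPU only
 *	default:          ex_trap_c();
 *	}
 */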

#else
#ifdef CONFIG_MPU
#define _ex_dviol _ex_dcplb_viol
#else
#define _ex_dviol _ex_trap_c
#endif
#define _ex_dmiss _ex_dcplb_miss
#define _ex_dmult _ex_trap_c
#endif


ENTRY(_ex_dcplb_viol)
ENTRY(_ex_dcplb_miss)
ENTRY(_ex_icplb_miss)
	(R7:6,P5:4) = [sp++];
	/* We leave the previously pushed ASTAT on the stack.  */
	SAVE_CONTEXT_CPLB

	/* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
	 * will change the stack pointer.  */
	R0 = SEQSTAT;
	R1 = SP;

	DEBUG_HWTRACE_SAVE(p5, r7)

	sp += -12;
	call _cplb_hdr;
	sp += 12;
	CC = R0 == 0;
	IF !CC JUMP _handle_bad_cplb;

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* While we were processing this, did we double fault? */
	r7 = SEQSTAT;           /* reason code is in bits 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	r6 = 0x25;
	CC = R7 == R6;
	if CC JUMP _double_fault;
#endif

	DEBUG_HWTRACE_RESTORE(p5, r7)
	RESTORE_CONTEXT_CPLB
	ASTAT = [SP++];
	SP = EX_SCRATCH_REG;
	rtx;
ENDPROC(_ex_icplb_miss)
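
/* The fast path above is, in rough C terms (an illustrative sketch only;
 * _cplb_hdr is the real C handler, the rest is descriptive):
 *
 *	save_cplb_context();
 *	if (cplb_hdr(seqstat, regs) != 0)
 *		handle_bad_cplb();	// falls back to trap_c via IRQ5
 *	restore_cplb_context();
 *	// return with RTX, never having left exception (EVT3) priority
 */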

ENTRY(_ex_syscall)
	raise 15;		/* invoked by TRAP #0, for sys call */
	jump.s _bfin_return_from_exception;
ENDPROC(_ex_syscall)

ENTRY(_ex_single_step)
	/* If we just returned from an interrupt, the single step event is
	   for the RTI instruction.  */
	r7 = retx;
	r6 = reti;
	cc = r7 == r6;
	if cc jump _bfin_return_from_exception;

#ifdef CONFIG_KGDB
	/* Don't do a single step in the hardware exception handler */
	p5.l = lo(IPEND);
	p5.h = hi(IPEND);
	r6 = [p5];
	cc = bittst(r6, 4);
	if cc jump _bfin_return_from_exception;
	cc = bittst(r6, 5);
	if cc jump _bfin_return_from_exception;

	/* Skip the single step if the current interrupt priority is higher
	 * than that of the first instruction from which gdb starts the
	 * single step */
	r6 >>= 6;
	r7 = 10;
.Lfind_priority_start:
	cc = bittst(r6, 0);
	if cc jump .Lfind_priority_done;
	r6 >>= 1;
	r7 += -1;
	cc = r7 == 0;
	if cc jump .Lfind_priority_done;
	jump.s .Lfind_priority_start;
.Lfind_priority_done:
	p4.l = _kgdb_single_step;
	p4.h = _kgdb_single_step;
	r6 = [p4];
	cc = r6 == 0;
	if cc jump .Ldo_single_step;
	r6 += -1;
	cc = r6 < r7;
	if cc jump 1f;
.Ldo_single_step:
#else
	/* If we were in user mode, do the single step normally.  */
	p5.l = lo(IPEND);
	p5.h = hi(IPEND);
	r6 = [p5];
	r7 = 0xffe0 (z);
	r7 = r7 & r6;
	cc = r7 == 0;
	if !cc jump 1f;
#endif
#ifdef CONFIG_EXACT_HWERR
	/* Read the ILAT and check to see if the process we are single
	 * stepping caused a previous hardware error.
	 * If so, do not single step (which lowers to IRQ5, and makes
	 * us miss the error).
	 */
	p5.l = lo(ILAT);
	p5.h = hi(ILAT);
	r7 = [p5];
	cc = bittst(r7, EVT_IVHW_P);
	if cc jump 1f;
#endif
	/* We are single stepping only a single instruction, so clear the
	 * trace bit here.  */
	r7 = syscfg;
	bitclr (r7, SYSCFG_SSSTEP_P);
	syscfg = R7;
	jump _ex_trap_c;

1:
	/*
	 * We were in an interrupt handler.  By convention, all of them save
	 * SYSCFG with their first instruction, so by checking whether our
	 * RETX points at the entry point, we can determine whether to allow
	 * a single step, or whether to clear SYSCFG.
	 *
	 * First, find out the interrupt level and the event vector for it.
	 */
	p5.l = lo(EVT0);
	p5.h = hi(EVT0);
	p5 += -4;
2:
	r7 = rot r7 by -1;
	p5 += 4;
	if !cc jump 2b;

	/* What we actually do is test for the _second_ instruction in the
	 * IRQ handler.  That way, if there are insns following the restore
	 * of SYSCFG after leaving the handler, we will not turn off SYSCFG
	 * for them.  */

	r7 = [p5];
	r7 += 2;
	r6 = RETX;
	cc = R7 == R6;
	if !cc jump _bfin_return_from_exception;

	r7 = syscfg;
	bitclr (r7, SYSCFG_SSSTEP_P);	/* Turn off single step */
	syscfg = R7;

	/* Fall through to _bfin_return_from_exception.  */
ENDPROC(_ex_single_step)
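
/* The decision logic of _ex_single_step, roughly (an illustrative sketch
 * only, not generated code):
 *
 *	if (retx == reti)
 *		return;			// the step event was for the RTI itself
 *	if (returning_to_user_mode(IPEND))	// KGDB builds use a finer test
 *		goto deliver;		// clear SYSCFG.SSSTEP, then trap_c
 *	// We are in an interrupt handler: only deliver the step if RETX
 *	// sits just past the handler's first insn (which saved SYSCFG);
 *	// otherwise clear SSSTEP and return without delivering anything.
 */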

ENTRY(_bfin_return_from_exception)
#if ANOMALY_05000257
	R7=LC0;
	LC0=R7;
	R7=LC1;
	LC1=R7;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* While we were processing the current exception,
	 * did we cause another, and double fault?
	 */
	r7 = SEQSTAT;           /* reason code is in bits 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	r6 = VEC_UNCOV;
	CC = R7 == R6;
	if CC JUMP _double_fault;
#endif

	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	sp = EX_SCRATCH_REG;
	rtx;
ENDPROC(_bfin_return_from_exception)

ENTRY(_handle_bad_cplb)
	DEBUG_HWTRACE_RESTORE(p5, r7)
	/* To get here, we just tried and failed to change a CPLB,
	 * so handle things in trap_c (C code) by lowering to
	 * IRQ5, just like we normally do.  Since this is not a
	 * "normal" return path, we have to do a lot of work on
	 * the stack to get ready, so we can fall through - we
	 * need to make a CPLB exception look like a normal exception.
	 */
	RESTORE_CONTEXT_CPLB
	/* ASTAT is still on the stack, where it is needed.  */
	[--sp] = (R7:6,P5:4);

ENTRY(_ex_replaceable)
	nop;

ENTRY(_ex_trap_c)
	/* The only thing that has been saved in this context is
	 * (R7:6,P5:4), ASTAT & SP - don't use anything else.
	 */

	GET_PDA(p5, r6);

	/* Make sure we are not in a double fault */
	p4.l = lo(IPEND);
	p4.h = hi(IPEND);
	r7 = [p4];
	CC = BITTST (r7, 5);
	if CC jump _double_fault;
	[p5 + PDA_EXIPEND] = r7;

	/* Call C code (trap_c) to handle the exception, which most
	 * likely involves sending a signal to the current process.
	 * To avoid double faults, lower our priority to IRQ5 first.
	 */
	r7.h = _exception_to_level5;
	r7.l = _exception_to_level5;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = r7;
	csync;

	/*
	 * Save these registers, as they are only valid in exception context
	 * (where we are now - as soon as we defer to IRQ5, they can change).
	 * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
	 * but they are not very interesting, so don't save them.
	 */

	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DCPLB] = r7;

	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r6 = [p4];
	[p5 + PDA_ICPLB] = r6;

	r6 = retx;
	[p5 + PDA_RETX] = r6;

	r6 = SEQSTAT;
	[p5 + PDA_SEQSTAT] = r6;

	/* Save the state of single stepping */
	r6 = SYSCFG;
	[p5 + PDA_SYSCFG] = r6;
	/* Clear it while we handle the exception in IRQ5 mode */
	BITCLR(r6, SYSCFG_SSSTEP_P);
	SYSCFG = r6;

	/* Save the current IMASK, since we will change it in order to jump
	 * to level 5 */
	cli r6;
	[p5 + PDA_EXIMASK] = r6;

	p4.l = lo(SAFE_USER_INSTRUCTION);
	p4.h = hi(SAFE_USER_INSTRUCTION);
	retx = p4;

	/* Disable all interrupts, but make sure level 5 is enabled so
	 * we can switch to that level.
	 */
	r6 = 0x3f;
	sti r6;

	/* In case interrupts are disabled via IPEND[4] (the global interrupt
	 * disable bit), clear it (re-enabling interrupts again) with the
	 * special sequence of pushing RETI onto the stack.  This way we can
	 * lower ourselves to IVG5 even if the exception was taken after the
	 * interrupt handler was called but before it got a chance to enable
	 * global interrupts itself.
	 */
	[--sp] = reti;
	sp += 4;

	raise 5;
	jump.s _bfin_return_from_exception;
ENDPROC(_ex_trap_c)
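
/* In outline, _ex_trap_c defers the real work to IRQ5 (an illustrative
 * sketch only):
 *
 *	EVT5 = exception_to_level5;	// temporarily hijack the hw-error vector
 *	pda->dcplb = DCPLB_FAULT_ADDR;	// latch the exception-only MMRs now;
 *	pda->icplb = ICPLB_FAULT_ADDR;	// they may change once we leave EVT3
 *	pda->retx = retx;
 *	pda->seqstat = SEQSTAT;
 *	raise(5);			// IRQ5 runs as soon as we RTX out
 */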

/* We just realized we got an exception, while we were processing a different
 * exception. This is an unrecoverable event, so crash.
 * Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
 */
ENTRY(_double_fault)
	/* Turn caches & protection off, to ensure we don't get any more
	 * double exceptions
	 */

	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4];              /* Control Register */
	BITCLR(R5,ENICPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	/* Fix up the stack */
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	SP = EX_SCRATCH_REG;

	/* We should be out of the exception stack, and back down into
	 * the kernel or user space stack
	 */
	SAVE_ALL_SYS

	/* The dumping functions expect the return address in the RETI
	 * slot.  */
	r6 = retx;
	[sp + PT_PC] = r6;

	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _double_fault_c, p5;
	SP += 12;
.L_double_fault_panic:
	JUMP .L_double_fault_panic;

ENDPROC(_double_fault)

ENTRY(_exception_to_level5)
	SAVE_ALL_SYS

	GET_PDA(p5, r7);        /* Fetch current PDA */
	r6 = [p5 + PDA_RETX];
	[sp + PT_PC] = r6;

	r6 = [p5 + PDA_SYSCFG];
	[sp + PT_SYSCFG] = r6;

	r6 = [p5 + PDA_SEQSTAT]; /* Read back seqstat */
	[sp + PT_SEQSTAT] = r6;

	/* Restore the hardware error vector.  */
	r7.h = _evt_ivhw;
	r7.l = _evt_ivhw;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = r7;
	csync;

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* Now that we have the hardware error vector programmed properly,
	 * we can re-enable interrupts (IPEND[4]), so if trap_c causes
	 * another hardware error, we can catch it (self-nesting).
	 */
	[--sp] = reti;
	sp += 4;
#endif

	r7 = [p5 + PDA_EXIPEND];	/* Read the IPEND from the exception state */
	[sp + PT_IPEND] = r7;		/* Store IPEND onto the stack */

	r0 = sp; 	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p4;
	SP += 12;

	/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
	 * before we return.
	 */
	CC = BITTST(r7, EVT_IRPTEN_P);
	if !CC jump 1f;
	/* This will load a random value into the reti register - but that is OK,
	 * since we do restore it to the correct value in the 'RESTORE_ALL_SYS'
	 * macro.
	 */
	sp += -4;
	reti = [sp++];
1:
	/* restore the interrupt mask (IMASK) */
	r6 = [p5 + PDA_EXIMASK];
	sti r6;

	call _ret_from_exception;
	RESTORE_ALL_SYS
	rti;
ENDPROC(_exception_to_level5)
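
/* _exception_to_level5 then runs at IRQ5 and, approximately, does (an
 * illustrative sketch only):
 *
 *	regs = save_all_sys();
 *	regs->pc = pda->retx;		// report the faulting PC, not IRQ5's
 *	regs->seqstat = pda->seqstat;
 *	EVT5 = evt_ivhw;		// give the hw-error vector back
 *	trap_c(regs);			// may deliver a signal
 *	ret_from_exception();
 */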

ENTRY(_trap) /* Exception: 4th entry into system event table (supervisor mode) */
	/* Since the kernel stack can be anywhere, it's not guaranteed to be
	 * covered by a CPLB.  Switch to an exception stack; use RETN as a
	 * scratch register (for want of a better option).
	 */
	EX_SCRATCH_REG = sp;
	GET_PDA_SAFE(sp);
	sp = [sp + PDA_EXSTACK];
	/* Try to deal with syscalls quickly.  */
	[--sp] = ASTAT;
	[--sp] = (R7:6,P5:4);

	ANOMALY_283_315_WORKAROUND(p5, r7)

#ifdef CONFIG_EXACT_HWERR
	/* Make sure all pending reads/writes complete. This ensures that any
	 * accesses which could cause hardware errors complete, and signal
	 * the hardware, before we do something silly like crash the
	 * kernel. We don't need to work around anomaly 05000312, since
	 * we are already atomic.
	 */
	ssync;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/*
	 * Save these registers, as they are only valid in exception context
	 * (where we are now - as soon as we defer to IRQ5, they can change).
	 * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
	 * but they are not very interesting, so don't save them.
	 */

	GET_PDA(p5, r7);
	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DF_DCPLB] = r7;

	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DF_ICPLB] = r7;

	r7 = retx;
	[p5 + PDA_DF_RETX] = r7;

	r7 = SEQSTAT;		/* reason code is in bits 5:0 */
	[p5 + PDA_DF_SEQSTAT] = r7;
#else
	r7 = SEQSTAT;           /* reason code is in bits 5:0 */
#endif
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	p5.h = _ex_table;
	p5.l = _ex_table;
	p4 = r7;
	p5 = p5 + (p4 << 2);
	p4 = [p5];
	jump (p4);

.Lbadsys:
	r7 = -ENOSYS; 		/* sign-extending is enough */
	[sp + PT_R0] = r7;	/* return value from system call */
	jump .Lsyscall_really_exit;
ENDPROC(_trap)
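
/* The dispatch at the end of _trap is equivalent to this C sketch (an
 * illustrative sketch only):
 *
 *	void (*handler)(void);
 *	handler = ex_table[seqstat & SEQSTAT_EXCAUSE];	// EXCAUSE[5:0]
 *	handler();	// e.g. _ex_syscall, _ex_dcplb_miss, _ex_trap_c, ...
 */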

ENTRY(_kernel_execve)
	link SIZEOF_PTREGS;
	p0 = sp;
	r3 = SIZEOF_PTREGS / 4;
	r4 = 0(x);
.Lclear_regs:
	[p0++] = r4;
	r3 += -1;
	cc = r3 == 0;
	if !cc jump .Lclear_regs (bp);

	p0 = sp;
	sp += -16;
	[sp + 12] = p0;
	pseudo_long_call _do_execve, p5;
	SP += 16;
	cc = r0 == 0;
	if ! cc jump .Lexecve_failed;
	/* Success.  Copy our temporary pt_regs to the top of the kernel
	 * stack and do a normal exception return.
	 */
	r1 = sp;
	r0 = (-KERNEL_STACK_SIZE) (x);
	r1 = r1 & r0;
	p2 = r1;
	p3 = [p2];
	r0 = KERNEL_STACK_SIZE - 4 (z);
	p1 = r0;
	p1 = p1 + p2;

	p0 = fp;
	r4 = [p0--];
	r3 = SIZEOF_PTREGS / 4;
.Lcopy_regs:
	r4 = [p0--];
	[p1--] = r4;
	r3 += -1;
	cc = r3 == 0;
	if ! cc jump .Lcopy_regs (bp);

	r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
	p1 = r0;
	p1 = p1 + p2;
	sp = p1;
	r0 = syscfg;
	[SP + PT_SYSCFG] = r0;
	[p3 + (TASK_THREAD + THREAD_KSP)] = sp;

	RESTORE_CONTEXT;
	rti;
.Lexecve_failed:
	unlink;
	rts;
ENDPROC(_kernel_execve)
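
/* _kernel_execve builds a zeroed pt_regs in its own frame, lets
 * do_execve() fill it in, then copies it to the top of the kernel stack
 * so a normal exception return starts the new program.  Roughly (an
 * illustrative sketch only):
 *
 *	struct pt_regs regs = { 0 };
 *	if (do_execve(..., &regs))
 *		return;				// failure: plain rts
 *	memcpy(top_of_kernel_stack - sizeof(regs), &regs, sizeof(regs));
 *	restore_context_and_rti();		// never returns on success
 */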

ENTRY(_system_call)
	/* Store IPEND */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* Store RETS for now */
	r0 = rets;
	[sp + PT_RESERVED] = r0;
	/* Set the stack for the current process */
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;  		/* thread_info */
	p2 = r7;
	p2 = [p2];

	[p2+(TASK_THREAD+THREAD_KSP)] = sp;
#ifdef CONFIG_IPIPE
	r0 = sp;
	SP += -12;
	pseudo_long_call ___ipipe_syscall_root, p0;
	SP += 12;
	cc = r0 == 1;
	if cc jump .Lsyscall_really_exit;
	cc = r0 == -1;
	if cc jump .Lresume_userspace;
	r3 = [sp + PT_R3];
	r4 = [sp + PT_R4];
	p0 = [sp + PT_ORIG_P0];
#endif /* CONFIG_IPIPE */

	/* Are we tracing syscalls? */
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;
	p2 = r7;
	r7 = [p2+TI_FLAGS];
	CC = BITTST(r7,TIF_SYSCALL_TRACE);
	if CC JUMP _sys_trace;
	CC = BITTST(r7,TIF_SINGLESTEP);
	if CC JUMP _sys_trace;

	/* Make sure the system call # is valid */
	p4 = __NR_syscall;
	/* System call number is passed in P0 */
	cc = p4 <= p0;
	if cc jump .Lbadsys;

	/* Execute the appropriate system call */

	p4 = p0;
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	p5 = [p5];

	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
	[sp + PT_R0] = r0;

.Lresume_userspace:
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
.Lresume_userspace_1:
	/* Disable interrupts.  */
	[--sp] = reti;
	reti = [sp++];

	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;

.Lsyscall_resched:
#ifdef CONFIG_IPIPE
	cc = BITTST(r7, TIF_IRQ_SYNC);
	if !cc jump .Lsyscall_no_irqsync;
	/*
	 * Clear IPEND[4] manually to undo what resume_userspace_1 just did;
	 * we need this so that high priority domain interrupts may still
	 * preempt the current domain while the pipeline log is being played
	 * back.
	 */
	[--sp] = reti;
	SP += 4; /* don't merge with next insn to keep the pattern obvious */
	SP += -12;
	pseudo_long_call ___ipipe_sync_root, p4;
	SP += 12;
	jump .Lresume_userspace_1;
.Lsyscall_no_irqsync:
#endif
	cc = BITTST(r7, TIF_NEED_RESCHED);
	if !cc jump .Lsyscall_sigpending;

	/* Reenable interrupts.  */
	[--sp] = reti;
	sp += 4;

	SP += -12;
	pseudo_long_call _schedule, p4;
	SP += 12;

	jump .Lresume_userspace_1;

.Lsyscall_sigpending:
	cc = BITTST(r7, TIF_RESTORE_SIGMASK);
	if cc jump .Lsyscall_do_signals;
	cc = BITTST(r7, TIF_SIGPENDING);
	if cc jump .Lsyscall_do_signals;
	cc = BITTST(r7, TIF_NOTIFY_RESUME);
	if !cc jump .Lsyscall_really_exit;
.Lsyscall_do_signals:
	/* Reenable interrupts.  */
	[--sp] = reti;
	sp += 4;

	r0 = sp;
	SP += -12;
	pseudo_long_call _do_notify_resume, p5;
	SP += 12;

.Lsyscall_really_exit:
	r5 = [sp + PT_RESERVED];
	rets = r5;
	rts;
ENDPROC(_system_call)
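
/* Stripped of the tracing and work-pending paths, the core of
 * _system_call is (an illustrative sketch only):
 *
 *	if (p0 >= __NR_syscall)
 *		regs->r0 = -ENOSYS;
 *	else
 *		regs->r0 = sys_call_table[p0](r0, r1, r2, r3, r4, r5);
 *	// then resume_userspace: reschedule / deliver signals as flagged
 */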

/* Do not mark as ENTRY() to avoid error in assembler ...
 * this symbol need not be global anyway, so ...
 */
_sys_trace:
	r0 = sp;
	pseudo_long_call _syscall_trace_enter, p5;

	/* Make sure the system call # is valid */
	p4 = [SP + PT_P0];
	p3 = __NR_syscall;
	cc = p3 <= p4;
	r0 = -ENOSYS;
	if cc jump .Lsys_trace_badsys;

	/* Execute the appropriate system call */
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	r3 = [sp + PT_R3];
	r4 = [sp + PT_R4];
	r5 = [sp + PT_R5];
	p5 = [p5];

	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
.Lsys_trace_badsys:
	[sp + PT_R0] = r0;

	r0 = sp;
	pseudo_long_call _syscall_trace_leave, p5;
	jump .Lresume_userspace;
ENDPROC(_sys_trace)

ENTRY(_resume)
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in r0, next (the new task) is in r1.
	 */
	p0 = r0;
	p1 = r1;
	[--sp] = rets;
	[--sp] = fp;
	[--sp] = (r7:4, p5:3);

	/* save usp */
	p2 = usp;
	[p0+(TASK_THREAD+THREAD_USP)] = p2;

	/* save current kernel stack pointer */
	[p0+(TASK_THREAD+THREAD_KSP)] = sp;

	/* save program counter */
	r1.l = _new_old_task;
	r1.h = _new_old_task;
	[p0+(TASK_THREAD+THREAD_PC)] = r1;

	/* restore the kernel stack pointer */
	sp = [p1+(TASK_THREAD+THREAD_KSP)];

	/* restore user stack pointer */
	p0 = [p1+(TASK_THREAD+THREAD_USP)];
	usp = p0;

	/* restore pc */
	p0 = [p1+(TASK_THREAD+THREAD_PC)];
	jump (p0);

	/*
	 * The following code actually ends up in the new (old) task.
	 */

_new_old_task:
	(r7:4, p5:3) = [sp++];
	fp = [sp++];
	rets = [sp++];

	/*
	 * When we come out of resume, r0 carries the "old" task, because we
	 * are now in the "new" task.
	 */
	rts;
ENDPROC(_resume)
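
/* _resume is the bottom half of a context switch; in rough C terms (an
 * illustrative sketch only):
 *
 *	prev->thread.usp = usp;
 *	prev->thread.ksp = sp;
 *	prev->thread.pc  = &&new_old_task;	// where prev resumes later
 *	sp  = next->thread.ksp;
 *	usp = next->thread.usp;
 *	goto *next->thread.pc;			// usually new_old_task
 */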

ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
	p2.l = _ipipe_percpu_domain;
	p2.h = _ipipe_percpu_domain;
	r0.l = _ipipe_root;
	r0.h = _ipipe_root;
	r2 = [p2];
	cc = r0 == r2;
	if !cc jump 4f;  /* not on behalf of the root domain, get out */
#endif /* CONFIG_IPIPE */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);

	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

1:
	r2 = LO(~0x37) (Z);
	r0 = r2 & r0;
	cc = r0 == 0;
	if !cc jump 4f;	/* if not returning to user mode, get out */

	/* Make sure any pending system call or deferred exception return
	 * in ILAT for this process gets executed; otherwise, if a context
	 * switch happens, the system call of the first process (i.e. the
	 * one in ILAT) would be carried over to the switched-in process.
	 */

	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	r1 = (EVT_IVG14 | EVT_IVG15) (z);
	r0 = r0 & r1;
	cc = r0 == 0;
	if !cc jump 5f;

	/* Set the stack for the current process */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;
	cc = r7 == 0;
	if cc jump 4f;

	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal;
	p1.h = _schedule_and_signal;
	[p0] = p1;
	csync;
	raise 15;		/* raise evt15 to do signal or reschedule */
4:
	r0 = syscfg;
	bitclr(r0, SYSCFG_SSSTEP_P);		/* Turn off single step */
	syscfg = r0;
5:
	rts;
ENDPROC(_ret_from_exception)
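
/* The checks above boil down to (an illustrative sketch only):
 *
 *	if (!returning_to_user_mode(IPEND))
 *		goto out;			// just clear SSSTEP and rts
 *	if (ILAT & (EVT_IVG14 | EVT_IVG15))
 *		return;				// pending syscall runs first
 *	if (current_thread_info()->flags & _TIF_WORK_MASK) {
 *		EVT15 = schedule_and_signal;
 *		raise(15);			// do the work at level 15
 *	}
 */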

#if defined(CONFIG_PREEMPT)

ENTRY(_up_to_irq14)
#if ANOMALY_05000281 || ANOMALY_05000461
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_up_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_up_evt14 */
	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;

	p0.l = lo(EVT14);
	p0.h = hi(EVT14);
	p1.l = _evt_up_evt14;
	p1.h = _evt_up_evt14;
	[p0] = p1;
	csync;

	raise 14;
1:
	jump 1b;
ENDPROC(_up_to_irq14)

ENTRY(_evt_up_evt14)
#ifdef CONFIG_DEBUG_HWERR
	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
	sti r0;
#else
	cli r0;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	[--sp] = rets;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	rets = [sp++];
#endif
	[--sp] = RETI;
	SP += 4;

	/* restore normal evt14 */
	p0.l = lo(EVT14);
	p0.h = hi(EVT14);
	p1.l = _evt_evt14;
	p1.h = _evt_evt14;
	[p0] = p1;
	csync;

	rts;
ENDPROC(_evt_up_evt14)

#endif

#ifdef CONFIG_IPIPE

_resume_kernel_from_int:
	r1 = LO(~0x8000) (Z);
	r1 = r0 & r1;
	r0 = 1;
	r0 = r1 - r0;
	r2 = r1 & r0;
	cc = r2 == 0;
	/* Sync the root stage only from the outer interrupt level. */
	if !cc jump .Lnosync;
	r0.l = ___ipipe_sync_root;
	r0.h = ___ipipe_sync_root;
	[--sp] = reti;
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	SP += -12;
	call ___ipipe_call_irqtail;
	SP += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];
	reti = [sp++];
.Lnosync:
	rts;
#elif defined(CONFIG_PREEMPT)

_resume_kernel_from_int:
	/* check preempt_count */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;
	p5 = r7;
	r7 = [p5 + TI_PREEMPT];
	cc = r7 == 0x0;
	if !cc jump .Lreturn_to_kernel;
.Lneed_schedule:
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;
	cc = BITTST(r7, TIF_NEED_RESCHED);
	if !cc jump .Lreturn_to_kernel;
	/*
	 * Do the scheduling at level 15; otherwise the scheduled process
	 * would run at a high level and block low-level interrupts.
	 */
	r6 = reti;  /* save reti */
	r5.l = .Lkernel_schedule;
	r5.h = .Lkernel_schedule;
	reti = r5;
	rti;
.Lkernel_schedule:
	[--sp] = rets;
	sp += -12;
	pseudo_long_call _preempt_schedule_irq, p4;
	sp += 12;
	rets = [sp++];

	[--sp] = rets;
	sp += -12;
	/* go up to irq14 so that the reti after restore_all can return to irq15 (kernel) */
	pseudo_long_call _up_to_irq14, p4;
	sp += 12;
	rets = [sp++];

	reti = r6; /* restore reti so that the original process can return to the interrupted point */

	jump .Lneed_schedule;
#else

#define _resume_kernel_from_int	.Lreturn_to_kernel
#endif
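
/* With CONFIG_PREEMPT, _resume_kernel_from_int amounts to (an
 * illustrative sketch only):
 *
 *	if (preempt_count() != 0)
 *		return;
 *	while (test_thread_flag(TIF_NEED_RESCHED))
 *		preempt_schedule_irq();	// runs at EVT15, not at IRQ level
 */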

ENTRY(_return_from_int)
	/* If someone else already raised IRQ 15, do nothing.  */
	csync;
	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	cc = bittst (r0, EVT_IVG15_P);
	if cc jump .Lreturn_to_kernel;

	/* if not returning to user mode, get out */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	r1 = 0x17(Z);
	r2 = ~r1;
	r2.h = 0;
	r0 = r2 & r0;
	r1 = 1;
	r1 = r0 - r1;
	r2 = r0 & r1;
	cc = r2 == 0;
	if !cc jump _resume_kernel_from_int;

	/* Lower the interrupt level to 15.  */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal_from_int;
	p1.h = _schedule_and_signal_from_int;
	[p0] = p1;
	csync;
#if ANOMALY_05000281 || ANOMALY_05000461
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif
	r0 = 0x801f (z);
	STI r0;
	raise 15;	/* raise evt15 to do signal or reschedule */
	rti;
.Lreturn_to_kernel:
	rts;
ENDPROC(_return_from_int)
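
/* _return_from_int only lowers to EVT15 when it is safe and useful (an
 * illustrative sketch only):
 *
 *	if (ILAT & EVT_IVG15)
 *		return;			// someone already raised IRQ 15
 *	pend = IPEND & ~0x17;		// ignore EMU/RST/NMI/global-disable
 *	if (pend & (pend - 1))		// more than one level still pending:
 *		return resume_kernel_from_int();	// we return to kernel
 *	EVT15 = schedule_and_signal_from_int;
 *	raise(15);
 */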

ENTRY(_lower_to_irq14)
#if ANOMALY_05000281 || ANOMALY_05000461
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;
	rti;
ENDPROC(_lower_to_irq14)

ENTRY(_evt_evt14)
#ifdef CONFIG_DEBUG_HWERR
	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
	sti r0;
#else
	cli r0;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	[--sp] = rets;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	rets = [sp++];
#endif
	[--sp] = RETI;
	SP += 4;
	rts;
ENDPROC(_evt_evt14)

ENTRY(_schedule_and_signal_from_int)
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;

	/* Set orig_p0 to -1 to indicate this isn't the end of a syscall.  */
	r0 = -1 (x);
	[sp + PT_ORIG_P0] = r0;

	p1 = rets;
	[sp + PT_RESERVED] = p1;

#ifdef CONFIG_TRACE_IRQFLAGS
	/* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
	 * is turned on, so disable all irqs. */
	cli r0;
	sp += -12;
	call _trace_hardirqs_on;
	sp += 12;
#endif
#ifdef CONFIG_SMP
	GET_PDA(p0, r0); 	/* Fetch current PDA (can't migrate to other CPU here) */
	r0 = [p0 + PDA_IRQFLAGS];
#else
	p0.l = _bfin_irq_flags;
	p0.h = _bfin_irq_flags;
	r0 = [p0];
#endif
	sti r0;

	/* finish the userspace "atomic" functions for it */
	r1 = FIXED_CODE_END;
	r2 = [sp + PT_PC];
	cc = r1 <= r2;
	if cc jump .Lresume_userspace (bp);

	r0 = sp;
	sp += -12;

	pseudo_long_call _finish_atomic_sections, p5;
	sp += 12;
	jump.s .Lresume_userspace;
ENDPROC(_schedule_and_signal_from_int)

ENTRY(_schedule_and_signal)
	SAVE_CONTEXT_SYSCALL
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;
	p0.l = 1f;
	p0.h = 1f;
	[sp + PT_RESERVED] = P0;
	call .Lresume_userspace;
1:
	RESTORE_CONTEXT
	rti;
ENDPROC(_schedule_and_signal)

/* We handle this 100% in exception space, to reduce overhead.  The only
 * potential problem is if the software buffer gets swapped out of the CPLB
 * table; then we would double fault, so we don't let that happen in other
 * places.
 */
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
ENTRY(_ex_trace_buff_full)
	[--sp] = P3;
	[--sp] = P2;
	[--sp] = LC0;
	[--sp] = LT0;
	[--sp] = LB0;
	P5.L = _trace_buff_offset;
	P5.H = _trace_buff_offset;
	P3 = [P5];              /* trace_buff_offset */
	P5.L = lo(TBUFSTAT);
	P5.H = hi(TBUFSTAT);
	R7 = [P5];
	R7 <<= 1;               /* double, since we need to read twice */
	LC0 = R7;
	R7 <<= 2;               /* need to shift over again,
				 * to get the number of bytes */
	P5.L = lo(TBUF);
	P5.H = hi(TBUF);
	R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1;

	P2 = R7;
	P3 = P3 + P2;
	R7 = P3;
	R7 = R7 & R6;
	P3 = R7;
	P2.L = _trace_buff_offset;
	P2.H = _trace_buff_offset;
	[P2] = P3;

	P2.L = _software_trace_buff;
	P2.H = _software_trace_buff;

	LSETUP (.Lstart, .Lend) LC0;
.Lstart:
	R7 = [P5];      /* read TBUF */
	P4 = P3 + P2;
	[P4] = R7;
	P3 += -4;
	R7 = P3;
	R7 = R7 & R6;
.Lend:
	P3 = R7;

	LB0 = [sp++];
	LT0 = [sp++];
	LC0 = [sp++];
	P2 = [sp++];
	P3 = [sp++];
	jump _bfin_return_from_exception;
ENDPROC(_ex_trace_buff_full)
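
/* The loop above drains the hardware trace FIFO into the software ring
 * buffer; approximately (an illustrative sketch only):
 *
 *	n = TBUFSTAT * 2;			// two reads per trace entry
 *	off = (trace_buff_offset + n * 4) & (bufsize - 1);
 *	trace_buff_offset = off;
 *	while (n--) {
 *		*(u32 *)(software_trace_buff + off) = TBUF;
 *		off = (off - 4) & (bufsize - 1);
 *	}
 */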

#if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4
.data
#else
.section .l1.data.B
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN */
ENTRY(_trace_buff_offset)
	.long 0;
ALIGN
ENTRY(_software_trace_buff)
	.rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256);
	.long 0
	.endr
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */

#ifdef CONFIG_EARLY_PRINTK
__INIT
ENTRY(_early_trap)
	SAVE_ALL_SYS
	trace_buffer_stop(p0,r0);

	ANOMALY_283_315_WORKAROUND(p4, r5)

	/* Turn caches off, to ensure we don't get double exceptions */

	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4];              /* Control Register */
	BITCLR(R5,ENICPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
	r1 = RETX;

	SP += -12;
	call _early_trap_c;
	SP += 12;
ENDPROC(_early_trap)
__FINIT
#endif /* CONFIG_EARLY_PRINTK */

/*
 * Put these in the kernel data section - that should always be covered by
 * a CPLB.  This is needed to ensure we don't get double fault conditions.
 */

#ifdef CONFIG_SYSCALL_TAB_L1
.section .l1.data
#else
.data
#endif

ENTRY(_ex_table)
	/* entry for each EXCAUSE[5:0]
	 * This table must be in sync with the table in ./kernel/traps.c
	 * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined
	 */
	.long _ex_syscall       /* 0x00 - User Defined - Linux Syscall */
	.long _ex_trap_c        /* 0x01 - User Defined - Software breakpoint */
#ifdef	CONFIG_KGDB
	.long _ex_trap_c	/* 0x02 - User Defined - KGDB initial connection
							 and break signal trap */
#else
	.long _ex_replaceable   /* 0x02 - User Defined */
#endif
	.long _ex_trap_c        /* 0x03 - User Defined - userspace stack overflow */
	.long _ex_trap_c        /* 0x04 - User Defined - dump trace buffer */
	.long _ex_replaceable   /* 0x05 - User Defined */
	.long _ex_replaceable   /* 0x06 - User Defined */
	.long _ex_replaceable   /* 0x07 - User Defined */
	.long _ex_replaceable   /* 0x08 - User Defined */
	.long _ex_replaceable   /* 0x09 - User Defined */
	.long _ex_replaceable   /* 0x0A - User Defined */
	.long _ex_replaceable   /* 0x0B - User Defined */
	.long _ex_replaceable   /* 0x0C - User Defined */
	.long _ex_replaceable   /* 0x0D - User Defined */
	.long _ex_replaceable   /* 0x0E - User Defined */
	.long _ex_replaceable   /* 0x0F - User Defined */
	.long _ex_single_step   /* 0x10 - HW Single step */
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	.long _ex_trace_buff_full /* 0x11 - Trace Buffer Full */
#else
	.long _ex_trap_c        /* 0x11 - Trace Buffer Full */
#endif
	.long _ex_trap_c        /* 0x12 - Reserved */
	.long _ex_trap_c        /* 0x13 - Reserved */
	.long _ex_trap_c        /* 0x14 - Reserved */
	.long _ex_trap_c        /* 0x15 - Reserved */
	.long _ex_trap_c        /* 0x16 - Reserved */
	.long _ex_trap_c        /* 0x17 - Reserved */
	.long _ex_trap_c        /* 0x18 - Reserved */
	.long _ex_trap_c        /* 0x19 - Reserved */
	.long _ex_trap_c        /* 0x1A - Reserved */
	.long _ex_trap_c        /* 0x1B - Reserved */
	.long _ex_trap_c        /* 0x1C - Reserved */
	.long _ex_trap_c        /* 0x1D - Reserved */
	.long _ex_trap_c        /* 0x1E - Reserved */
	.long _ex_trap_c        /* 0x1F - Reserved */
	.long _ex_trap_c        /* 0x20 - Reserved */
	.long _ex_trap_c        /* 0x21 - Undefined Instruction */
	.long _ex_trap_c        /* 0x22 - Illegal Instruction Combination */
	.long _ex_dviol         /* 0x23 - Data CPLB Protection Violation */
	.long _ex_trap_c        /* 0x24 - Data access misaligned */
	.long _ex_trap_c        /* 0x25 - Unrecoverable Event */
	.long _ex_dmiss         /* 0x26 - Data CPLB Miss */
	.long _ex_dmult         /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */
	.long _ex_trap_c        /* 0x28 - Emulation Watchpoint */
	.long _ex_trap_c        /* 0x29 - Instruction fetch access error (535 only) */
	.long _ex_trap_c        /* 0x2A - Instruction fetch misaligned */
	.long _ex_trap_c        /* 0x2B - Instruction CPLB protection Violation */
	.long _ex_icplb_miss    /* 0x2C - Instruction CPLB miss */
	.long _ex_trap_c        /* 0x2D - Instruction CPLB Multiple Hits */
	.long _ex_trap_c        /* 0x2E - Illegal use of Supervisor Resource */
	.long _ex_trap_c        /* 0x2F - Reserved */
	.long _ex_trap_c        /* 0x30 - Reserved */
	.long _ex_trap_c        /* 0x31 - Reserved */
	.long _ex_trap_c        /* 0x32 - Reserved */
	.long _ex_trap_c        /* 0x33 - Reserved */
	.long _ex_trap_c        /* 0x34 - Reserved */
	.long _ex_trap_c        /* 0x35 - Reserved */
	.long _ex_trap_c        /* 0x36 - Reserved */
	.long _ex_trap_c        /* 0x37 - Reserved */
	.long _ex_trap_c        /* 0x38 - Reserved */
	.long _ex_trap_c        /* 0x39 - Reserved */
	.long _ex_trap_c        /* 0x3A - Reserved */
	.long _ex_trap_c        /* 0x3B - Reserved */
	.long _ex_trap_c        /* 0x3C - Reserved */
	.long _ex_trap_c        /* 0x3D - Reserved */
	.long _ex_trap_c        /* 0x3E - Reserved */
	.long _ex_trap_c        /* 0x3F - Reserved */
END(_ex_table)

ENTRY(_sys_call_table)
	.long _sys_restart_syscall	/* 0 */
	.long _sys_exit
	.long _sys_fork
	.long _sys_read
	.long _sys_write
	.long _sys_open		/* 5 */
	.long _sys_close
	.long _sys_ni_syscall	/* old waitpid */
	.long _sys_creat
	.long _sys_link
	.long _sys_unlink	/* 10 */
	.long _sys_execve
	.long _sys_chdir
	.long _sys_time
	.long _sys_mknod
	.long _sys_chmod		/* 15 */
	.long _sys_chown	/* chown16 */
	.long _sys_ni_syscall	/* old break syscall holder */
	.long _sys_ni_syscall	/* old stat */
	.long _sys_lseek
	.long _sys_getpid	/* 20 */
	.long _sys_mount
	.long _sys_ni_syscall	/* old umount */
	.long _sys_setuid
	.long _sys_getuid
	.long _sys_stime		/* 25 */
	.long _sys_ptrace
	.long _sys_alarm
	.long _sys_ni_syscall	/* old fstat */
	.long _sys_pause
	.long _sys_ni_syscall	/* old utime */ /* 30 */
	.long _sys_ni_syscall	/* old stty syscall holder */
	.long _sys_ni_syscall	/* old gtty syscall holder */
	.long _sys_access
	.long _sys_nice
	.long _sys_ni_syscall	/* 35 */ /* old ftime syscall holder */
	.long _sys_sync
	.long _sys_kill
	.long _sys_rename
	.long _sys_mkdir
	.long _sys_rmdir		/* 40 */
	.long _sys_dup
	.long _sys_pipe
	.long _sys_times
	.long _sys_ni_syscall	/* old prof syscall holder */
	.long _sys_brk		/* 45 */
	.long _sys_setgid
	.long _sys_getgid
	.long _sys_ni_syscall	/* old sys_signal */
	.long _sys_geteuid	/* geteuid16 */
	.long _sys_getegid	/* getegid16 */	/* 50 */
	.long _sys_acct
	.long _sys_umount	/* recycled never used phys() */
	.long _sys_ni_syscall	/* old lock syscall holder */
	.long _sys_ioctl
	.long _sys_fcntl		/* 55 */
	.long _sys_ni_syscall	/* old mpx syscall holder */
	.long _sys_setpgid
	.long _sys_ni_syscall	/* old ulimit syscall holder */
	.long _sys_ni_syscall	/* old old uname */
	.long _sys_umask		/* 60 */
	.long _sys_chroot
	.long _sys_ustat
	.long _sys_dup2
	.long _sys_getppid
	.long _sys_getpgrp	/* 65 */
	.long _sys_setsid
	.long _sys_ni_syscall	/* old sys_sigaction */
	.long _sys_sgetmask
	.long _sys_ssetmask
	.long _sys_setreuid	/* setreuid16 */	/* 70 */
	.long _sys_setregid	/* setregid16 */
	.long _sys_ni_syscall	/* old sys_sigsuspend */
	.long _sys_ni_syscall	/* old sys_sigpending */
	.long _sys_sethostname
	.long _sys_setrlimit	/* 75 */
	.long _sys_ni_syscall	/* old getrlimit */
	.long _sys_getrusage
	.long _sys_gettimeofday
	.long _sys_settimeofday
	.long _sys_getgroups	/* getgroups16 */	/* 80 */
	.long _sys_setgroups	/* setgroups16 */
	.long _sys_ni_syscall	/* old_select */
	.long _sys_symlink
	.long _sys_ni_syscall	/* old lstat */
	.long _sys_readlink	/* 85 */
	.long _sys_uselib
	.long _sys_ni_syscall	/* sys_swapon */
	.long _sys_reboot
	.long _sys_ni_syscall	/* old_readdir */
	.long _sys_ni_syscall	/* sys_mmap */	/* 90 */
	.long _sys_munmap
	.long _sys_truncate
	.long _sys_ftruncate
	.long _sys_fchmod
	.long _sys_fchown	/* fchown16 */	/* 95 */
	.long _sys_getpriority
	.long _sys_setpriority
	.long _sys_ni_syscall	/* old profil syscall holder */
	.long _sys_statfs
	.long _sys_fstatfs	/* 100 */
	.long _sys_ni_syscall
	.long _sys_ni_syscall	/* old sys_socketcall */
	.long _sys_syslog
	.long _sys_setitimer
	.long _sys_getitimer	/* 105 */
	.long _sys_newstat
	.long _sys_newlstat
	.long _sys_newfstat
	.long _sys_ni_syscall	/* old uname */
	.long _sys_ni_syscall	/* iopl for i386 */ /* 110 */
	.long _sys_vhangup
	.long _sys_ni_syscall	/* obsolete idle() syscall */
	.long _sys_ni_syscall	/* vm86old for i386 */
	.long _sys_wait4
	.long _sys_ni_syscall	/* 115 */ /* sys_swapoff */
	.long _sys_sysinfo
	.long _sys_ni_syscall	/* old sys_ipc */
	.long _sys_fsync
	.long _sys_ni_syscall	/* old sys_sigreturn */
	.long _sys_clone		/* 120 */
	.long _sys_setdomainname
	.long _sys_newuname
	.long _sys_ni_syscall	/* old sys_modify_ldt */
	.long _sys_adjtimex
	.long _sys_mprotect	/* 125 */
	.long _sys_ni_syscall	/* old sys_sigprocmask */
	.long _sys_ni_syscall	/* old "create_module" */
	.long _sys_init_module
	.long _sys_delete_module
	.long _sys_ni_syscall	/* 130: old "get_kernel_syms" */
	.long _sys_quotactl
	.long _sys_getpgid
	.long _sys_fchdir
	.long _sys_bdflush
	.long _sys_ni_syscall	/* 135 */ /* sys_sysfs */
	.long _sys_personality
	.long _sys_ni_syscall	/* for afs_syscall */
	.long _sys_setfsuid	/* setfsuid16 */
	.long _sys_setfsgid	/* setfsgid16 */
	.long _sys_llseek	/* 140 */
	.long _sys_getdents
	.long _sys_ni_syscall	/* sys_select */
	.long _sys_flock
	.long _sys_msync
	.long _sys_readv		/* 145 */
	.long _sys_writev
	.long _sys_getsid
	.long _sys_fdatasync
	.long _sys_sysctl
	.long _sys_mlock	/* 150 */
	.long _sys_munlock
	.long _sys_mlockall
	.long _sys_munlockall
	.long _sys_sched_setparam
	.long _sys_sched_getparam /* 155 */
	.long _sys_sched_setscheduler
	.long _sys_sched_getscheduler
	.long _sys_sched_yield
	.long _sys_sched_get_priority_max
	.long _sys_sched_get_priority_min  /* 160 */
	.long _sys_sched_rr_get_interval
	.long _sys_nanosleep
	.long _sys_mremap
	.long _sys_setresuid	/* setresuid16 */
	.long _sys_getresuid	/* getresuid16 */	/* 165 */
	.long _sys_ni_syscall	/* for vm86 */
	.long _sys_ni_syscall	/* old "query_module" */
	.long _sys_ni_syscall	/* sys_poll */
	.long _sys_ni_syscall   /* old nfsservctl */
	.long _sys_setresgid	/* setresgid16 */	/* 170 */
	.long _sys_getresgid	/* getresgid16 */
	.long _sys_prctl
	.long _sys_rt_sigreturn
	.long _sys_rt_sigaction
	.long _sys_rt_sigprocmask /* 175 */
	.long _sys_rt_sigpending
	.long _sys_rt_sigtimedwait
	.long _sys_rt_sigqueueinfo
	.long _sys_rt_sigsuspend
	.long _sys_pread64	/* 180 */
	.long _sys_pwrite64
	.long _sys_lchown	/* lchown16 */
	.long _sys_getcwd
	.long _sys_capget
	.long _sys_capset	/* 185 */
	.long _sys_sigaltstack
	.long _sys_sendfile
	.long _sys_ni_syscall	/* streams1 */
	.long _sys_ni_syscall	/* streams2 */
	.long _sys_vfork		/* 190 */
	.long _sys_getrlimit
	.long _sys_mmap_pgoff
	.long _sys_truncate64
	.long _sys_ftruncate64
	.long _sys_stat64	/* 195 */
	.long _sys_lstat64
	.long _sys_fstat64
	.long _sys_chown
	.long _sys_getuid
	.long _sys_getgid	/* 200 */
	.long _sys_geteuid
	.long _sys_getegid
	.long _sys_setreuid
	.long _sys_setregid
	.long _sys_getgroups	/* 205 */
	.long _sys_setgroups
	.long _sys_fchown
	.long _sys_setresuid
	.long _sys_getresuid
	.long _sys_setresgid	/* 210 */
	.long _sys_getresgid
	.long _sys_lchown
	.long _sys_setuid
	.long _sys_setgid
	.long _sys_setfsuid	/* 215 */
	.long _sys_setfsgid
	.long _sys_pivot_root
	.long _sys_mincore
	.long _sys_madvise
	.long _sys_getdents64	/* 220 */
	.long _sys_fcntl64
	.long _sys_ni_syscall	/* reserved for TUX */
	.long _sys_ni_syscall
	.long _sys_gettid
	.long _sys_readahead	/* 225 */
	.long _sys_setxattr
	.long _sys_lsetxattr
	.long _sys_fsetxattr
	.long _sys_getxattr
	.long _sys_lgetxattr	/* 230 */
	.long _sys_fgetxattr
	.long _sys_listxattr
	.long _sys_llistxattr
	.long _sys_flistxattr
	.long _sys_removexattr	/* 235 */
	.long _sys_lremovexattr
	.long _sys_fremovexattr
	.long _sys_tkill
	.long _sys_sendfile64
	.long _sys_futex		/* 240 */
	.long _sys_sched_setaffinity
	.long _sys_sched_getaffinity
	.long _sys_ni_syscall	/* sys_set_thread_area */
	.long _sys_ni_syscall	/* sys_get_thread_area */
	.long _sys_io_setup	/* 245 */
	.long _sys_io_destroy
	.long _sys_io_getevents
	.long _sys_io_submit
	.long _sys_io_cancel
	.long _sys_ni_syscall	/* 250 */ /* sys_alloc_hugepages */
	.long _sys_ni_syscall	/* sys_free_hugepages */
	.long _sys_exit_group
	.long _sys_lookup_dcookie
	.long _sys_bfin_spinlock
	.long _sys_epoll_create	/* 255 */
	.long _sys_epoll_ctl
	.long _sys_epoll_wait
	.long _sys_ni_syscall /* remap_file_pages */
	.long _sys_set_tid_address
	.long _sys_timer_create	/* 260 */
	.long _sys_timer_settime
	.long _sys_timer_gettime
	.long _sys_timer_getoverrun
	.long _sys_timer_delete
	.long _sys_clock_settime /* 265 */
	.long _sys_clock_gettime
	.long _sys_clock_getres
	.long _sys_clock_nanosleep
	.long _sys_statfs64
	.long _sys_fstatfs64	/* 270 */
	.long _sys_tgkill
	.long _sys_utimes
	.long _sys_fadvise64_64
	.long _sys_ni_syscall /* vserver */
	.long _sys_mbind	/* 275 */
	.long _sys_ni_syscall /* get_mempolicy */
	.long _sys_ni_syscall /* set_mempolicy */
	.long _sys_mq_open
	.long _sys_mq_unlink
	.long _sys_mq_timedsend	/* 280 */
	.long _sys_mq_timedreceive
	.long _sys_mq_notify
	.long _sys_mq_getsetattr
	.long _sys_ni_syscall /* kexec_load */
	.long _sys_waitid	/* 285 */
	.long _sys_add_key
	.long _sys_request_key
	.long _sys_keyctl
	.long _sys_ioprio_set
	.long _sys_ioprio_get	/* 290 */
	.long _sys_inotify_init
	.long _sys_inotify_add_watch
	.long _sys_inotify_rm_watch
	.long _sys_ni_syscall /* migrate_pages */
	.long _sys_openat	/* 295 */
	.long _sys_mkdirat
	.long _sys_mknodat
	.long _sys_fchownat
	.long _sys_futimesat
	.long _sys_fstatat64	/* 300 */
	.long _sys_unlinkat
	.long _sys_renameat
	.long _sys_linkat
	.long _sys_symlinkat
	.long _sys_readlinkat	/* 305 */
	.long _sys_fchmodat
	.long _sys_faccessat
	.long _sys_pselect6
	.long _sys_ppoll
	.long _sys_unshare	/* 310 */
	.long _sys_sram_alloc
	.long _sys_sram_free
	.long _sys_dma_memcpy
	.long _sys_accept
	.long _sys_bind		/* 315 */
	.long _sys_connect
	.long _sys_getpeername
	.long _sys_getsockname
	.long _sys_getsockopt
	.long _sys_listen	/* 320 */
	.long _sys_recv
	.long _sys_recvfrom
	.long _sys_recvmsg
	.long _sys_send
	.long _sys_sendmsg	/* 325 */
	.long _sys_sendto
	.long _sys_setsockopt
	.long _sys_shutdown
	.long _sys_socket
	.long _sys_socketpair	/* 330 */
	.long _sys_semctl
	.long _sys_semget
	.long _sys_semop
	.long _sys_msgctl
	.long _sys_msgget	/* 335 */
	.long _sys_msgrcv
	.long _sys_msgsnd
	.long _sys_shmat
	.long _sys_shmctl
	.long _sys_shmdt	/* 340 */
	.long _sys_shmget
	.long _sys_splice
	.long _sys_sync_file_range
	.long _sys_tee
	.long _sys_vmsplice	/* 345 */
	.long _sys_epoll_pwait
	.long _sys_utimensat
	.long _sys_signalfd
	.long _sys_timerfd_create
	.long _sys_eventfd	/* 350 */
	.long _sys_pread64
	.long _sys_pwrite64
	.long _sys_fadvise64
	.long _sys_set_robust_list
	.long _sys_get_robust_list	/* 355 */
	.long _sys_fallocate
	.long _sys_semtimedop
	.long _sys_timerfd_settime
	.long _sys_timerfd_gettime
	.long _sys_signalfd4		/* 360 */
	.long _sys_eventfd2
	.long _sys_epoll_create1
	.long _sys_dup3
	.long _sys_pipe2
	.long _sys_inotify_init1	/* 365 */
	.long _sys_preadv
	.long _sys_pwritev
	.long _sys_rt_tgsigqueueinfo
	.long _sys_perf_event_open
	.long _sys_recvmmsg		/* 370 */
	.long _sys_fanotify_init
	.long _sys_fanotify_mark
	.long _sys_prlimit64
	.long _sys_cacheflush
	.long _sys_name_to_handle_at	/* 375 */
	.long _sys_open_by_handle_at
	.long _sys_clock_adjtime
	.long _sys_syncfs
	.long _sys_setns
	.long _sys_sendmmsg		/* 380 */
	.long _sys_process_vm_readv
	.long _sys_process_vm_writev

	.rept NR_syscalls-(.-_sys_call_table)/4
	.long _sys_ni_syscall
	.endr
END(_sys_call_table)