Searched refs:ss (Results 1 - 25 of 85) sorted by relevance

/arch/m68k/lib/
memset.c
    27  short *ss = s;   [local]
    28  *ss++ = c;
    29  s = ss;
    64  short *ss = s;   [local]
    65  *ss++ = c;
    66  s = ss;
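The m68k memset above stores a 16-bit halfword per iteration through the short pointer. A user-space sketch of that word-at-a-time idea, with illustrative alignment handling (not the kernel's exact code; the name memset16_sketch is made up):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: fill n bytes at s with c, storing 16 bits per iteration
     * once the pointer is 2-byte aligned. Illustrative only. */
    void *memset16_sketch(void *s, int c, size_t n)
    {
        char *p = s;
        unsigned short pattern = ((unsigned char)c << 8) | (unsigned char)c;

        /* lead-in byte to reach halfword alignment */
        if (n && ((uintptr_t)p & 1)) {
            *p++ = c;
            n--;
        }
        /* bulk: one halfword store covers two bytes */
        unsigned short *ss = (unsigned short *)p;
        for (; n >= 2; n -= 2)
            *ss++ = pattern;
        /* trailing byte, if any */
        p = (char *)ss;
        if (n)
            *p = c;
        return s;
    }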
/arch/mips/boot/compressed/
string.c
    23  char *ss = s;   [local]
    26  ss[i] = c;
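Boot decompressors like this one run freestanding, before the kernel's optimized string routines are linked in, so each carries a trivial byte loop. A compilable sketch modeled on the lines above (illustrative name):

    #include <stddef.h>

    /* Byte-at-a-time memset of the kind boot stubs carry;
     * freestanding code can't call the C library's version. */
    void *memset_sketch(void *s, int c, size_t n)
    {
        char *ss = s;
        size_t i;

        for (i = 0; i < n; i++)
            ss[i] = c;
        return s;
    }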
/arch/hexagon/kernel/
process.c
    77  struct hexagon_switch_stack *ss;   [local]
    92  ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
    93          sizeof(*ss));
    94  ss->lr = (unsigned long)ret_from_fork;
    95  p->thread.switch_sp = ss;
    99  ss->r24 = usp;
    100 ss->r25 = arg;
    105 ss->r2524 = 0;
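The pattern above is common to copy_thread() implementations: carve a switch-stack frame immediately below the child's register frame and aim its saved link register at ret_from_fork, so the first context switch into the child "returns" into the fork tail. A compilable toy showing just the pointer arithmetic (all toy_* types, the stub, and the register roles are made up for illustration):

    #include <stdio.h>

    /* Toy stand-ins for the kernel types; illustrative only. */
    struct toy_regs         { unsigned long pc, sp; };
    struct toy_switch_stack { unsigned long lr, r24, r25; };

    static void ret_from_fork_stub(void) { /* placeholder */ }

    int main(void)
    {
        /* Pretend this buffer is the child's kernel stack, with the
         * register frame (childregs) at its high end. */
        unsigned char stack[4096];
        struct toy_regs *childregs =
            (struct toy_regs *)(stack + sizeof(stack)) - 1;

        /* Carve the switch frame immediately below childregs... */
        struct toy_switch_stack *ss =
            (struct toy_switch_stack *)((unsigned long)childregs - sizeof(*ss));

        /* ...and make the first switch into the child fall into the
         * fork tail; r24/r25 carry fn/arg for kernel threads. */
        ss->lr  = (unsigned long)ret_from_fork_stub;
        ss->r24 = 0;
        ss->r25 = 0;

        printf("childregs at %p, switch frame at %p\n",
               (void *)childregs, (void *)ss);
        return 0;
    }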
/arch/x86/boot/compressed/
string.c
    36  char *ss = s;   [local]
    39  ss[i] = c;
/arch/x86/include/asm/
suspend_32.h
    14  u16 es, fs, gs, ss;   [member of struct saved_context]
suspend_64.h
    22  u16 ds, es, fs, gs, ss;   [member of struct saved_context]
sigcontext.h
    26  unsigned short ss, __ssh;   [member of struct sigcontext]
user_32.h
    94  unsigned long ss;   [member of struct user_regs_struct]
a.out-core.h
    58  dump->regs.ss = (u16)regs->ss;
kexec.h
    72  * CPU does not save ss and sp on stack if execution is already
    82  "movw %%ss, %%ax\n\t"
    83  :"=a"(newregs->ss));
    89  * via panic otherwise just fix up the ss and sp if coming via kernel
    108 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
    130 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
ptrace.h
    28  unsigned long ss;   [member of struct pt_regs]
    57  unsigned long ss;   [member of struct pt_regs]
    167 #define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
    185 * Traps from the kernel do not save sp and ss.
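The kexec.h lines above snapshot %ss by bouncing it through a general-purpose register, since segment registers cannot be named directly as C operands. The same idiom compiles in a user-space program on x86; a minimal sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned short ss_val;

        /* Same idiom as the kexec snippet: read %ss via a
         * general register the compiler picks. x86 only. */
        asm volatile("movw %%ss, %0" : "=r"(ss_val));

        printf("ss = %#06x\n", ss_val);
        return 0;
    }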
/arch/x86/power/
cpu.c
    85  savesegment(ss, ctxt->ss);
    92  asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
    207 loadsegment(ss, ctxt->ss);
    220 asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
/arch/x86/include/uapi/asm/
ptrace.h
    67  unsigned long ss;   [member of struct pt_regs]
sigcontext32.h
    71  unsigned short ss, __ssh;   [member of struct sigcontext_ia32]
/arch/x86/xen/
xen-asm_32.S
    105 movl %ss:xen_vcpu, %eax
    118 setz %ss:XEN_vcpu_info_mask(%eax)
    122 cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
    130 movb $1, %ss:XEN_vcpu_info_mask(%eax)
    166 * ss : (ss/esp may be present if we came from usermode)
    175 * cs } (no ss/esp because we're nested
/arch/m32r/boot/compressed/
misc.c
    33  char *ss = s;   [local]
    36  *ss++ = c;
/arch/x86/kvm/
tss.h
    25  u32 ss;   [member of struct tss_segment_32]
    54  u16 ss;   [member of struct tss_segment_16]
/arch/x86/purgatory/
setup-x86_64.S
    26  movl %eax, %ss
/arch/sh/boot/compressed/
misc.c
    81  char *ss = (char*)s;   [local]
    83  for (i=0;i<n;i++) ss[i] = c;
/arch/x86/kernel/
dumpstack.c
    256 unsigned short ss;   [local]
    280 ss = regs->ss & 0xffff;
    283 savesegment(ss, ss);
    287 printk(" SS:ESP %04x:%08lx\n", ss, sp);
process_32.c
    74  unsigned short ss, gs;   [local]
    78  ss = regs->ss & 0xffff;
    82  savesegment(ss, ss);
    96  (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
    204 regs->ss = __USER_DS;
/arch/ia64/mm/
tlb.c
    108 static inline void spinaphore_init(struct spinaphore *ss, int val)   [argument]
    110 ss->ticket = 0;
    111 ss->serve = val;
    114 static inline void down_spin(struct spinaphore *ss)   [argument]
    116 unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
    118 if (time_before(t, ss->serve))
    124 asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
    131 static inline void up_spin(struct spinaphore *ss)   [argument]
    133 ia64_fetchadd(1, &ss->serve, rel);
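The spinaphore above is a ticket-based counting semaphore: down_spin() takes a ticket with a fetch-and-add and spins until the serve counter passes it, and up_spin() advances serve; initializing serve to val lets val holders in at once. A C11-atomics sketch of the same idea (simplified; the ia64 original also uses a speculative load to spin politely):

    #include <stdatomic.h>

    struct spinaphore {
        atomic_ulong ticket;
        atomic_ulong serve;
    };

    static void spinaphore_init(struct spinaphore *ss, unsigned long val)
    {
        atomic_store(&ss->ticket, 0);
        atomic_store(&ss->serve, val);  /* first 'val' tickets pass at once */
    }

    static void down_spin(struct spinaphore *ss)
    {
        unsigned long t = atomic_fetch_add(&ss->ticket, 1);  /* take a ticket */

        /* Wrap-safe comparison, mirroring time_before(): spin until
         * our ticket number is below the serve counter. */
        while ((long)(t - atomic_load(&ss->serve)) >= 0)
            ;
    }

    static void up_spin(struct spinaphore *ss)
    {
        atomic_fetch_add(&ss->serve, 1);  /* admit the next ticket */
    }

    int main(void)
    {
        struct spinaphore s;

        spinaphore_init(&s, 2);  /* at most two holders inside */
        down_spin(&s);
        down_spin(&s);
        up_spin(&s);
        down_spin(&s);           /* proceeds: a slot was released */
        return 0;
    }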
/arch/x86/math-emu/
get_address.c
    50  offsetof(struct pt_regs, ss),
    63  offsetof(struct pt_regs, ss),
    73  u_char ss, index, base;   [local]
    81  ss = base >> 6;
    92  /* A non-zero ss is illegal */
    93  if (ss)
    96  offset += (REG_(index)) << ss;
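In the math-emu code, ss is not a segment register but the scale field of an x86 SIB byte: bits 7..6 are the scale, bits 5..3 the index register, bits 2..0 the base register, and the indexed component of the address is the index register shifted left by the scale (the "non-zero ss is illegal" check applies to a case with no index register). A small compilable sketch of the decode; the example values are made up:

    #include <stdio.h>

    int main(void)
    {
        unsigned char sib = 0x8c;            /* example byte: 10 001 100 */
        unsigned int ss    = sib >> 6;       /* scale field        -> 2  */
        unsigned int index = (sib >> 3) & 7; /* index register no. -> 1  */
        unsigned int base  = sib & 7;        /* base register no.  -> 4  */

        unsigned long index_reg = 0x10;      /* pretend register value   */
        unsigned long offset = index_reg << ss;  /* scaled index: 0x40   */

        printf("ss=%u index=%u base=%u scaled=%#lx\n",
               ss, index, base, offset);
        return 0;
    }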
/arch/x86/include/asm/xen/
interface_64.h
    83  uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;   [member of struct iret_context]
    122 uint16_t ss, _pad2[3];   [member of struct cpu_user_regs]
/arch/x86/realmode/rm/
reboot.S
    63  movl %ecx, %ss
    115 movw %ax, %ss
