Searched refs:context (Results 226 - 250 of 270) sorted by relevance
1234567891011
/arch/arm64/kernel/ |
H A D | asm-offsets.c | 65 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
|
H A D | signal.c | 245 sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
|
/arch/blackfin/kernel/ |
H A D | process.c | 88 (void *)current->mm->context.stack_start;
|
/arch/hexagon/kernel/ |
H A D | signal.c | 112 struct hexagon_vdso *vdso = current->mm->context.vdso;
|
/arch/metag/kernel/ |
H A D | process.c | 214 * The Meta's stack grows upwards, and the context is the first 319 * Force a restore of the FPU context next time this process is 430 list_add(&tcm->list, &current->mm->context.tcm);
|
/arch/powerpc/kernel/ |
H A D | signal_32.c | 450 * to this context, except in the specific case below where we set it. 890 /* Pull in MSR TM from user context */ 1014 if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { 1016 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; 1158 * Check that the context is not smaller than the original 1164 * If the new context state sets the MSR VSX bits but 1170 /* Does the context have enough room to store VSX data? */ 1206 * If we get a fault copying the context into the kernel's 1213 * or if another thread unmaps the region containing the context. 1370 * If we get a fault copying the context int [all...] |
H A D | setup_64.c | 688 init_mm.context.pte_frag = NULL; 707 /* Initialize the MMU context management stuff */
|
H A D | signal_64.c | 86 * the context). This is very important because we must ensure we 127 * to this context, except in the specific case below where we set it. 177 * the context). This is very important because we must ensure we 430 /* pull in MSR TM from user context */ 433 /* pull in MSR LE from user context */ 599 * Check that the context is not smaller than the original 605 * If the new context state sets the MSR VSX bits but 611 /* Does the context have enough room to store VSX data? */ 631 * If we get a fault copying the context into the kernel's 638 * or if another thread unmaps the region containing the context [all...] |
/arch/score/kernel/ |
H A D | asm-offsets.c | 150 OFFSET(MM_CONTEXT, mm_struct, context);
|
/arch/sh/kernel/cpu/sh5/ |
H A D | switchto.S | 4 * sh64 context switch 61 to an exception, i.e. pc,sr,regs etc. However, for the context 118 ! Now switch context
|
/arch/arm/kernel/ |
H A D | entry-armv.S | 67 @ r4 - aborted context pc 68 @ r5 - aborted context psr 251 @ the saved context.
|
H A D | process.c | 676 mm->context.sigpage = addr;
|
H A D | signal.c | 35 /* the crunch context must be 64 bit aligned */ 48 /* the crunch context must be 64 bit aligned */ 67 /* the iWMMXt context must be 64 bit aligned */ 80 /* the iWMMXt context must be 64 bit aligned */ 399 retcode = mm->context.sigpage + signal_return_offset +
|
/arch/blackfin/mm/ |
H A D | sram-alloc.c | 746 for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next) 791 lsl->next = mm->context.sram_list; 792 mm->context.sram_list = lsl;
|
/arch/x86/kernel/ |
H A D | signal.c | 300 if (current->mm->context.vdso) 301 restorer = current->mm->context.vdso + 365 restorer = current->mm->context.vdso +
|
H A D | ptrace.c | 185 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); local 189 if (context == (sp & ~(THREAD_SIZE - 1))) 192 prev_esp = (u32 *)(context);
|
H A D | machine_kexec_64.c | 102 .context = image,
|
/arch/x86/mm/ |
H A D | init_64.c | 89 pmd = (pmd_t *)info->alloc_pgt_page(info->context); 122 pud = (pud_t *)info->alloc_pgt_page(info->context); 1228 if (!mm || mm->context.ia32_compat) 1246 * context. It is less reliable than using a task's mm and may give
|
/arch/ |
H A D | Kconfig | 321 - secure_computing is called from a ptrace_event()-safe context 329 need to be called from a ptrace-safe context. It must then
|
/arch/arm/mach-pxa/ |
H A D | balloon3.c | 550 .context = NULL,
|
/arch/ia64/kvm/ |
H A D | kvm-ia64.c | 643 union context *host_ctx, *guest_ctx; 658 /*Get host and guest context with guest address space.*/ 894 memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); 1163 union context *p_ctx = &vcpu->arch.guest; 1166 /*Init vcpu context for first run.*/ 1405 memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context));
|
H A D | vcpu.h | 735 extern void vmm_trampoline(union context *from, union context *to);
|
/arch/ia64/mm/ |
H A D | tlb.c | 310 mm->context = 0;
|
/arch/ia64/sn/kernel/sn2/ |
H A D | sn2_smp.c | 152 * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context. 159 * - if multiple cpus have loaded the context, then flushing has to be 225 rr_value = (mm->context << 3) | REGION_NUMBER(start);
|
/arch/x86/kernel/cpu/ |
H A D | common.c | 1379 load_LDT(&init_mm.context); 1422 load_LDT(&init_mm.context);
|
Completed in 3534 milliseconds
1234567891011