/arch/frv/include/asm/ |
H A D | mmu_context.h | 1 /* mmu_context.h: MMU context management routines 38 change_mm_context(&prev->context, &next->context, next->pgd); \ 43 change_mm_context(&prev->context, &next->context, next->pgd); \
|
/arch/powerpc/mm/ |
H A D | mmu_context_hash32.c | 34 * (virtual segment identifiers) for each context. Although the 40 * that we used to have when the context number overflowed, 50 * segment IDs). We use a skew on both the context and the high 4 bits 80 * Set up the context for a new address space. 84 mm->context.id = __init_new_context(); 90 * Free a context ID. Make sure to call this with preempt disabled! 99 * We're finished using the context for an address space. 104 if (mm->context.id != NO_CONTEXT) { 105 __destroy_context(mm->context.id); 106 mm->context [all...] |
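The hash32 file hands each address space a small context number that seeds its VSIDs and recycles the numbers through a bitmap, which is what removes the old overflow handling mentioned in the comments. Below is a minimal user-space sketch of that allocate/free cycle; the pool size, the NO_CONTEXT value, and the function names are illustrative stand-ins, not the powerpc definitions.

#include <stdio.h>

#define LAST_CONTEXT 32767          /* illustrative; not the powerpc limit */
#define NO_CONTEXT   ((unsigned long)-1)

static unsigned long context_map[(LAST_CONTEXT + 1) / (8 * sizeof(unsigned long))];
static unsigned long next_context = 1;   /* rotating search hint; 0 is the kernel's */

/* Model of __init_new_context(): find and claim a free context number. */
static unsigned long init_new_context_id(void)
{
    unsigned long ctx = next_context;

    for (unsigned long i = 0; i <= LAST_CONTEXT; i++, ctx++) {
        if (ctx > LAST_CONTEXT)
            ctx = 1;                /* wrap around, keeping context 0 reserved */
        unsigned long word = ctx / (8 * sizeof(unsigned long));
        unsigned long bit  = ctx % (8 * sizeof(unsigned long));
        if (!(context_map[word] & (1UL << bit))) {
            context_map[word] |= 1UL << bit;
            next_context = ctx + 1;
            return ctx;
        }
    }
    return NO_CONTEXT;              /* map exhausted */
}

/* Model of __destroy_context(): return the number to the bitmap. */
static void destroy_context_id(unsigned long ctx)
{
    if (ctx != NO_CONTEXT)
        context_map[ctx / (8 * sizeof(unsigned long))] &=
            ~(1UL << (ctx % (8 * sizeof(unsigned long))));
}

int main(void)
{
    unsigned long a = init_new_context_id();
    unsigned long b = init_new_context_id();
    printf("allocated %lu and %lu\n", a, b);
    destroy_context_id(a);
    destroy_context_id(b);
    return 0;
}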
H A D | mmu_context_nohash.c | 19 * - The global context lock will not scale very well 22 * - Implement flush_tlb_mm() by making the context stale and picking 66 /* Steal a context from a task that has one at the moment. 71 * This isn't an LRU system, it just frees up each context in 76 * For context stealing, we use a slightly different approach for 97 if (mm->context.active) { 105 /* Mark this mm has having no context anymore */ 106 mm->context.id = MMU_NO_CONTEXT; 151 /* Flush the TLB for that context */ 154 /* Mark this mm has having no context anymor [all...] |
H A D | icswx.c | 37 * of the Coprocessor Type (CT) and context switching. On a server 71 mtspr(SPRN_PID, next->context.cop_pid); 73 mtspr(SPRN_ACOP, next->context.acop); 97 spin_lock(mm->context.cop_lockp); 104 mm->context.acop |= acop; 117 spin_unlock(mm->context.cop_lockp); 141 spin_lock(mm->context.cop_lockp); 143 mm->context.acop &= ~acop; 159 spin_unlock(mm->context.cop_lockp); 243 if ((acop_copro_type_bit(ct) & current->active_mm->context [all...] |
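icswx keeps a per-mm ACOP bitmask of coprocessor types (CTs) the address space may use, updates it under mm->context.cop_lockp, and reloads the ACOP and coprocessor PID registers when the mm is switched in. The sketch below models only the mask bookkeeping in user space; the lock type, the bit layout, and acop_copro_type_bit() are simplified stand-ins for the powerpc code, and the register writes and cross-CPU broadcast are omitted.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified per-mm state: one bit per usable coprocessor type. */
struct mm_cop_state {
    pthread_mutex_t cop_lock;       /* stands in for mm->context.cop_lockp */
    uint64_t acop;
};

/* Illustrative mapping of a coprocessor type to its ACOP bit. */
static uint64_t acop_copro_type_bit(unsigned int ct)
{
    return 1ULL << ct;
}

static void use_cop(struct mm_cop_state *mm, unsigned int ct)
{
    pthread_mutex_lock(&mm->cop_lock);
    mm->acop |= acop_copro_type_bit(ct);    /* grant this CT to the mm */
    pthread_mutex_unlock(&mm->cop_lock);
    /* the real code would also push the new value to CPUs running this mm */
}

static void drop_cop(struct mm_cop_state *mm, unsigned int ct)
{
    pthread_mutex_lock(&mm->cop_lock);
    mm->acop &= ~acop_copro_type_bit(ct);   /* revoke it again */
    pthread_mutex_unlock(&mm->cop_lock);
}

int main(void)
{
    struct mm_cop_state mm = { PTHREAD_MUTEX_INITIALIZER, 0 };

    use_cop(&mm, 3);
    printf("acop after use_cop(3):  %#llx\n", (unsigned long long)mm.acop);
    drop_cop(&mm, 3);
    printf("acop after drop_cop(3): %#llx\n", (unsigned long long)mm.acop);
    return 0;
}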
/arch/tile/include/gxio/ |
H A D | iorpc_mpipe_info.h | 36 int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, 39 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, 44 int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, 47 int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
|
H A D | iorpc_usb_host.h | 34 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, 37 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, 40 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, 43 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
|
H A D | mpipe.h | 90 * mPIPE context's set of open links; all VLANs; and all dMACs. 315 /* A context object used to manage mPIPE hardware resources. */ 338 /* Initialize an mPIPE context. 343 * @param context Context object to be initialized. 345 * context. 347 extern int gxio_mpipe_init(gxio_mpipe_context_t *context, 350 /* Destroy an mPIPE context. 356 * will destroy the mPIPE context as part of process teardown. 358 * @param context Context object to be destroyed. 360 extern int gxio_mpipe_destroy(gxio_mpipe_context_t *context); 457 gxio_mpipe_push_buffer(gxio_mpipe_context_t *context, unsigned int stack, void *buffer) argument 487 gxio_mpipe_pop_buffer(gxio_mpipe_context_t *context, unsigned int stack) argument 776 gxio_mpipe_credit(gxio_mpipe_context_t *context, int ring, int bucket, unsigned int count) argument 941 gxio_mpipe_context_t *context; member in struct:__anon2901 1119 gxio_mpipe_context_t *context; member in struct:__anon2902 1329 gxio_mpipe_context_t *context; member in struct:__anon2903 1720 gxio_mpipe_context_t *context; member in struct:__anon2904 [all...] |
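A rough usage outline of the context lifecycle visible above, as a sketch only: it assumes gxio_mpipe_init()'s second parameter (not shown in this excerpt) is the mPIPE shim instance number, assumes gxio_mpipe_pop_buffer() returns the buffer pointer or NULL, and assumes a buffer stack has already been configured as stack index 0. Error handling is minimal and the fragment only builds against the TILE-Gx gxio headers.

#include <gxio/mpipe.h>

int demo_buffer_roundtrip(void)
{
    gxio_mpipe_context_t context;
    int rc = gxio_mpipe_init(&context, 0);      /* attach to mPIPE instance 0 (assumed arg) */
    if (rc != 0)
        return rc;

    void *buf = gxio_mpipe_pop_buffer(&context, 0);   /* borrow a buffer from stack 0 */
    if (buf != NULL)
        gxio_mpipe_push_buffer(&context, 0, buf);     /* and return it to the stack */

    return gxio_mpipe_destroy(&context);        /* release the hardware resources */
}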
/arch/x86/kernel/ |
H A D | ldt.c | 27 load_LDT(&current->active_mm->context); 110 mutex_init(&mm->context.lock); 111 mm->context.size = 0; 113 if (old_mm && old_mm->context.size > 0) { 114 mutex_lock(&old_mm->context.lock); 115 retval = copy_ldt(&mm->context, &old_mm->context); 116 mutex_unlock(&old_mm->context.lock); 128 if (mm->context.size) { 134 paravirt_free_ldt(mm->context [all...] |
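init_new_context() here starts the child with an empty LDT and copies the parent's table, under the parent's context lock, only when one actually exists. A user-space model of that copy-on-fork decision is sketched below; ldt_context, copy_ldt(), and the entry type are stand-ins for the x86 structures.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for mm->context on x86: a lock, an entry count, and the table. */
struct ldt_context {
    pthread_mutex_t lock;
    int size;                   /* number of LDT entries in use */
    uint64_t *entries;
};

static int copy_ldt(struct ldt_context *new, const struct ldt_context *old)
{
    new->entries = malloc(old->size * sizeof(*old->entries));
    if (!new->entries)
        return -1;
    memcpy(new->entries, old->entries, old->size * sizeof(*old->entries));
    new->size = old->size;
    return 0;
}

/* Model of init_new_context(): empty LDT by default, copied only if the
 * parent has one, with the parent's table held stable by its lock. */
static int init_new_context(struct ldt_context *mm, struct ldt_context *old_mm)
{
    int retval = 0;

    pthread_mutex_init(&mm->lock, NULL);
    mm->size = 0;
    mm->entries = NULL;

    if (old_mm && old_mm->size > 0) {
        pthread_mutex_lock(&old_mm->lock);
        retval = copy_ldt(mm, old_mm);
        pthread_mutex_unlock(&old_mm->lock);
    }
    return retval;
}

int main(void)
{
    struct ldt_context parent = { PTHREAD_MUTEX_INITIALIZER, 2,
                                  (uint64_t [2]){ 0x1, 0x2 } };
    struct ldt_context child;

    init_new_context(&child, &parent);
    printf("child inherited %d LDT entries\n", child.size);
    free(child.entries);
    return 0;
}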
/arch/sparc/mm/ |
H A D | tsb.c | 77 spin_lock_irqsave(&mm->context.lock, flags); 79 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; 80 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; 86 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { 87 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; 88 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; 94 spin_unlock_irqrestore(&mm->context.lock, flags); 101 spin_lock_irqsave(&mm->context.lock, flags); 103 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; 104 nentries = mm->context [all...] |
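tsb.c flushes entries from the base-page TSB and, when one has been allocated, the huge-page TSB, all inside one critical section on mm->context.lock so the tables cannot be resized mid-flush. The sketch below models that lock-then-walk-each-table shape in user space; the table layout, flush_one_table(), and the function name are illustrative only.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for one TSB: a base address and an entry count. */
struct tsb_block {
    unsigned long *tsb;
    unsigned long nentries;
};

enum { MM_TSB_BASE, MM_TSB_HUGE, MM_NUM_TSBS };

struct mm_context {
    pthread_mutex_t lock;               /* stands in for mm->context.lock */
    struct tsb_block tsb_block[MM_NUM_TSBS];
};

static void flush_one_table(unsigned long *base, unsigned long nentries)
{
    for (unsigned long i = 0; i < nentries; i++)
        base[i] = 0;                    /* invalidate every entry (model) */
}

/* Model of the flush path: both tables are handled inside one critical
 * section, the huge-page table only if it has been allocated. */
static void flush_tsb_user_model(struct mm_context *ctx)
{
    pthread_mutex_lock(&ctx->lock);

    flush_one_table(ctx->tsb_block[MM_TSB_BASE].tsb,
                    ctx->tsb_block[MM_TSB_BASE].nentries);

    if (ctx->tsb_block[MM_TSB_HUGE].tsb != NULL)
        flush_one_table(ctx->tsb_block[MM_TSB_HUGE].tsb,
                        ctx->tsb_block[MM_TSB_HUGE].nentries);

    pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
    unsigned long base_entries[8] = { [0] = 0xdead };
    struct mm_context ctx = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .tsb_block = { [MM_TSB_BASE] = { base_entries, 8 } },
    };

    flush_tsb_user_model(&ctx);
    printf("entry 0 after flush: %#lx\n", base_entries[0]);
    return 0;
}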
/arch/arm64/mm/ |
H A D | context.c | 2 * Based on arch/arm/mm/context.c 39 * We fork()ed a process, and we need a new context for the child to run in. 43 mm->context.id = 0; 44 raw_spin_lock_init(&mm->context.id_lock); 64 * mm->context.id could be set from different CPUs during the 66 * mm->context.id_lock has to be IRQ-safe. 68 raw_spin_lock_irqsave(&mm->context.id_lock, flags); 69 if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { 74 mm->context.id = asid; 77 raw_spin_unlock_irqrestore(&mm->context [all...] |
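The arm64 allocator tags each ASID with a generation in its upper bits; a context whose generation no longer matches cpu_last_asid (or whose id is still 0 from init_new_context()) gets a fresh ASID under the mm's IRQ-safe id_lock. A compact user-space model of that generation test follows; the bit split, the lock, and the rollover handling (where the real code would flush TLBs) are simplified stand-ins.

#include <stdint.h>
#include <stdio.h>

#define MAX_ASID_BITS 16                          /* illustrative split */
#define ASID_MASK     ((1UL << MAX_ASID_BITS) - 1)

static uint64_t cpu_last_asid = 1UL << MAX_ASID_BITS;   /* generation 1, asid 0 */

struct mm_context { uint64_t id; };               /* 0 means "never assigned" */

/* Model of the switch-time check: reuse the id when its generation matches,
 * otherwise hand out the next ASID, bumping the generation on wrap. */
static uint64_t check_context(struct mm_context *ctx)
{
    if (ctx->id && !((ctx->id ^ cpu_last_asid) >> MAX_ASID_BITS))
        return ctx->id;                           /* still in the live generation */

    ctx->id = ++cpu_last_asid;                    /* new ASID, current generation */
    if ((ctx->id & ASID_MASK) == 0)
        ctx->id = ++cpu_last_asid;                /* skip ASID 0 after a wrap */
    return ctx->id;
}

int main(void)
{
    struct mm_context a = { 0 }, b = { 0 };

    printf("a -> %#llx\n", (unsigned long long)check_context(&a));
    printf("b -> %#llx\n", (unsigned long long)check_context(&b));
    printf("a again -> %#llx (unchanged)\n",
           (unsigned long long)check_context(&a));
    return 0;
}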
/arch/tile/gxio/ |
H A D | iorpc_usb_host.c | 22 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, argument 33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 44 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, argument 53 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 64 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base) argument 71 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), 85 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context, argument 94 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
H A D | iorpc_uart.c | 22 int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x, argument 33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 43 int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base) argument 50 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), 64 int gxio_uart_check_mmio_offset(gxio_uart_context_t *context, argument 73 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
H A D | iorpc_trio.c | 24 int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, argument 34 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 47 int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, argument 58 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 70 int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, argument 81 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 94 int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, argument 105 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 118 int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, argument 130 return hv_dev_pwrite(context 148 gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context, unsigned int map, unsigned long va, uint64_t size, unsigned int asid, unsigned int mac, uint64_t bus_address, unsigned int node, unsigned int order_mode) argument 178 gxio_trio_get_port_property(gxio_trio_context_t *context, struct pcie_trio_ports_property *trio_ports) argument 201 gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x, int inter_y, int inter_ipi, int inter_event, unsigned int mac, unsigned int intx) argument 230 gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x, int inter_y, int inter_ipi, int inter_event, unsigned int mac, unsigned int mem_map, uint64_t mem_map_base, uint64_t mem_map_limit, unsigned int asid) argument 262 gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps, uint16_t mrs, unsigned int mac) argument 282 gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac) argument 299 gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac) argument 316 gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base) argument 337 gxio_trio_check_mmio_offset(gxio_trio_context_t *context, unsigned long offset, unsigned long size) argument [all...] |
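All of these gxio_*.c stubs share one marshalling shape: pack the arguments into a per-call params struct and hand the whole struct to the hypervisor through the context's fd, with the IORPC opcode passed last, exactly as the hv_dev_pwrite() calls above show. The self-contained sketch below reproduces that shape with invented names; everything prefixed "example"/"HYPOTHETICAL" is a placeholder, and the local HV_VirtAddr typedef and hv_dev_pwrite() mock merely stand in for the real hypervisor declarations, whose exact types may differ.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins so the sketch is self-contained; on a real TILE-Gx build these
 * come from the hypervisor/gxio headers. */
typedef unsigned long HV_VirtAddr;
typedef struct { int fd; } gxio_example_context_t;

static int hv_dev_pwrite(int fd, int flags, HV_VirtAddr buf, size_t len,
                         unsigned int opcode)
{
    printf("fd=%d opcode=%#x payload=%zu bytes\n", fd, opcode, len);
    (void)flags; (void)buf;
    return 0;
}

/* Hypothetical request layout; each real stub above defines its own. */
struct example_set_threshold_param {
    uint64_t threshold;
    unsigned int channel;
};

#define HYPOTHETICAL_OP_SET_THRESHOLD 0x1234   /* invented opcode */

/* Same shape as the stubs above: fill the params struct, then push the whole
 * struct to the hypervisor through the context's fd. */
int example_set_threshold(gxio_example_context_t *context,
                          unsigned int channel, uint64_t threshold)
{
    struct example_set_threshold_param temp;
    struct example_set_threshold_param *params = &temp;

    params->threshold = threshold;
    params->channel = channel;

    return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr)params, sizeof(*params),
                         HYPOTHETICAL_OP_SET_THRESHOLD);
}

int main(void)
{
    gxio_example_context_t ctx = { .fd = 3 };
    return example_set_threshold(&ctx, 1, 4096);
}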
/arch/cris/mm/ |
H A D | tlb.c | 17 * The running context is R_MMU_CONTEXT, and each TLB entry contains a 40 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm)); 52 old_mm->context.page_id = NO_CONTEXT; 57 mm->context.page_id = map_replace_ptr; 67 * if needed, get a new MMU context for the mm. otherwise nothing is done. 73 if(mm->context.page_id == NO_CONTEXT) 77 /* called by __exit_mm to destroy the used MMU context if any before 88 if(mm->context.page_id != NO_CONTEXT) { 89 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); 91 page_id_map[mm->context [all...] |
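cris draws its page_id values from a small fixed pool and does so lazily: get_mmu_context() allocates only when the mm still has NO_CONTEXT, and allocation may evict whichever mm holds the next slot in a round-robin replacement map. A user-space model of that recycling follows; the pool size and names are illustrative.

#include <stdio.h>

#define NUM_PAGEID   64                  /* illustrative pool size */
#define NO_CONTEXT   (-1)

struct mm { int page_id; };

static struct mm *page_id_map[NUM_PAGEID];    /* who currently owns each id */
static int map_replace_ptr = 1;               /* round-robin victim pointer; 0 is the kernel's */

/* Model of alloc_context(): evict whichever mm holds the next slot, then
 * hand that slot to the new mm. */
static void alloc_context(struct mm *mm)
{
    struct mm *old_mm = page_id_map[map_replace_ptr];

    if (old_mm)
        old_mm->page_id = NO_CONTEXT;    /* victim must re-allocate later */

    mm->page_id = map_replace_ptr;
    page_id_map[map_replace_ptr] = mm;

    if (++map_replace_ptr == NUM_PAGEID)
        map_replace_ptr = 1;             /* wrap, skipping the kernel's id 0 */
}

/* Model of get_mmu_context(): lazy - nothing to do if the mm already has an id. */
static void get_mmu_context(struct mm *mm)
{
    if (mm->page_id == NO_CONTEXT)
        alloc_context(mm);
}

int main(void)
{
    struct mm a = { NO_CONTEXT }, b = { NO_CONTEXT };

    get_mmu_context(&a);
    get_mmu_context(&a);                 /* second call is a no-op */
    get_mmu_context(&b);
    printf("a=%d b=%d\n", a.page_id, b.page_id);
    return 0;
}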
/arch/parisc/include/asm/ |
H A D | tlbflush.h | 54 if (mm->context != 0) 55 free_sid(mm->context); 56 mm->context = alloc_sid(); 58 load_context(mm->context); 71 sid = vma->vm_mm->context; 82 #define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
|
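Rather than walking the TLB, the parisc flush_tlb_mm() path shown above retires the address space's space ID and assigns a fresh one, reloading it immediately if the mm is the one currently running; stale TLB entries keyed by the old SID then simply never match again. A small user-space model of that ID-recycling trick follows; alloc_sid()/free_sid() are reduced to a counter plus a free list for illustration.

#include <stdio.h>

#define MAX_SID 8                       /* tiny pool, just for the demo */

static unsigned long free_list[MAX_SID];
static int free_top;
static unsigned long next_unused = 1;   /* SID 0 means "no context" */

static unsigned long alloc_sid(void)
{
    if (next_unused < MAX_SID)
        return next_unused++;
    if (free_top > 0)
        return free_list[--free_top];
    return 0;                           /* pool exhausted */
}

static void free_sid(unsigned long sid)
{
    if (sid != 0 && free_top < MAX_SID)
        free_list[free_top++] = sid;
}

struct mm { unsigned long context; };

static void load_context(unsigned long sid)
{
    printf("space-id register now %lu\n", sid);   /* stands in for the SR load */
}

/* Model of flush_tlb_mm(): drop the old SID, take a new one, and reload it
 * when this mm is the active one, so old TLB entries stop matching. */
static void flush_tlb_mm(struct mm *mm, int is_current)
{
    if (mm->context != 0)
        free_sid(mm->context);
    mm->context = alloc_sid();
    if (is_current)
        load_context(mm->context);
}

int main(void)
{
    struct mm m = { 0 };

    flush_tlb_mm(&m, 1);
    flush_tlb_mm(&m, 1);     /* a second flush hands out yet another SID */
    return 0;
}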
/arch/m68k/include/asm/ |
H A D | mmu_context.h | 34 if (mm->context != NO_CONTEXT) 47 mm->context = ctx; 52 * Set up the context for a new address space. 54 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) 57 * We're finished using the context for an address space. 61 if (mm->context != NO_CONTEXT) { 62 clear_bit(mm->context, context_map); 63 mm->context = NO_CONTEXT; 68 static inline void set_context(mm_context_t context, pgd_t *pgd) argument 70 __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context)); [all...] |
/arch/arm/include/asm/ |
H A D | mmu_context.h | 29 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) 48 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) 59 mm->context.switch_pending = 1; 70 if (mm && mm->context.switch_pending) { 78 if (mm->context.switch_pending) { 79 mm->context.switch_pending = 0; 98 * mm: describes the currently active mm context
|
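When the new mm cannot be installed right away, the ARM code above defers the work by setting context.switch_pending; the real page table switch then happens in finish_arch_post_lock_switch(), which re-checks and clears the flag. The sketch below models that two-phase handoff; the "cannot switch now" condition and the cpu_switch_mm() stand-in are simplifications.

#include <stdbool.h>
#include <stdio.h>

struct mm_context { bool switch_pending; };
struct mm { struct mm_context context; };

static void cpu_switch_mm(struct mm *mm)        /* stands in for the real switch */
{
    printf("page tables switched to mm %p\n", (void *)mm);
}

/* Phase 1: called from the scheduler path; defer if switching is unsafe now. */
static void check_and_switch_context(struct mm *next, bool irqs_disabled)
{
    if (irqs_disabled) {
        next->context.switch_pending = true;    /* remember the debt */
        return;
    }
    cpu_switch_mm(next);
}

/* Phase 2: called once it is safe again; pay the debt exactly once. */
static void finish_arch_post_lock_switch(struct mm *mm)
{
    if (mm && mm->context.switch_pending) {
        mm->context.switch_pending = false;
        cpu_switch_mm(mm);
    }
}

int main(void)
{
    struct mm task = { { false } };

    check_and_switch_context(&task, true);      /* deferred */
    printf("pending: %d\n", task.context.switch_pending);
    finish_arch_post_lock_switch(&task);        /* actually switches here */
    return 0;
}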
H A D | mmu.h | 19 #define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
|
/arch/hexagon/include/asm/ |
H A D | mmu_context.h | 2 * MM context support for the Hexagon architecture 52 * init_new_context - initialize context related info for new mm_struct instance 59 /* mm->context is set up by pgd_alloc */ 64 * Switch active mm context 75 if (next->context.generation < prev->context.generation) { 79 next->context.generation = prev->context.generation; 82 __vmnewmap((void *)next->context.ptbase);
|
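The Hexagon switch compares per-mm generation counters so that a task whose page table predates the latest kernel-mapping change gets refreshed before __vmnewmap() points the MMU at its table base. A condensed user-space model of that staleness check follows; the "refresh" step is reduced to copying one small array, and __vmnewmap() is mocked.

#include <stdio.h>
#include <string.h>

#define KERNEL_PTRS 4                    /* illustrative kernel slice of a page table */

struct mm_context {
    unsigned long generation;            /* bumped when kernel mappings change */
    unsigned long ptbase[KERNEL_PTRS];   /* this mm's page table (kernel slice) */
};

static unsigned long kernel_master[KERNEL_PTRS];   /* the up-to-date kernel entries */

static void __vmnewmap(void *ptbase)     /* mock for the hypervisor call */
{
    printf("MMU now walking table at %p\n", ptbase);
}

/* Model of the switch: a task whose generation lags gets its kernel entries
 * refreshed before its table base is handed to the MMU. */
static void switch_mm_model(struct mm_context *prev, struct mm_context *next)
{
    if (next->generation < prev->generation) {
        memcpy(next->ptbase, kernel_master, sizeof(kernel_master));
        next->generation = prev->generation;
    }
    __vmnewmap(next->ptbase);
}

int main(void)
{
    struct mm_context cur = { .generation = 2 }, stale = { .generation = 1 };

    kernel_master[0] = 0xabc;            /* pretend a kernel mapping was added */
    switch_mm_model(&cur, &stale);
    printf("stale task's kernel entry 0: %#lx (refreshed)\n", stale.ptbase[0]);
    return 0;
}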
/arch/score/include/asm/ |
H A D | mmu_context.h | 16 * into the context register. 57 mm->context = asid; 62 * Initialize the context related info for a new mm_struct 68 mm->context = 0; 78 if ((next->context ^ asid_cache) & ASID_VERSION_MASK) 81 pevn_set(next->context); 87 * Destroy context related info for an mm_struct that is about 99 * the context for the new mm so we see the new mappings. 108 pevn_set(next->context);
|
/arch/m68k/sun3/ |
H A D | mmu_emu.c | 56 context. 0xffffffff is a marker for kernel context */ 61 /* has this context been mmdrop'd? */ 203 /* erase the mappings for a dead context. Uses the pg_dir for hints 207 context for when they're cleared */ 208 void clear_context(unsigned long context) argument 213 if(context) { 214 if(!ctx_alloc[context]) 215 panic("clear_context: context not allocated\n"); 217 ctx_alloc[context] 282 mmu_emu_map_pmeg(int context, int vaddr) argument 357 unsigned char context; local [all...] |
/arch/xtensa/include/asm/ |
H A D | mmu_context.h | 2 * Switch an MMU context. 38 * any user or kernel context. We use the reserved values in the 84 mm->context.asid[cpu] = asid; 85 mm->context.cpu = cpu; 95 unsigned long asid = mm->context.asid[cpu]; 106 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); 111 * Initialize the context related info for a new mm_struct 121 mm->context.asid[cpu] = NO_CONTEXT; 123 mm->context.cpu = -1; 131 int migrated = next->context [all...] |
/arch/ia64/kvm/ |
H A D | misc.h | 60 /* Get host context of the vcpu */ 61 static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu) 63 union context *ctx = &vcpu->arch.host; 67 /* Get guest context of the vcpu */ 68 static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu) 70 union context *ctx = &vcpu->arch.guest;
|
/arch/metag/include/asm/ |
H A D | mmu_context.h | 22 /* We use context to store a pointer to the page holding the 24 * running the pgd and context fields should be equal. 26 mm->context.pgd_base = (unsigned long) mm->pgd; 29 INIT_LIST_HEAD(&mm->context.tcm); 43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { 79 /* prev->context == prev->pgd in the case where we are initially 81 if (prev->context.pgd_base != (unsigned long) prev->pgd) { 83 ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i]; 88 prev->pgd = (pgd_t *) prev->context.pgd_base; 91 next->pgd[i] = ((pgd_t *) next->context [all...] |
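On this metag variant the hardware walks a single shared table, so each mm keeps its own copy behind context.pgd_base: on a switch, the outgoing mm's user entries are written back to its private copy and the incoming mm's entries are loaded into the shared table, after which the pgd pointers are swapped accordingly. A condensed user-space model of that save/restore follows; the sizes, names, and the two-array layout are illustrative only.

#include <stdio.h>
#include <string.h>

#define USER_PTRS 4                      /* illustrative number of user pgd slots */

typedef unsigned long pgd_t;

struct mm {
    pgd_t *pgd;                          /* what the "hardware" currently walks */
    pgd_t pgd_base[USER_PTRS];           /* this mm's private copy (context.pgd_base) */
};

static pgd_t hardware_pgd[USER_PTRS];    /* the one table the MMU actually uses */

/* Model of the metag-style switch: park the outgoing mm's entries in its
 * private copy, then refill the shared table from the incoming mm's copy. */
static void switch_mmu(struct mm *prev, struct mm *next)
{
    memcpy(prev->pgd_base, hardware_pgd, sizeof(hardware_pgd));  /* save prev */
    prev->pgd = prev->pgd_base;

    memcpy(hardware_pgd, next->pgd_base, sizeof(hardware_pgd));  /* load next */
    next->pgd = hardware_pgd;
}

int main(void)
{
    struct mm a = { .pgd_base = { 1, 1, 1, 1 } };
    struct mm b = { .pgd_base = { 2, 2, 2, 2 } };

    a.pgd = hardware_pgd;
    b.pgd = b.pgd_base;
    memcpy(hardware_pgd, a.pgd_base, sizeof(hardware_pgd));  /* a is running */

    switch_mmu(&a, &b);
    printf("hardware slot 0 now holds %lu (b's entry)\n", hardware_pgd[0]);
    return 0;
}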
/arch/frv/mm/ |
H A D | mmu-context.c | 1 /* mmu-context.c: MMU context allocation and management 27 * initialise a new context 31 memset(&mm->context, 0, sizeof(mm->context)); 32 INIT_LIST_HEAD(&mm->context.id_link); 33 mm->context.itlb_cached_pge = 0xffffffffUL; 34 mm->context.dtlb_cached_pge = 0xffffffffUL; 41 * make sure a kernel MMU context has a CPU context numbe [all...] |