Searched defs:next (Results 1 - 25 of 27) sorted by relevance

/mm/

pagewalk.c
31 unsigned long next; local
37 next = pmd_addr_end(addr, end);
40 err = walk->pte_hole(addr, next, walk);
50 err = walk->pmd_entry(pmd, addr, next, walk);
64 err = walk_pte_range(pmd, addr, next, walk);
67 } while (pmd++, addr = next, addr != end);
76 unsigned long next; local
81 next = pud_addr_end(addr, end);
84 err = walk->pte_hole(addr, next, walk);
90 err = walk->pud_entry(pud, addr, next, walk);
113 unsigned long next; local
171 unsigned long next; local
[all...]
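
The pagewalk.c hits above (and the matching loops in gup.c, mprotect.c, mempolicy.c and vmalloc.c further down) all use the same range-walk idiom: next is set to whichever comes first, the end of the current page-table entry or the end of the requested range, and the loop advances with addr = next. A minimal user-space sketch of that clamping; toy_pmd_addr_end() and the hard-coded 2 MiB size stand in for the kernel's pmd_addr_end() and PMD_SIZE:

    #include <stdio.h>

    #define TOY_PMD_SIZE (2UL << 20)              /* 2 MiB, as on x86_64 */
    #define TOY_PMD_MASK (~(TOY_PMD_SIZE - 1))

    /* end of the PMD covering addr, clamped to end (overflow-safe compare) */
    static unsigned long toy_pmd_addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + TOY_PMD_SIZE) & TOY_PMD_MASK;
            return boundary - 1 < end - 1 ? boundary : end;
    }

    int main(void)
    {
            unsigned long addr = 0x1ff000, end = 0x600000, next;

            do {
                    next = toy_pmd_addr_end(addr, end);
                    /* the real walkers call pmd_entry()/pte_hole() on [addr, next) */
                    printf("visit [%#lx, %#lx)\n", addr, next);
            } while (addr = next, addr != end);

            return 0;
    }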

mincore.c
120 unsigned long next; local
129 next = addr + PAGE_SIZE;
131 mincore_unmapped_range(vma, addr, next, vec);
155 } while (ptep++, addr = next, addr != end);
163 unsigned long next; local
168 next = pmd_addr_end(addr, end);
170 if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
171 vec += (next - addr) >> PAGE_SHIFT;
177 mincore_unmapped_range(vma, addr, next, vec);
179 mincore_pte_range(vma, pmd, addr, next, vec);
188 unsigned long next; local
206 unsigned long next; local
[all...]

dmapool.c
72 char *next; local
76 next = buf;
79 temp = scnprintf(next, size, "poolinfo - 0.1\n");
81 next += temp;
96 temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
101 next += temp;
212 unsigned int next = offset + pool->size; local
213 if (unlikely((next + pool->size) >= next_boundary)) {
214 next = next_boundary;
217 *(int *)(page->vaddr + offset) = next;
[all...]
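
The dmapool.c hit above builds its free list inside the blocks themselves: each free block starts with the offset of the next free block, clamped so the chain stops at the allocation boundary. A toy sketch of that in-place offset chain; PAGE_BYTES, BLOCK_SIZE and the flat boundary handling are simplifications, not the dma_pool API:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_BYTES 4096
    #define BLOCK_SIZE 64

    int main(void)
    {
            char *vaddr = malloc(PAGE_BYTES);
            unsigned int offset;

            if (!vaddr)
                    return 1;

            /* every free block begins with the offset of the next free block */
            for (offset = 0; offset < PAGE_BYTES; offset += BLOCK_SIZE) {
                    unsigned int next = offset + BLOCK_SIZE;

                    /* last block: terminate the chain at the boundary */
                    if (next + BLOCK_SIZE > PAGE_BYTES)
                            next = PAGE_BYTES;
                    *(unsigned int *)(vaddr + offset) = next;
            }

            /* walk the chain: 0 -> 64 -> 128 -> ... */
            for (offset = 0; offset < PAGE_BYTES;
                 offset = *(unsigned int *)(vaddr + offset))
                    printf("free block at %u\n", offset);

            free(vaddr);
            return 0;
    }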

gup.c
881 unsigned long next; local
888 next = pmd_addr_end(addr, end);
901 if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
905 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
907 } while (pmdp++, addr = next, addr != end);
915 unsigned long next; local
922 next = pud_addr_end(addr, end);
926 if (!gup_huge_pud(pud, pudp, addr, next, write,
929 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
931 } while (pudp++, addr = next, addr != end);
945 unsigned long next, flags; local
[all...]

mprotect.c
139 unsigned long next; local
148 next = pmd_addr_end(addr, end);
159 if (next - addr != HPAGE_PMD_SIZE)
177 this_pages = change_pte_range(vma, pmd, addr, next, newprot,
180 } while (pmd++, addr = next, addr != end);
195 unsigned long next; local
200 next = pud_addr_end(addr, end);
203 pages += change_pmd_range(vma, pud, addr, next, newprot,
205 } while (pud++, addr = next, addr != end);
216 unsigned long next; local
[all...]

mremap.c
169 unsigned long extent, next, old_end; local
184 next = (old_addr + PMD_SIZE) & PMD_MASK;
185 /* even if next overflowed, extent below will be ok */
186 extent = next - old_addr;
220 next = (new_addr + PMD_SIZE) & PMD_MASK;
221 if (extent > next - new_addr)
222 extent = next - new_addr;
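
The mremap.c hit above sizes each copy step so it never crosses a PMD boundary on either the old or the new address. A stand-alone sketch of that extent calculation; move_extent() is an invented helper and PMD_SIZE is hard-coded rather than taken from the kernel headers:

    #include <stdio.h>

    #define PMD_SIZE (2UL << 20)
    #define PMD_MASK (~(PMD_SIZE - 1))

    static unsigned long move_extent(unsigned long old_addr, unsigned long new_addr,
                                     unsigned long old_end)
    {
            unsigned long next, extent;

            next = (old_addr + PMD_SIZE) & PMD_MASK;
            /* even if next overflowed, extent below will be ok */
            extent = next - old_addr;
            if (extent > old_end - old_addr)
                    extent = old_end - old_addr;

            /* clamp again so the destination side stays within one PMD too */
            next = (new_addr + PMD_SIZE) & PMD_MASK;
            if (extent > next - new_addr)
                    extent = next - new_addr;
            return extent;
    }

    int main(void)
    {
            printf("%#lx\n", move_extent(0x200000, 0x3ff000, 0x800000)); /* 0x1000 */
            printf("%#lx\n", move_extent(0x200000, 0x400000, 0x800000)); /* 0x200000 */
            return 0;
    }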

util.c
144 struct vm_area_struct *next; local
148 next = prev->vm_next;
153 next = rb_entry(rb_parent,
156 next = NULL;
158 vma->vm_next = next;
159 if (next)
160 next->vm_prev = vma;

slob.c
22 * sufficient free blocks (using a next-fit-like approach) followed by
78 * or offset of next block if -ve (in SLOB_UNITs).
80 * Free blocks of size 1 unit simply contain the offset of the next block.
82 * memory, and the offset of the next free block in the second SLOB_UNIT.
143 * Encode the given size and next info into a free slob block s.
145 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) argument
148 slobidx_t offset = next - base;
168 * Return the next free slob block pointer after this one.
173 slobidx_t next; local
176 next
230 slob_t *next; local
343 slob_t *prev, *next, *b = (slob_t *)block; local
[all...]
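
The slob.c comments above describe the free-block encoding: a block of two or more units stores its size in the first unit and the offset of the next free block in the second, while a one-unit block stores a negated offset. A stand-alone sketch of that encoding; unlike the real set_slob(), which derives the page base from the block address, this toy passes base explicitly:

    #include <stdio.h>

    typedef int slobidx_t;
    typedef struct { slobidx_t units; } slob_t;

    /* encode size and the offset of next into free block s */
    static void set_slob(slob_t *s, slob_t *base, slobidx_t size, slob_t *next)
    {
            slobidx_t offset = next - base;

            if (size > 1) {
                    s[0].units = size;
                    s[1].units = offset;
            } else {
                    s[0].units = -offset;   /* 1-unit block: negated offset */
            }
    }

    /* return the next free block after s */
    static slob_t *slob_next(slob_t *s, slob_t *base)
    {
            slobidx_t next = s[0].units < 0 ? -s[0].units : s[1].units;
            return base + next;
    }

    int main(void)
    {
            slob_t page[256] = { { 0 } };

            set_slob(&page[0], page, 4, &page[10]);   /* 4-unit free block */
            set_slob(&page[10], page, 1, &page[40]);  /* 1-unit free block */

            printf("next after 0: %ld\n", (long)(slob_next(&page[0], page) - page));
            printf("next after 10: %ld\n", (long)(slob_next(&page[10], page) - page));
            return 0;
    }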

compaction.c
43 struct page *page, *next; local
46 list_for_each_entry_safe(page, next, freelist, lru) {
879 * Remember where the free scanner should restart next time,
882 * now points at block_end_pfn, which is the start of the next
893 * In the next loop iteration, zone->compact_cached_free_pfn
944 freepage = list_entry(cc->freepages.next, struct page, lru);
1063 /* Let the next compaction start anew. */

memblock.c
435 struct memblock_region *next = &type->regions[i + 1]; local
437 if (this->base + this->size != next->base ||
439 memblock_get_region_node(next) ||
440 this->flags != next->flags) {
441 BUG_ON(this->base + this->size > next->base);
446 this->size += next->size;
447 /* move forward from next + 1, index of which is i + 2 */
448 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
[all...]
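
The memblock.c hit above merges a region with its successor when the two are exactly contiguous (the real code also requires matching node id and flags), then closes the hole with memmove(). A toy version of just the contiguity merge over a sorted array; struct region and merge_regions() are invented for the example:

    #include <stdio.h>
    #include <string.h>

    struct region { unsigned long base, size; };

    static int merge_regions(struct region *r, int cnt)
    {
            int i = 0;

            while (i < cnt - 1) {
                    struct region *this = &r[i], *next = &r[i + 1];

                    if (this->base + this->size != next->base) {
                            i++;
                            continue;
                    }
                    this->size += next->size;
                    /* move forward from next + 1, dropping next */
                    memmove(next, next + 1, (cnt - (i + 2)) * sizeof(*next));
                    cnt--;
            }
            return cnt;
    }

    int main(void)
    {
            struct region r[] = { {0, 4096}, {4096, 8192}, {16384, 4096} };
            int cnt = merge_regions(r, 3);

            for (int i = 0; i < cnt; i++)
                    printf("[%lu, %lu)\n", r[i].base, r[i].base + r[i].size);
            return 0;
    }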

memory-failure.c
36 * - pass bad pages to kdump next kernel
350 struct to_kill *tk, *next; local
352 list_for_each_entry_safe (tk, next, to_kill, nd) {
662 * The EIO will be only reported on the next IO

rmap.c
327 struct anon_vma_chain *avc, *next; local
334 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
357 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
1009 /* address might be in next vma when migration races vma_adjust */
1761 /* address might be in next vma when migration races vma_adjust */

zsmalloc.c
212 /* Handle of next free chunk (encodes <PFN, obj_idx>) */
213 void *next; member in struct:link_free
442 *head = (struct page *)list_entry((*head)->lru.next,
530 struct page *next; local
533 next = NULL;
535 next = (struct page *)page_private(page);
537 next = list_entry(page->lru.next, struct page, lru);
539 return next;
646 link->next
[all...]

huge_memory.c
92 * @address: the next address inside that to be scanned
2652 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2702 /* move to next address */
2728 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2730 mm_slot->mm_node.next,
2972 struct vm_area_struct *next = vma->vm_next; local
2973 unsigned long nstart = next->vm_start;
2976 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2977 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2978 split_huge_page_address(next
[all...]

ksm.c
112 * @address: the next address inside that to be scanned
113 * @rmap_list: link to the next rmap to be scanned in the rmap_list
151 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
500 if (rmap_item->hlist.next)
674 * if exit comes before the next scan reaches it).
678 * to the next pass of ksmd - consider, for example, how ksmd might be
743 struct list_head *this, *next; local
753 break; /* proceed to next nid */
758 list_for_each_safe(this, next, &migrate_nodes) {
775 ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
1568 struct list_head *this, *next; local
1995 struct list_head *this, *next; local
[all...]

memory.c
188 if (batch->next) {
189 tlb->active = batch->next;
201 batch->next = NULL;
205 tlb->active->next = batch;
226 tlb->local.next = NULL;
250 for (batch = &tlb->local; batch; batch = batch->next) {
271 struct mmu_gather_batch *batch, *next; local
278 for (batch = tlb->local.next; batch; batch = next) {
279 next
406 unsigned long next; local
439 unsigned long next; local
475 unsigned long next; local
532 struct vm_area_struct *next = vma->vm_next; local
956 unsigned long next; local
989 unsigned long next; local
1010 unsigned long next; local
1228 unsigned long next; local
1272 unsigned long next; local
1291 unsigned long next; local
1685 unsigned long next; local
1706 unsigned long next; local
1735 unsigned long next; local
1875 unsigned long next; local
1897 unsigned long next; local
1920 unsigned long next; local
2612 struct vm_area_struct *next = vma->vm_next; local
[all...]
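
The memory.c hits above (if (batch->next) ..., tlb->active->next = batch, and the for (batch = tlb->local.next; batch; batch = next) teardown) come from the mmu_gather batching: pages to be freed are gathered into fixed-size batches chained through ->next, a new batch is appended when the active one fills, and the chain is torn down by saving next before each free. A simplified user-space model of that chaining; struct gather, BATCH_MAX and the helper names are invented, and the reuse of already-allocated batches is left out:

    #include <stdio.h>
    #include <stdlib.h>

    #define BATCH_MAX 8                     /* made-up batch capacity */

    struct batch {
            struct batch *next;
            int nr;
            void *pages[BATCH_MAX];
    };

    struct gather {
            struct batch local;             /* first batch lives inline */
            struct batch *active;
    };

    static void gather_init(struct gather *tlb)
    {
            tlb->local.next = NULL;
            tlb->local.nr = 0;
            tlb->active = &tlb->local;
    }

    static int gather_page(struct gather *tlb, void *page)
    {
            struct batch *batch = tlb->active;

            if (batch->nr == BATCH_MAX) {
                    /* the kernel first reuses batch->next if one is already
                     * allocated; this toy always appends a fresh batch */
                    struct batch *nb = calloc(1, sizeof(*nb));

                    if (!nb)
                            return -1;
                    batch->next = nb;
                    tlb->active = nb;
                    batch = nb;
            }
            batch->pages[batch->nr++] = page;
            return 0;
    }

    static void gather_finish(struct gather *tlb)
    {
            struct batch *batch, *next;

            /* free the chained batches, saving next before each free() */
            for (batch = tlb->local.next; batch; batch = next) {
                    next = batch->next;
                    free(batch);
            }
    }

    int main(void)
    {
            struct gather tlb;
            int i, dummy[20];

            gather_init(&tlb);
            for (i = 0; i < 20; i++)
                    gather_page(&tlb, &dummy[i]);
            printf("inline batch: %d pages, first chained batch: %d pages\n",
                   tlb.local.nr, tlb.local.next ? tlb.local.next->nr : 0);
            gather_finish(&tlb);
            return 0;
    }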

mempolicy.c
557 unsigned long next; local
561 next = pmd_addr_end(addr, end);
572 if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
575 } while (pmd++, addr = next, addr != end);
585 unsigned long next; local
589 next = pud_addr_end(addr, end);
594 if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
597 } while (pud++, addr = next, addr != end);
607 unsigned long next; local
611 next
745 struct vm_area_struct *next; local
1729 unsigned nid, next; local
2394 struct rb_node *next = rb_next(&n->nd); local
2523 struct rb_node *next; local
[all...]

mmap.c
270 * Close a vm structure and free it, returning the next.
274 struct vm_area_struct *next = vma->vm_next; local
283 return next;
710 struct vm_area_struct *next; local
713 prev->vm_next = next = vma->vm_next;
714 if (next)
715 next->vm_prev = prev;
732 struct vm_area_struct *next = vma->vm_next; local
742 if (next && !insert) {
745 if (end >= next
1050 struct vm_area_struct *area, *next; local
2309 struct vm_area_struct *next; local
2405 struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; local
[all...]

percpu.c
342 int next = chunk->map[i + 1]; local
345 if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
705 /* merge with next? */
1117 struct pcpu_chunk *chunk, *next; local
1127 list_for_each_entry_safe(chunk, next, free_head, list) {
1139 list_for_each_entry_safe(chunk, next, &to_free, list) {

swapfile.c
191 lh = se->list.next;
449 cluster->next = cluster_next(&cluster->index) *
469 tmp = cluster->next;
482 cluster->next = tmp + 1;
575 goto scan; /* check next one */
641 struct swap_info_struct *si, *next; local
651 plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
689 * list may have been modified; so if next is still in the
692 if (plist_node_empty(&next->avail_list))
1188 unsigned long next; local
1208 unsigned long next; local
1227 unsigned long addr, end, next; local
1561 struct list_head *p, *next; local
2931 struct list_head *this, *next; local
[all...]

vmalloc.c
72 unsigned long next; local
76 next = pmd_addr_end(addr, end);
79 vunmap_pte_range(pmd, addr, next);
80 } while (pmd++, addr = next, addr != end);
86 unsigned long next; local
90 next = pud_addr_end(addr, end);
93 vunmap_pmd_range(pud, addr, next);
94 } while (pud++, addr = next, addr != end);
100 unsigned long next; local
105 next
142 unsigned long next; local
159 unsigned long next; local
182 unsigned long next; local
2367 struct vmap_area **vas, *prev, *next; local
2554 struct vmap_area *va = p, *next; local
[all...]

shmem.c
93 pgoff_t next; /* the next page offset to be fallocated */ member in struct:shmem_falloc
632 * Move _head_ to start search for next from here.
637 if (shmem_swaplist.next != &info->swaplist)
697 struct list_head *this, *next; local
721 list_for_each_safe(this, next, &shmem_swaplist) {
730 /* found nothing in this: move on to search the next */
798 index < shmem_falloc->next)
1309 vmf->pgoff < shmem_falloc->next) {
2081 shmem_falloc.next
[all...]

vmscan.c
419 * 1 we keep it going and assume we'll be able to shrink next
979 * page to have PageReclaim set next time memcg
1201 struct page *page, *next; local
1204 list_for_each_entry_safe(page, next, page_list, lru) {
1940 * priority drop. Global direct reclaim can go to the next

hugetlb.c
278 rg = list_entry(rg->link.next, typeof(*rg), link);
636 * next node from which to allocate, handling wrap at end of node
655 * next node id whether or not we find a free huge page to free so
656 * that the next attempt to free addresses the next node.
1025 * Free huge page from pool from next node to free.
1044 list_entry(h->hugepage_freelists[node].next,
1565 struct page *page, *next; local
1567 list_for_each_entry_safe(page, next, freel, lru) {

memcontrol.c
976 unsigned long val, next; local
979 next = __this_cpu_read(memcg->stat->targets[target]);
981 if ((long)next - (long)val < 0) {
984 next = val + THRESHOLDS_EVENTS_TARGET;
987 next = val + SOFTLIMIT_EVENTS_TARGET;
990 next = val + NUMAINFO_EVENTS_TARGET;
995 __this_cpu_write(memcg->stat->targets[target], next);
1066 * Returns a next (in a pre-order walk) alive memcg (with elevated css
3446 * on the next attempt and the call should be retried later.
3453 * disappear in the next attemp
6502 struct list_head *next; local
[all...]
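
The memcontrol.c hit above rate-limits expensive statistics work by keeping a per-counter target: once the event count passes next, the work runs and next is pushed forward by a fixed step. A stand-alone sketch of that check; event_ratelimit() is an invented wrapper, the target value is illustrative, and the real code keeps its targets in per-cpu state:

    #include <stdbool.h>
    #include <stdio.h>

    #define THRESHOLDS_EVENTS_TARGET 128    /* illustrative step size */

    static bool event_ratelimit(unsigned long val, unsigned long *target)
    {
            unsigned long next = *target;

            /* signed compare so wrap-around of the event counter still works */
            if ((long)next - (long)val < 0) {
                    *target = val + THRESHOLDS_EVENTS_TARGET;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            unsigned long target = 0, fired = 0;

            for (unsigned long ev = 0; ev < 1000; ev++)
                    if (event_ratelimit(ev, &target))
                            fired++;

            printf("fired %lu times\n", fired);  /* roughly 1000 / 128 */
            return 0;
    }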

Completed in 1778 milliseconds
