Searched refs:order (Results 1 - 25 of 31) sorted by relevance

/include/asm-generic/
getorder.h
15 int order; local
20 order = fls(size);
22 order = fls64(size);
24 return order;
28 * get_order - Determine the allocation order of a memory size
29 * @size: The size for which to get the order
31 * Determine the allocation order of a particular sized block of memory. This
41 * The order returned is used to find the smallest allocation granule required
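For illustration, a minimal userspace sketch of the fls()-based rounding get_order() performs. PAGE_SHIFT of 12 (4 KiB pages) and a 64-bit unsigned long are assumptions here, and fls is modeled with __builtin_clzl rather than the kernel helper:

/* Sketch of getorder.h's computation: round size up to the next
 * power-of-two number of pages and return the exponent. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int fls_long(unsigned long x)    /* 1-based index of highest set bit */
{
        return x ? 64 - __builtin_clzl(x) : 0;
}

static int get_order_sketch(unsigned long size)
{
        size--;                  /* so exact powers of two stay at their order */
        size >>= PAGE_SHIFT;     /* bytes -> pages */
        return fls_long(size);   /* order = log2, rounded up */
}

int main(void)
{
        /* 4096 -> 0, 4097 -> 1, 16384 -> 2 */
        printf("%d %d %d\n", get_order_sketch(PAGE_SIZE),
               get_order_sketch(PAGE_SIZE + 1),
               get_order_sketch(4 * PAGE_SIZE));
        return 0;
}

So an allocation of PAGE_SIZE + 1 bytes needs an order-1 (two-page) granule, which is the "smallest allocation granule" the comment refers to.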
dma-coherent.h
11 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
28 #define dma_release_from_coherent(dev, order, vaddr) (0)
29 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
/include/linux/
compaction.h
32 extern int fragmentation_index(struct zone *zone, unsigned int order);
34 int order, gfp_t gfp_mask, nodemask_t *mask,
37 extern void compact_pgdat(pg_data_t *pgdat, int order);
39 extern unsigned long compaction_suitable(struct zone *zone, int order);
49 static inline void defer_compaction(struct zone *zone, int order) argument
54 if (order < zone->compact_order_failed)
55 zone->compact_order_failed = order;
62 static inline bool compaction_deferred(struct zone *zone, int order) argument
66 if (order < zone->compact_order_failed)
77 * Update defer tracking counters after successful compaction of given order,
81 compaction_defer_reset(struct zone *zone, int order, bool alloc_success) argument
93 compaction_restarting(struct zone *zone, int order) argument
103 try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, struct zone **candidate_zone) argument
111 compact_pgdat(pg_data_t *pgdat, int order) argument
119 compaction_suitable(struct zone *zone, int order) argument
124 defer_compaction(struct zone *zone, int order) argument
128 compaction_deferred(struct zone *zone, int order) argument
[all...]
gfp.h
195 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
295 static inline void arch_free_page(struct page *page, int order) { } argument
298 static inline void arch_alloc_page(struct page *page, int order) { } argument
302 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
306 __alloc_pages(gfp_t gfp_mask, unsigned int order, argument
309 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
313 unsigned int order)
319 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
323 unsigned int order)
327 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
312 alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) argument
322 alloc_pages_exact_node(int nid, gfp_t gfp_mask, unsigned int order) argument
334 alloc_pages(gfp_t gfp_mask, unsigned int order) argument
[all...]
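The wrappers above all funnel a request for 2^order contiguous pages into __alloc_pages_nodemask(). A hedged caller-side sketch (kernel-module context assumed; the helpers and error handling are illustrative):

/* Allocate and free a physically contiguous block of 2^order pages. */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *grab_block(unsigned int order)
{
        struct page *page = alloc_pages(GFP_KERNEL, order); /* 2^order pages */

        if (!page)
                return NULL;            /* higher orders fail more readily */
        return page_address(page);      /* kernel virtual address */
}

static void drop_block(void *addr, unsigned int order)
{
        if (addr)
                __free_pages(virt_to_page(addr), order); /* order must match */
}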
i2c-pnx.h
25 int order; /* RX Bytes to order via TX */ member in struct:i2c_pnx_mif
kmemcheck.h
11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) argument
98 kmemcheck_free_shadow(struct page *page, int order) argument
114 unsigned int order, gfp_t gfpflags)
113 kmemcheck_pagealloc_alloc(struct page *p, unsigned int order, gfp_t gfpflags) argument
memcontrol.h
161 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
333 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, argument
412 int order);
414 struct mem_cgroup *memcg, int order);
415 void __memcg_kmem_uncharge_pages(struct page *page, int order);
424 int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
425 void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
433 * @order: allocation order.
442 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) argument
474 memcg_kmem_uncharge_pages(struct page *page, int order) argument
492 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) argument
529 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) argument
534 memcg_kmem_uncharge_pages(struct page *page, int order) argument
539 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) argument
[all...]
bitops.h
62 int order; local
64 order = fls(count);
65 return order; /* We could be slightly more clever with -1 here... */
70 int order; local
72 order = fls(count) - 1;
74 order++;
75 return order;
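A minimal userspace sketch of the rounding-up variant excerpted above (get_count_order()); fls is again modeled with a GCC builtin, which is an assumption, not the kernel implementation:

/* get_count_order(): log2 of count, rounded up to the next power of two. */
#include <stdio.h>

static int fls32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static int get_count_order_sketch(unsigned int count)
{
        int order = fls32(count) - 1;   /* log2 of the highest set bit */

        if (count & (count - 1))        /* count not a power of two? */
                order++;                /* round up */
        return order;
}

int main(void)
{
        /* 1 -> 0, 8 -> 3, 9 -> 4 */
        printf("%d %d %d\n", get_count_order_sketch(1),
               get_count_order_sketch(8), get_count_order_sketch(9));
        return 0;
}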
oom.h
56 extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
65 int order, const nodemask_t *nodemask);
72 int order, nodemask_t *mask, bool force_kill);
mempool.h
64 * allocates pages of the order specified by pool_data
68 static inline mempool_t *mempool_create_page_pool(int min_nr, int order) argument
71 (void *)(long)order);
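A hedged sketch of the page-pool pattern this helper enables (kernel-module context assumed; the pool size of 4 and order 1 are arbitrary illustrative values):

/* A mempool that hands out order-1 page pairs; the order is smuggled
 * through pool_data as shown in the excerpt above. */
#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static mempool_t *pool;

static int pool_init(void)
{
        pool = mempool_create_page_pool(4, 1);  /* keep >= 4 order-1 blocks */
        return pool ? 0 : -ENOMEM;
}

static void pool_use(void)
{
        struct page *page = mempool_alloc(pool, GFP_KERNEL);

        /* ... use the two contiguous pages ... */
        mempool_free(page, pool);
}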
dma-contiguous.h
115 unsigned int order);
148 unsigned int order)
147 dma_alloc_from_contiguous(struct device *dev, int count, unsigned int order) argument
slab.h
168 * 32 megabyte (2^25) or the maximum allocatable page order if that is
172 * to do various tricks to work around compiler limitations in order to
185 * SLUB directly allocates requests fitting in to an order-1 page
212 /* Maximum order allocatable via the slab allocator */
341 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
344 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
347 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) argument
349 return kmalloc_order(size, flags, order);
355 unsigned int order = get_order(size); local
356 return kmalloc_order_trace(size, flags, order);
[all...]
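The excerpt shows the large-allocation path: sizes too big for the slab caches get an order from get_order() and are handed to kmalloc_order(). A hedged caller-side sketch (kernel context assumed; SZ_128K is from linux/sizes.h):

/* A kmalloc() this large is backed by whole 2^order page blocks. */
#include <linux/slab.h>
#include <linux/sizes.h>

static void *large_buffer(void)
{
        /* 128 KiB -> get_order() yields 5 with 4 KiB pages (32 pages) */
        return kmalloc(SZ_128K, GFP_KERNEL);
}

static void large_buffer_free(void *buf)
{
        kfree(buf);     /* kfree() copes with page-backed allocations too */
}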
hugetlb.h
251 unsigned int order; member in struct:hstate
286 void __init hugetlb_add_hstate(unsigned order);
324 return (unsigned long)PAGE_SIZE << h->order;
338 return h->order;
343 return h->order + PAGE_SHIFT;
353 return 1 << h->order;
379 return hstates[index].order + PAGE_SHIFT;
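The hstate accessors above are all shift arithmetic on the single order field. A worked userspace example, assuming 4 KiB base pages and the order-9 (2 MiB) huge page used on x86-64:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned int order = 9;                         /* 2 MiB hstate */

        printf("size  = %lu\n", PAGE_SIZE << order);    /* 2097152 bytes */
        printf("shift = %u\n", order + PAGE_SHIFT);     /* 21 */
        printf("pages = %u\n", 1U << order);            /* 512 base pages */
        return 0;
}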
bitmap.h
60 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
61 * bitmap_release_region(bitmap, pos, order) Free specified bit region
62 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
143 extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
144 extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
145 extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
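A hedged sketch of the region API listed above (kernel context assumed; the 64-slot bitmap and order 2 are illustrative). bitmap_find_free_region() returns the position of a naturally aligned run of 2^order bits, or a negative errno:

#include <linux/bitmap.h>

#define SLOTS 64

static DECLARE_BITMAP(slots, SLOTS);

static int grab_four_slots(void)
{
        int pos = bitmap_find_free_region(slots, SLOTS, 2); /* 4 bits */

        if (pos < 0)
                return pos;             /* no aligned free region */
        /* ... slots pos..pos+3 are now owned by this caller ... */
        bitmap_release_region(slots, pos, 2);
        return 0;
}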
mmzone.h
31 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
72 #define for_each_migratetype_order(order, type) \
73 for (order = 0; order < MAX_ORDER; order++) \
119 NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
794 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
795 bool zone_watermark_ok(struct zone *z, unsigned int order,
797 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
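A hedged sketch of the iterator above, walking the buddy allocator's free lists the way mm/ code indexes them (kernel context assumed; the loop body is a placeholder, and real code would hold the zone lock):

#include <linux/mmzone.h>

static void walk_free_lists(struct zone *zone)
{
        unsigned int order, type;

        for_each_migratetype_order(order, type) {
                /* free_area[order].free_list[type] holds 2^order-page blocks */
                struct free_area *area = &zone->free_area[order];

                (void)area;     /* placeholder for per-list work */
        }
}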
swap.h
172 * high_wmark, in order to provide better per-zone lru behavior. We are ok to
327 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
350 static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) argument
kexec.h
220 unsigned int order);
/include/trace/events/
vmscan.h
58 TP_PROTO(int nid, int order),
60 TP_ARGS(nid, order),
64 __field( int, order )
69 __entry->order = order;
72 TP_printk("nid=%d order=%d", __entry->nid, __entry->order)
77 TP_PROTO(int nid, int zid, int order),
79 TP_ARGS(nid, zid, order),
84 __field( int, order )
[all...]
kmem.h
152 TP_PROTO(struct page *page, unsigned int order),
154 TP_ARGS(page, order),
158 __field( unsigned int, order )
163 __entry->order = order;
166 TP_printk("page=%p pfn=%lu order=%d",
169 __entry->order)
188 TP_printk("page=%p pfn=%lu order=0 cold=%d",
196 TP_PROTO(struct page *page, unsigned int order,
199 TP_ARGS(page, order, gfp_flag
[all...]
/include/drm/
drm_hashtab.h
49 u8 order; member in struct:drm_open_hash
52 extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
drm_legacy.h
51 int order; /**< log-base-2(total) */ member in struct:drm_buf
82 * Buffer entry. There is one of this for each buffer size order.
101 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
/include/xen/
xen-ops.h
23 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
27 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
/include/media/
soc_mediabus.h
43 * enum soc_mbus_order - sample order on the media bus
76 * @order: Sample order when storing in memory
83 enum soc_mbus_order order; member in struct:soc_mbus_pixelfmt
/include/net/
act_api.h
80 __u32 order; member in struct:tc_action
/include/acpi/
acbuffer.h
59 * this decision was a design error in C. Ritchie could have picked an order
134 u8 order; member in struct:acpi_pld_info
