/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
     58:  * bulk encryption page pools
     75: unsigned int epp_max_pools;   /* number of pools, const */
     86:  * indicating how idle the pools are, from 0 to MAX_IDLE_IDX
     88:  * the pools, not based on time. which means in case that system
     90:  * activities happened in the pools.
    102: unsigned long epp_total_pages; /* total pages in pools */
    118:  * pointers to pools
    136: "max pools: %u\n"
    212: /* free unused pools */
    286: static unsigned long enc_pools_cleanup(struct page ***pools, in...   [argument]
    314: enc_pools_insert(struct page ***pools, int npools, int npages)   [argument]
    398: struct page ***pools;   [local]
    [all...]
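Every match above operates on the same two-level table: an outer array of pools, each pool itself an array of struct page pointers, which is why enc_pools_cleanup() and enc_pools_insert() take a struct page ***pools. A minimal userspace sketch of that shape, with void * standing in for struct page * and a made-up PAGES_PER_POOL constant (the driver derives the real pool size from PAGE_SIZE):

    #include <stdlib.h>

    #define PAGES_PER_POOL 256   /* hypothetical; stands in for the driver's derived size */

    /* Allocate npools pools, each an array of PAGES_PER_POOL page slots.
     * Mirrors the struct page ***pools shape seen at lines 286/314/398. */
    static void ***pools_alloc(int npools)
    {
        void ***pools = calloc(npools, sizeof(*pools));

        if (!pools)
            return NULL;
        for (int i = 0; i < npools; i++) {
            pools[i] = calloc(PAGES_PER_POOL, sizeof(**pools));
            if (!pools[i]) {
                while (i--)          /* unwind the pools built so far */
                    free(pools[i]);
                free(pools);
                return NULL;
            }
        }
        return pools;
    }
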
/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
     28:  * over the DMA pools:
     77:  * The pool structure. There are usually six pools:
     84:  * @pools: The 'struct device->dma_pools' link.
     92:  * @dev: The device that is associated with these pools.
    104: struct list_head pools; /* The 'struct device->dma_pools link */   [member in struct:dma_pool]
    149:  * DMA pools. Guarded by _mutex->lock.
    150:  * @pools: The link to 'struct ttm_pool_manager->pools'
    155: struct list_head pools;   [member in struct:device_pools]
    161:  * struct ttm_pool_manager - Holds memory pools fo...
    171: struct list_head pools;   [member in struct:ttm_pool_manager]
    [all...]
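The member tags above show the double bookkeeping in this file: each struct dma_pool links into its owning device's dma_pools list, while a small struct device_pools wrapper links the same pool into the global ttm_pool_manager->pools list, so the shrinker can walk every pool across all devices. A rough sketch of that arrangement, with plain next-pointers and hypothetical names standing in for the kernel's embedded list_head:

    #include <stdlib.h>

    /* Illustrative names only. Each pool is reachable two ways: from its
     * owning device (for teardown) and from the global manager (for
     * shrinking under memory pressure). */
    struct pool {
        struct pool *next_on_dev;      /* analogue of dma_pool.pools */
        unsigned type;                 /* wc/uc/cached variant */
    };

    struct pool_ref {
        struct pool_ref *next_global;  /* analogue of device_pools.pools */
        struct pool *pool;
    };

    struct dev     { struct pool *pools; };
    struct manager { struct pool_ref *pools; };

    static int add_pool(struct manager *mgr, struct dev *dev, unsigned type)
    {
        struct pool *p = malloc(sizeof(*p));
        struct pool_ref *ref = malloc(sizeof(*ref));

        if (!p || !ref) {
            free(p);                   /* free(NULL) is a no-op */
            free(ref);
            return -1;
        }
        p->type = type;
        p->next_on_dev = dev->pools;   /* push onto the device's list */
        dev->pools = p;
        ref->pool = p;
        ref->next_global = mgr->pools; /* and onto the manager's list */
        mgr->pools = ref;
        return 0;
    }
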
/drivers/gpu/drm/ttm/ttm_page_alloc.c
     98:  * struct ttm_pool_manager - Holds memory pools for fst allocation
    108:  * @pools: All pool objects in use.
    116: struct ttm_page_pool pools[NUM_POOLS];   [member in union:ttm_pool_manager::__anon1048]
    272: return &_manager->pools[pool_index];
    409: pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
    426: count += _manager->pools[i].npages;
    853: ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, ...
    928: p = &_manager->pools[i];
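Line 409 is the interesting match: the shrinker indexes the fixed pools[NUM_POOLS] array at (i + pool_offset) % NUM_POOLS, so successive shrink passes start at a rotating offset instead of always draining pools[0] first. A self-contained sketch of that round-robin pass, with hypothetical names (the real NUM_POOLS covers the caching/DMA combinations):

    #define NUM_POOLS 4

    struct page_pool { unsigned npages; };

    static struct page_pool pools[NUM_POOLS];
    static unsigned pool_offset;   /* rotates so every pool eventually gets shrunk */

    /* Modelled on the indexing at line 409: start at a rotating offset so
     * repeated calls spread the pressure across all pools. */
    static unsigned shrink_some(unsigned to_free)
    {
        unsigned freed = 0;

        pool_offset = (pool_offset + 1) % NUM_POOLS;
        for (unsigned i = 0; i < NUM_POOLS && freed < to_free; i++) {
            struct page_pool *p = &pools[(i + pool_offset) % NUM_POOLS];
            unsigned want = to_free - freed;
            unsigned take = p->npages < want ? p->npages : want;

            p->npages -= take;
            freed += take;
        }
        return freed;
    }
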
/drivers/staging/android/ion/ion_system_heap.c
     52: struct ion_page_pool *pools[0];   [member in struct:ion_system_heap]
     60: struct ion_page_pool *pool = heap->pools[order_to_index(order)];
     87: struct ion_page_pool *pool = heap->pools[order_to_index(order)];
    190: /* uncached pages come from the page pools, zero them before returning
    222: struct ion_page_pool *pool = sys_heap->pools[i];
    251: struct ion_page_pool *pool = sys_heap->pools[i];
    286: heap->pools[i] = pool;
    294: ion_page_pool_destroy(heap->pools[i]);
    307: ion_page_pool_destroy(sys_heap->pools[i]);
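heap->pools here is a flexible array member (the pools[0] at line 52), sized at allocation time to hold one ion_page_pool pointer per supported page order, with order_to_index() mapping an order to its slot. A sketch under those assumptions, using the C99 pools[] spelling and a made-up orders table:

    #include <stdlib.h>

    /* Hypothetical order table: one pool per supported order, largest first. */
    static const unsigned orders[] = { 8, 4, 0 };
    #define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

    struct page_pool { unsigned order; };

    struct system_heap {
        unsigned num_pools;
        struct page_pool *pools[];   /* C99 spelling of the pools[0] member above */
    };

    /* Map an allocation order to its slot, as order_to_index() does. */
    static int order_to_index(unsigned order)
    {
        for (unsigned i = 0; i < NUM_ORDERS; i++)
            if (order == orders[i])
                return (int)i;
        return -1;   /* unsupported order */
    }

    static struct system_heap *heap_create(void)
    {
        /* single allocation: header plus one pool pointer per order */
        struct system_heap *heap =
            malloc(sizeof(*heap) + NUM_ORDERS * sizeof(heap->pools[0]));

        if (!heap)
            return NULL;
        heap->num_pools = NUM_ORDERS;
        for (unsigned i = 0; i < NUM_ORDERS; i++)
            heap->pools[i] = NULL;   /* the real code allocates a pool per order */
        return heap;
    }
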
/drivers/soc/ti/knav_qmss.h
    196:  * @pools: list of descriptor pools in the region
    208: struct list_head pools;   [member in struct:knav_region]
    212:  * struct knav_pool: qmss pools
    304: struct list_head pools;   [member in struct:knav_device]
    362: list_for_each_entry(pool, &kdev->pools, list)
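The list_head members tagged above are intrusive links: the pool struct embeds the node, and iteration macros like the one at line 362 recover the containing struct from the node with container_of(). A minimal standalone version of that mechanism (the kernel's list_for_each_entry() infers the container type with typeof(); this sketch takes it as an explicit parameter):

    #include <stddef.h>
    #include <stdio.h>

    /* Circular intrusive list: an empty head points at itself. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define list_for_each_entry(pos, head, type, member)        \
        for (pos = container_of((head)->next, type, member);    \
             &pos->member != (head);                            \
             pos = container_of(pos->member.next, type, member))

    struct pool {
        struct list_head list;   /* embedded node, as in struct knav_pool */
        const char *name;
    };

    /* The shape of the for_each_pool() walk over kdev->pools. */
    static void dump_pools(struct list_head *kdev_pools)
    {
        struct pool *pool;

        list_for_each_entry(pool, kdev_pools, struct pool, list)
            puts(pool->name);
    }
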
/drivers/soc/ti/knav_qmss_queue.c
    791: /* Region maintains a sorted (by region offset) list of pools
    797: node = &region->pools;
    798: list_for_each_entry(pi, &region->pools, region_inst) {
    812: list_add_tail(&pool->list, &kdev->pools);
   1007: list_add(&pool->region_inst, &region->pools);
   1090: INIT_LIST_HEAD(&region->pools);
   1315: list_for_each_entry(pool, &region->pools, region_inst)
   1684: INIT_LIST_HEAD(&kdev->pools);
   1729: queue_pools = of_get_child_by_name(node, "queue-pools");
   1731: dev_err(dev, "queue-pools no...
    [all...]
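The comment at line 791 and the walk at lines 797-798 describe a sorted insert: a new pool is linked into region->pools just before the first existing pool with a larger region offset, keeping the list ordered. A compact sketch of the same idea with plain next-pointers instead of list_head:

    /* Hypothetical rendering of the sorted insert; names are illustrative. */
    struct pool {
        struct pool *next;
        unsigned region_offset;
    };

    static void region_insert_pool(struct pool **head, struct pool *newp)
    {
        struct pool **pp = head;

        /* walk until an existing pool with a larger offset is found */
        while (*pp && (*pp)->region_offset < newp->region_offset)
            pp = &(*pp)->next;
        newp->next = *pp;   /* link in front of it, preserving the order */
        *pp = newp;
    }
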
/drivers/atm/zatm.c
    610: /* prepare free buffer pools */
   1034: unsigned long pools;   [local]
   1037: pools = zin(RQA);
   1038: EVENT("RQA (0x%08x)\n",pools,0);
   1039: for (i = 0; pools; i++) {
   1040: if (pools & 1) {
   1044: pools >>= 1;
   1048: unsigned long pools;   [local]
   1050: pools = zin(RQU);
   1052: dev->number,pools);
   1264: int pools,vccs,rx;   [local]
    [all...]
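Lines 1039-1044 show the classic bitmask walk: the RQA/RQU registers report one bit per buffer pool, and the handler shifts the mask right, servicing each set bit in turn. A runnable stand-in (zin() is the driver's register read; a constant fakes it here):

    #include <stdio.h>

    static void service_pool(int i) { printf("refill pool %d\n", i); }

    int main(void)
    {
        unsigned long pools = 0x25;   /* pretend RQA reported pools 0, 2 and 5 */

        for (int i = 0; pools; i++) {
            if (pools & 1)
                service_pool(i);
            pools >>= 1;              /* shift to examine the next pool's bit */
        }
        return 0;
    }
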
/drivers/md/dm.c
   2999: struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);   [local]
   3004: if (!pools)
   3020: pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
   3021: if (!pools->io_pool)
   3024: pools->bs = bioset_create_nobvec(pool_size, front_pad);
   3025: if (!pools->bs)
   3028: if (integrity && bioset_integrity_create(pools->bs, pool_size))
   3031: return pools;
   3034: dm_free_md_mempools(pools);
   3039: dm_free_md_mempools(struct dm_md_mempools *pools)   [argument]
    [all...]
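The matches at 2999-3034 outline a construct-or-roll-back shape: allocate the mempool and bioset in turn, and on any failure jump to a single cleanup call that copes with a partially built object, the same routine the normal destructor path (dm_free_md_mempools() at 3039) uses. A hypothetical userspace rendering, with malloc() standing in for mempool_create_slab_pool() and bioset_create_nobvec():

    #include <stdlib.h>

    struct md_mempools {
        void *io_pool;
        void *bs;
    };

    /* Tolerates partially built objects, like dm_free_md_mempools(). */
    static void free_md_mempools(struct md_mempools *pools)
    {
        if (!pools)
            return;
        free(pools->io_pool);   /* free(NULL) is safe, like the kernel destructors */
        free(pools->bs);
        free(pools);
    }

    static struct md_mempools *alloc_md_mempools(size_t pool_size)
    {
        struct md_mempools *pools = calloc(1, sizeof(*pools));

        if (!pools)
            return NULL;
        pools->io_pool = malloc(pool_size);
        if (!pools->io_pool)
            goto out;
        pools->bs = malloc(pool_size);
        if (!pools->bs)
            goto out;
        return pools;
    out:
        free_md_mempools(pools);   /* single cleanup path, as at line 3034 */
        return NULL;
    }
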
/drivers/md/dm.h
    212: void dm_free_md_mempools(struct dm_md_mempools *pools);
/drivers/md/dm-thin.c
    329:  * A global list of pools that uses a struct mapped_device as a key.
    333: struct list_head pools;   [member in struct:dm_thin_pool_table]
    339: INIT_LIST_HEAD(&dm_thin_pool_table.pools);
    345: list_add(&pool->list, &dm_thin_pool_table.pools);
    360: list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
    376: list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
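As the comment at line 329 says, dm_thin_pool_table is a global list keyed by struct mapped_device pointer; the two list_for_each_entry() walks above are lookups comparing that key. A minimal sketch of such a pointer-keyed table (the kernel guards the real table with a mutex, omitted here; an opaque void * stands in for the key):

    struct thin_pool {
        struct thin_pool *next;
        const void *md;          /* key: owning mapped device */
    };

    static struct thin_pool *table;   /* analogue of dm_thin_pool_table.pools */

    static struct thin_pool *pool_table_lookup(const void *md)
    {
        for (struct thin_pool *tmp = table; tmp; tmp = tmp->next)
            if (tmp->md == md)   /* compare the key pointer itself */
                return tmp;
        return NULL;
    }
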
/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
    320:  * and VM pools where appropriate. Also assign queues based on DCB
    345: /* 16 pools w/ 8 TC per pool */
    349: /* 32 pools w/ 4 TC per pool */
    356: /* queues in the remaining pools are available for FCoE */
    489:  * and VM pools where appropriate. If RSS is available, then also try and
    502: bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);   [local]
    511: /* double check we are limited to maximum pools */
    515: if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
    527: /* queues in the remaining pools are available for FCoE */
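The local at line 502 (and the same test at line 7485 of ixgbe_main.c below) derives "are extra forwarding pools in use?" from a bitmap: fwd_bitmask has one bit per pool, and bit 0 appears to be the default pool, so a first zero bit above position 1 means at least one additional pool has been claimed. A portable approximation of that test, with find_first_zero_bit() rewritten as a plain loop for illustration:

    static unsigned find_first_zero_bit_ul(unsigned long mask, unsigned nbits)
    {
        for (unsigned i = 0; i < nbits; i++)
            if (!(mask & (1UL << i)))
                return i;
        return nbits;   /* no zero bit found */
    }

    static int extra_pools_in_use(unsigned long fwd_bitmask)
    {
        /* position 0 is the default pool; anything past it is extra */
        return find_first_zero_bit_ul(fwd_bitmask, 32) > 1;
    }
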
/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
   7477: bool pools;   [local]
   7485: pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
   7486: if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
   7730: /* Hardware has a limited number of available pools. Each VF, and the
   7732:  * then the available number of pools.