Lines Matching refs:pool

63 static inline void steal_tags(struct percpu_ida *pool,
66 unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
69 for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
71 cpu = cpumask_next(cpu, &pool->cpus_have_tags);
74 cpu = cpumask_first(&pool->cpus_have_tags);
79 pool->cpu_last_stolen = cpu;
80 remote = per_cpu_ptr(pool->tag_cpu, cpu);
82 cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
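
The matched lines above show steal_tags() scanning pool->cpus_have_tags round-robin, resuming at pool->cpu_last_stolen and wrapping around with cpumask_first(). A minimal sketch of that victim-selection loop, assuming the standard cpumask API; the body is simplified and omits the remote-freelist locking and the actual tag transfer the real function performs:

static inline void steal_tags_sketch(struct percpu_ida *pool,
				     struct percpu_ida_cpu *tags)
{
	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
	struct percpu_ida_cpu *remote;

	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags; cpus_have_tags--) {
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(&pool->cpus_have_tags);
		if (cpu >= nr_cpu_ids)
			break;			/* mask emptied under us */

		pool->cpu_last_stolen = cpu;	/* resume here next time */
		remote = per_cpu_ptr(pool->tag_cpu, cpu);

		/* the victim's freelist will be drained, so drop its bit */
		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

		/* ... lock remote, move its tags over, stop once we
		 * actually got some ... */
	}
}
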
109 static inline void alloc_global_tags(struct percpu_ida *pool,
113 pool->freelist, &pool->nr_free,
114 min(pool->nr_free, pool->percpu_batch_size));
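
alloc_global_tags() refills the caller's percpu freelist from the global one, moving at most percpu_batch_size tags. move_tags() itself is not among the matched lines; a minimal helper consistent with these call sites (an array copy between two counted freelists) would look like:

static inline void move_tags(unsigned *dst, unsigned *dst_nr,
			     unsigned *src, unsigned *src_nr,
			     unsigned nr)
{
	*src_nr -= nr;
	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
	*dst_nr += nr;
}
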
131 * @pool: pool to allocate from
147 int percpu_ida_alloc(struct percpu_ida *pool, int state)
155 tags = this_cpu_ptr(pool->tag_cpu);
165 spin_lock(&pool->lock);
175 prepare_to_wait(&pool->wait, &wait, state);
178 alloc_global_tags(pool, tags);
180 steal_tags(pool, tags);
186 &pool->cpus_have_tags);
189 spin_unlock(&pool->lock);
203 tags = this_cpu_ptr(pool->tag_cpu);
206 finish_wait(&pool->wait, &wait);
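
A hypothetical call site for percpu_ida_alloc(), showing what the state argument selects: with TASK_RUNNING the function returns -ENOSPC rather than sleeping, while the sleeping states wait on pool->wait until a tag is freed (my_pool is illustrative):

	int tag = percpu_ida_alloc(&my_pool, TASK_RUNNING);
	if (tag < 0)		/* -ENOSPC: pool empty, we chose not to sleep */
		return tag;

	/* ... use tag, an integer in [0, nr_tags) ... */
	percpu_ida_free(&my_pool, tag);
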
214 * @pool: pool @tag was allocated from
219 void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
225 BUG_ON(tag >= pool->nr_tags);
228 tags = this_cpu_ptr(pool->tag_cpu);
238 &pool->cpus_have_tags);
239 wake_up(&pool->wait);
242 if (nr_free == pool->percpu_max_size) {
243 spin_lock(&pool->lock);
249 if (tags->nr_free == pool->percpu_max_size) {
250 move_tags(pool->freelist, &pool->nr_free,
252 pool->percpu_batch_size);
254 wake_up(&pool->wait);
256 spin_unlock(&pool->lock);
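
The matched lines sketch the slow path of percpu_ida_free(): once a CPU's freelist fills to percpu_max_size, a batch of percpu_batch_size tags is pushed back to the global freelist under pool->lock and waiters are woken. Condensed, with the double-check under the lock made explicit (illustrative; names follow the matched lines):

	if (nr_free == pool->percpu_max_size) {
		spin_lock(&pool->lock);

		/*
		 * Re-check under pool->lock: the freelist may have been
		 * drained between the unlocked test and taking the lock.
		 */
		if (tags->nr_free == pool->percpu_max_size) {
			move_tags(pool->freelist, &pool->nr_free,
				  tags->freelist, &tags->nr_free,
				  pool->percpu_batch_size);
			wake_up(&pool->wait);
		}
		spin_unlock(&pool->lock);
	}
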
264 * percpu_ida_destroy - release a tag pool's resources
265 * @pool: pool to free
269 void percpu_ida_destroy(struct percpu_ida *pool)
271 free_percpu(pool->tag_cpu);
272 free_pages((unsigned long) pool->freelist,
273 get_order(pool->nr_tags * sizeof(unsigned)));
278 * percpu_ida_init - initialize a percpu tag pool
279 * @pool: pool to initialize
282 * Initializes @pool so that it can be used to allocate tags - integers in the range [0, nr_tags).
289 int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
294 memset(pool, 0, sizeof(*pool));
296 init_waitqueue_head(&pool->wait);
297 spin_lock_init(&pool->lock);
298 pool->nr_tags = nr_tags;
299 pool->percpu_max_size = max_size;
300 pool->percpu_batch_size = batch_size;
309 pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
310 if (!pool->freelist)
314 pool->freelist[i] = i;
316 pool->nr_free = nr_tags;
318 pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
319 pool->percpu_max_size * sizeof(unsigned),
321 if (!pool->tag_cpu)
325 spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
329 percpu_ida_destroy(pool);
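
A hypothetical lifecycle around __percpu_ida_init(): the percpu_ida_init() wrapper in <linux/percpu_ida.h> supplies default max_size/batch_size values, and percpu_ida_destroy() releases the percpu area and the freelist pages on teardown (my_pool, my_init and my_exit are illustrative):

#include <linux/percpu_ida.h>

static struct percpu_ida my_pool;	/* hypothetical */

static int my_init(void)
{
	/* 128 tags, default percpu freelist sizing */
	return percpu_ida_init(&my_pool, 128);
}

static void my_exit(void)
{
	percpu_ida_destroy(&my_pool);
}
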
335 * percpu_ida_for_each_free - iterate over the free ids of a pool
336 * @pool: pool to iterate
344 int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
353 remote = per_cpu_ptr(pool->tag_cpu, cpu);
365 spin_lock(&pool->lock);
366 for (i = 0; i < pool->nr_free; i++) {
367 err = fn(pool->freelist[i], data);
371 spin_unlock(&pool->lock);
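
A hypothetical callback for percpu_ida_for_each_free(): the iterator visits each percpu freelist and then the global one, stops early when the callback returns nonzero, and holds spinlocks while calling it, so the callback must not sleep (count_free and snapshot_free are illustrative):

static int count_free(unsigned id, void *data)
{
	(*(unsigned *)data)++;
	return 0;	/* nonzero stops the iteration early */
}

static unsigned snapshot_free(struct percpu_ida *pool)
{
	unsigned n = 0;

	percpu_ida_for_each_free(pool, count_free, &n);
	return n;	/* a racy snapshot, like the matched code itself */
}
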
379 * percpu_ida_free_tags - return the number of free tags on a specific cpu or in the global pool
380 * @pool: pool to query
381 * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
385 unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
389 return pool->nr_free;
390 remote = per_cpu_ptr(pool->tag_cpu, cpu);
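
Hypothetical snapshot queries following the @cpu convention documented above: a real CPU id selects that CPU's freelist, nr_cpu_ids selects the global freelist; either way the result is only a racy snapshot (report_free is illustrative):

static void report_free(struct percpu_ida *pool)
{
	pr_info("cpu0: %u free, global: %u free\n",
		percpu_ida_free_tags(pool, 0),
		percpu_ida_free_tags(pool, nr_cpu_ids));
}
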