Lines matching refs:pool

46 	struct rds_iw_mr_pool	*pool;
57 * Our own little MR pool
78 static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
80 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
81 static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
84 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
85 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
89 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
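Taken together, the field references throughout this listing imply the pool layout below. This is a hedged reconstruction for orientation only; the exact types, field ordering, and any members not touched by a matching line may differ in the real structure.

    struct rds_iw_mr_pool {
    	struct rds_iw_device	*device;	/* back pointer to the device */

    	struct mutex		flush_lock;	/* serializes flushes */
    	struct work_struct	flush_worker;	/* deferred flush */

    	spinlock_t		list_lock;	/* protects the two lists */
    	struct list_head	dirty_list;	/* MRs awaiting invalidation */
    	struct list_head	clean_list;	/* MRs ready for reuse */

    	atomic_t		item_count;	/* total MRs in the pool */
    	atomic_t		dirty_count;	/* entries on dirty_list */
    	atomic_t		free_pinned;	/* pages pinned by unused MRs */

    	unsigned int		max_items;	/* hard cap (line 356) */
    	unsigned int		max_items_soft;	/* soft cap (line 365) */
    	unsigned long		max_free_pinned;/* pinned-page limit (line 357) */
    	unsigned int		max_pages;	/* pages per mapping (line 358) */
    	unsigned int		max_message_size;
    };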
340 struct rds_iw_mr_pool *pool;
342 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
343 if (!pool) {
348 pool->device = rds_iwdev;
349 INIT_LIST_HEAD(&pool->dirty_list);
350 INIT_LIST_HEAD(&pool->clean_list);
351 mutex_init(&pool->flush_lock);
352 spin_lock_init(&pool->list_lock);
353 INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);
355 pool->max_message_size = fastreg_message_size;
356 pool->max_items = fastreg_pool_size;
357 pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
358 pool->max_pages = fastreg_message_size;
365 pool->max_items_soft = pool->max_items * 3 / 4;
367 return pool;
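For concreteness, the derived limits at lines 355-365 work out as follows. The parameter values here are purely hypothetical, since the defaults of the fastreg_pool_size and fastreg_message_size module parameters do not appear in this listing.

    /* Hypothetical: fastreg_pool_size = 2048, fastreg_message_size = 1024 */
    pool->max_items        = 2048;			/* hard cap on MRs */
    pool->max_message_size = 1024;			/* pages per MR */
    pool->max_free_pinned  = 2048 * 1024 / 4;	/* 524288 idle pinned pages at most */
    pool->max_items_soft   = 2048 * 3 / 4;		/* 1536 MRs before throttling */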
372 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
374 iinfo->rdma_mr_max = pool->max_items;
375 iinfo->rdma_mr_size = pool->max_pages;
378 void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
381 rds_iw_flush_mr_pool(pool, 1);
382 BUG_ON(atomic_read(&pool->item_count));
383 BUG_ON(atomic_read(&pool->free_pinned));
384 kfree(pool);
387 static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
392 spin_lock_irqsave(&pool->list_lock, flags);
393 if (!list_empty(&pool->clean_list)) {
394 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
397 spin_unlock_irqrestore(&pool->list_lock, flags);
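The reuse path takes the first clean MR under list_lock. The line that unlinks the entry is elided from this listing, so the list_del_init() in this sketch is an assumption about how the function completes.

    static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
    {
    	struct rds_iw_mr *ibmr = NULL;
    	unsigned long flags;

    	spin_lock_irqsave(&pool->list_lock, flags);
    	if (!list_empty(&pool->clean_list)) {
    		ibmr = list_entry(pool->clean_list.next,
    				  struct rds_iw_mr, mapping.m_list);
    		list_del_init(&ibmr->mapping.m_list);	/* assumed unlink */
    	}
    	spin_unlock_irqrestore(&pool->list_lock, flags);

    	return ibmr;
    }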
404 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
409 ibmr = rds_iw_reuse_fmr(pool);
422 if (atomic_inc_return(&pool->item_count) <= pool->max_items)
425 atomic_dec(&pool->item_count);
434 rds_iw_flush_mr_pool(pool, 0);
447 err = rds_iw_init_fastreg(pool, ibmr);
456 rds_iw_destroy_fastreg(pool, ibmr);
459 atomic_dec(&pool->item_count);
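The allocation path (lines 409-459) reserves a pool slot optimistically: bump item_count, and if that overshoots max_items, release the reservation and flush dirty MRs before trying again. A hedged sketch of that loop follows; the iter retry counter, its bound, and the -EAGAIN give-up path are assumptions, not shown in the listing.

    int iter = 0;				/* assumed retry counter */

    ibmr = rds_iw_reuse_fmr(pool);		/* fast path: recycle a clean MR */
    if (ibmr)
    	return ibmr;

    for (;;) {
    	if (atomic_inc_return(&pool->item_count) <= pool->max_items)
    		break;				/* reservation accepted */

    	atomic_dec(&pool->item_count);		/* back out the reservation */

    	if (++iter > 2)
    		return ERR_PTR(-EAGAIN);	/* assumed give-up path */

    	/* Reclaim dirty MRs, which may also refill clean_list */
    	rds_iw_flush_mr_pool(pool, 0);
    }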
481 * Flush our pool of MRs.
486 static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
497 mutex_lock(&pool->flush_lock);
499 spin_lock_irqsave(&pool->list_lock, flags);
501 list_splice_init(&pool->dirty_list, &unmap_list);
503 list_splice_init(&pool->clean_list, &kill_list);
504 spin_unlock_irqrestore(&pool->list_lock, flags);
515 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
527 rds_iw_destroy_fastreg(pool, ibmr);
535 spin_lock_irqsave(&pool->list_lock, flags);
536 list_splice(&unmap_list, &pool->clean_list);
537 spin_unlock_irqrestore(&pool->list_lock, flags);
540 atomic_sub(unpinned, &pool->free_pinned);
541 atomic_sub(ncleaned, &pool->dirty_count);
542 atomic_sub(nfreed, &pool->item_count);
544 mutex_unlock(&pool->flush_lock);
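The flush follows the classic drain idiom: splice the shared lists onto private heads while holding the spinlock, do the slow unmap and destroy work with no locks held, then splice the survivors back as clean. A condensed sketch of just the locking shape; the unlocked middle section is elided, and the free_all guard on line 503 is inferred from the parameter name.

    LIST_HEAD(unmap_list);
    LIST_HEAD(kill_list);
    unsigned long flags;

    mutex_lock(&pool->flush_lock);		/* one flusher at a time */

    spin_lock_irqsave(&pool->list_lock, flags);
    list_splice_init(&pool->dirty_list, &unmap_list);
    if (free_all)				/* teardown: drain everything */
    	list_splice_init(&pool->clean_list, &kill_list);
    spin_unlock_irqrestore(&pool->list_lock, flags);

    /* ... unmap unmap_list and destroy kill_list, unlocked ... */

    spin_lock_irqsave(&pool->list_lock, flags);
    list_splice(&unmap_list, &pool->clean_list);	/* survivors are reusable */
    spin_unlock_irqrestore(&pool->list_lock, flags);

    /* free_pinned, dirty_count, item_count adjusted here (lines 540-542) */
    mutex_unlock(&pool->flush_lock);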
550 struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);
552 rds_iw_flush_mr_pool(pool, 0);
558 struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;
561 if (!pool)
564 /* Return it to the pool's free list */
565 rds_iw_free_fastreg(pool, ibmr);
568 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
569 atomic_read(&pool->dirty_count) >= pool->max_items / 10)
570 queue_work(rds_wq, &pool->flush_worker);
574 rds_iw_flush_mr_pool(pool, 0);
578 queue_work(rds_wq, &pool->flush_worker);
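Lines 558-578 show the release side: the MR goes back on the dirty list, and cleanup is deferred until thresholds trip, except when the caller demanded invalidation. A hedged sketch; the invalidate flag and the in_interrupt() test are assumptions inferred from the synchronous flush at line 574 sitting next to the queued flush at line 578.

    /* Return it to the pool's free list */
    rds_iw_free_fastreg(pool, ibmr);

    /* Kick the background flusher once enough state has piled up */
    if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
        atomic_read(&pool->dirty_count) >= pool->max_items / 10)
    	queue_work(rds_wq, &pool->flush_worker);

    if (invalidate) {				/* assumed flag */
    	if (!in_interrupt())
    		rds_iw_flush_mr_pool(pool, 0);	/* safe to sleep: flush now */
    	else
    		queue_work(rds_wq, &pool->flush_worker);
    }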
588 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
590 if (pool)
591 rds_iw_flush_mr_pool(pool, 0);
658 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
661 struct rds_iw_device *rds_iwdev = pool->device;
666 mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
677 page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
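Lines 666 and 677 pair a fast-registration MR with its page list via the old ib_alloc_fast_reg_mr()/ib_alloc_fast_reg_page_list() API (later kernels replaced this with ib_alloc_mr() and ib_map_mr_sg()). A hedged sketch of the allocation with unwinding; the ib_dereg_mr() cleanup on the failure path is an assumption.

    struct ib_mr *mr;
    struct ib_fast_reg_page_list *page_list;

    mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
    if (IS_ERR(mr))
    	return PTR_ERR(mr);

    page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev,
    					    pool->max_message_size);
    if (IS_ERR(page_list)) {
    	ib_dereg_mr(mr);			/* assumed cleanup on failure */
    	return PTR_ERR(page_list);
    }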
754 static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
759 struct rds_iw_device *rds_iwdev = pool->device;
773 if (mapping->m_sg.dma_len > pool->max_message_size) {
796 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
810 spin_lock_irqsave(&pool->list_lock, flags);
812 list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
813 atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
814 atomic_inc(&pool->dirty_count);
816 spin_unlock_irqrestore(&pool->list_lock, flags);
819 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
847 spin_lock_irqsave(&pool->list_lock, flags);
853 spin_unlock_irqrestore(&pool->list_lock, flags);
864 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,