Lines matching references to mp (struct fc_exch_mgr *)

116  * @mp:	      Exchange Manager associated with this anchor
127 struct fc_exch_mgr *mp;
323 struct fc_exch_mgr *mp;
326 mp = ep->em;
330 mempool_free(ep, mp->ep_pool);
463 fc_exch_release(ep); /* drop hold for exch in mp */
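
The matches at 323-463 above sketch the exchange's reference counting: fc_exch_hold pins an exchange for its slot in the manager's per-CPU pool, and the final fc_exch_release hands it back to mp->ep_pool. A minimal user-space sketch of that take/put-then-free pattern, using C11 atomics and free() as stand-ins for the kernel's atomic_t and mempool (exch_hold/exch_release are illustrative names, not the libfc API):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct exch {
		atomic_int refcnt;		/* models ep->ex_refcnt */
		/* ... exchange state ... */
	};

	void exch_hold(struct exch *ep)
	{
		atomic_fetch_add(&ep->refcnt, 1);	/* "hold for exch in mp" */
	}

	void exch_release(struct exch *ep)
	{
		/* On the last put libfc returns the exchange to the manager's
		 * mempool (mempool_free(ep, mp->ep_pool)); free() stands in here. */
		if (atomic_fetch_sub(&ep->refcnt, 1) == 1)
			free(ep);
	}

	int main(void)
	{
		struct exch *ep = calloc(1, sizeof(*ep));

		if (!ep)
			return 1;
		atomic_store(&ep->refcnt, 1);	/* reference taken at allocation */
		exch_hold(ep);			/* extra hold while the exchange sits in the pool */
		exch_release(ep);		/* e.g. fc_exch_delete dropping that hold */
		exch_release(ep);		/* last put frees the exchange */
		return 0;
	}
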
799 * @mp: The exchange manager that will allocate the exchange
804 struct fc_exch_mgr *mp)
812 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
814 atomic_inc(&mp->stats.no_free_exch);
820 pool = per_cpu_ptr(mp->pool, cpu);
839 index = index == mp->pool_max_index ? 0 : index + 1;
843 pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
845 fc_exch_hold(ep); /* hold for exch in mp */
863 ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
864 ep->em = mp;
869 ep->class = mp->class;
877 atomic_inc(&mp->stats.no_free_exch_xid);
878 mempool_free(ep, mp->ep_pool);
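
The fc_exch_em_alloc matches at 799-878 show the allocation path: an exchange is drawn from mp->ep_pool with GFP_ATOMIC (no_free_exch is bumped when that fails), then a free XID slot is searched for in the current CPU's pool starting at next_index and wrapping at pool_max_index; if the scan comes back to where it started, no_free_exch_xid is bumped and the exchange goes back to the mempool. A stand-alone sketch of that round-robin slot search (find_free_slot and its parameters are illustrative names):

	#include <stdio.h>

	int find_free_slot(void **slots, unsigned int max_index, unsigned int *next_index)
	{
		unsigned int index = *next_index;

		while (slots[index]) {
			index = index == max_index ? 0 : index + 1;
			if (index == *next_index)
				return -1;		/* wrapped around: no free XID in this pool */
		}
		*next_index = index == max_index ? 0 : index + 1;
		return (int)index;
	}

	int main(void)
	{
		void *slots[4] = { (void *)1, NULL, (void *)1, NULL };	/* slots 1 and 3 free */
		unsigned int next_index = 2;
		int got = find_free_slot(slots, 3, &next_index);

		printf("got slot %d, next_index now %u\n", got, next_index);	/* 3 and 0 */
		return 0;
	}
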
900 return fc_exch_em_alloc(lport, ema->mp);
906 * @mp: The exchange manager to lookup the exchange from
909 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
914 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
915 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
917 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
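
Lines 863 and 914-917 are the two halves of the XID scheme: fc_exch_em_alloc packs the per-CPU pool slot and the CPU number into the exchange ID, and fc_exch_find unpacks them to locate the exchange again (the low bits select the pool, the rest of the offset from min_xid is the slot; libfc derives the mask/order from the online CPU count and requires min_xid to have those low bits clear). A stand-alone round trip with illustrative constants:

	#include <assert.h>
	#include <stdio.h>

	#define FC_CPU_ORDER	2			/* illustrative: 1 << 2 = 4 pools */
	#define FC_CPU_MASK	((1u << FC_CPU_ORDER) - 1)
	#define MIN_XID		0x0100u			/* illustrative EM range start */

	/* Encode: interleave slot index and CPU id, then offset into the XID range. */
	unsigned int xid_encode(unsigned int index, unsigned int cpu)
	{
		return ((index << FC_CPU_ORDER) | cpu) + MIN_XID;
	}

	/* Decode: low bits pick the per-CPU pool, the remainder is the slot index. */
	void xid_decode(unsigned int xid, unsigned int *index, unsigned int *cpu)
	{
		*cpu = xid & FC_CPU_MASK;
		*index = (xid - MIN_XID) >> FC_CPU_ORDER;
	}

	int main(void)
	{
		unsigned int index, cpu, xid = xid_encode(5, 3);	/* slot 5 on CPU 3 */

		xid_decode(xid, &index, &cpu);
		assert(index == 5 && cpu == 3);
		printf("xid 0x%x -> pool %u, slot %u\n", xid, cpu, index);
		return 0;
	}
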
952 * @mp: The exchange manager to allocate the exchange from
958 struct fc_exch_mgr *mp,
999 * @mp: The Exchange Manager to lookup the exchange from
1006 struct fc_exch_mgr *mp,
1024 ep = fc_exch_find(mp, xid);
1026 atomic_inc(&mp->stats.xid_not_found);
1053 ep = fc_exch_find(mp, xid);
1056 atomic_inc(&mp->stats.xid_busy);
1060 ep = fc_exch_resp(lport, mp, fp);
1067 atomic_inc(&mp->stats.xid_not_found);
1085 atomic_inc(&mp->stats.seq_not_found);
1130 * @mp: The Exchange Manager to lookup the exchange from
1135 static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
1147 ep = fc_exch_find(mp, xid);
1440 fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1457 * @mp: The EM that the exchange is on
1463 static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1490 reject = fc_seq_lookup_recip(lport, mp, fp);
1522 * @mp: The EM that the exchange is on
1525 static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1534 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1536 atomic_inc(&mp->stats.xid_not_found);
1540 atomic_inc(&mp->stats.xid_not_found);
1546 atomic_inc(&mp->stats.xid_not_found);
1551 atomic_inc(&mp->stats.xid_not_found);
1560 atomic_inc(&mp->stats.seq_not_found);
1612 * @mp: The EM that the exchange is on
1615 static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1619 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1622 atomic_inc(&mp->stats.xid_not_found);
1624 atomic_inc(&mp->stats.non_bls_resp);
1706 * @mp: The EM that the exchange is on
1712 static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1722 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1917 per_cpu_ptr(ema->mp->pool, cpu),
1935 if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
1936 return fc_exch_find(ema->mp, xid);
2258 struct fc_exch_mgr *mp;
2263 mp = ema->mp;
2264 st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2266 atomic_read(&mp->stats.no_free_exch_xid);
2267 st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2268 st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2269 st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2270 st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
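
The statistics roll-up at 2258-2270 walks the lport's exchange manager anchors and sums each EM's atomic counters into the host totals. The same pattern in plain C11, with hypothetical structure names standing in for mp->stats and the fc_* fields being filled in:

	#include <stdatomic.h>

	struct em_stats {			/* models mp->stats */
		atomic_ulong no_free_exch;
		atomic_ulong xid_not_found;
		atomic_ulong xid_busy;
		atomic_ulong seq_not_found;
		atomic_ulong non_bls_resp;
	};

	struct host_stats {			/* models the host-wide totals */
		unsigned long fc_no_free_exch;
		unsigned long fc_xid_not_found;
		unsigned long fc_xid_busy;
		unsigned long fc_seq_not_found;
		unsigned long fc_non_bls_resp;
	};

	/* Called once per exchange manager anchor on the lport's list. */
	void roll_up(struct host_stats *st, struct em_stats *s)
	{
		st->fc_no_free_exch  += atomic_load(&s->no_free_exch);
		st->fc_xid_not_found += atomic_load(&s->xid_not_found);
		st->fc_xid_busy      += atomic_load(&s->xid_busy);
		st->fc_seq_not_found += atomic_load(&s->seq_not_found);
		st->fc_non_bls_resp  += atomic_load(&s->non_bls_resp);
	}
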
2278 * @mp: The exchange manager to be added to the local port
2282 struct fc_exch_mgr *mp,
2291 ema->mp = mp;
2295 kref_get(&mp->kref);
2306 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2308 mempool_destroy(mp->ep_pool);
2309 free_percpu(mp->pool);
2310 kfree(mp);
2321 kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2336 if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
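
Lines 2278-2336 cover the exchange manager's shared ownership: every anchor created by fc_exch_mgr_add takes a reference on the EM (kref_get at 2295), deleting an anchor drops one (kref_put at 2321), and fc_exch_mgr_destroy at 2306-2310 runs only on the last put, tearing down ep_pool and the per-CPU pools. A compact user-space model of a refcount with a release callback (struct em, em_get, em_put and em_destroy are illustrative, not the kernel kref API):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct em {
		atomic_int kref;		/* models mp->kref */
	};

	void em_get(struct em *mp)		/* anchor added */
	{
		atomic_fetch_add(&mp->kref, 1);
	}

	void em_put(struct em *mp, void (*release)(struct em *))
	{
		if (atomic_fetch_sub(&mp->kref, 1) == 1)
			release(mp);		/* last reference gone */
	}

	void em_destroy(struct em *mp)		/* stands in for fc_exch_mgr_destroy() */
	{
		free(mp);			/* libfc also frees ep_pool and the per-CPU pools here */
	}

	int main(void)
	{
		struct em *mp = calloc(1, sizeof(*mp));

		if (!mp)
			return 1;
		atomic_store(&mp->kref, 1);	/* kref_init() in fc_exch_mgr_alloc() */
		em_get(mp);			/* fc_exch_mgr_add(): the anchor's reference */
		em_put(mp, em_destroy);		/* allocator drops its extra count (see 2430-2434) */
		em_put(mp, em_destroy);		/* anchor deleted: last put runs the destructor */
		return 0;
	}
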
2360 struct fc_exch_mgr *mp;
2376 mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
2377 if (!mp)
2380 mp->class = class;
2382 mp->min_xid = min_xid;
2388 mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
2391 mp->max_xid = max_xid;
2392 pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
2396 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2397 if (!mp->ep_pool)
2405 mp->pool_max_index = pool_exch_range - 1;
2411 mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
2412 if (!mp->pool)
2415 pool = per_cpu_ptr(mp->pool, cpu);
2423 kref_init(&mp->kref);
2424 if (!fc_exch_mgr_add(lport, mp, match)) {
2425 free_percpu(mp->pool);
2430 * Above kref_init() sets mp->kref to 1 and then
2431 * call to fc_exch_mgr_add incremented mp->kref again,
2434 kref_put(&mp->kref, fc_exch_mgr_destroy);
2435 return mp;
2438 mempool_destroy(mp->ep_pool);
2440 kfree(mp);
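
The fc_exch_mgr_alloc matches at 2360-2440 also show how the per-CPU pools are sized: the requested [min_xid, max_xid] range is split evenly across fc_cpu_mask + 1 pools, max_xid is clamped first if one pool's array of exchange pointers would not fit the per-CPU allocator's minimum unit, and pool_max_index marks the last usable slot. A worked example of the unclamped branch with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int min_xid = 0x0100, max_xid = 0x0fff;	/* illustrative range */
		unsigned int fc_cpu_mask = 3;				/* illustrative: 4 per-CPU pools */
		unsigned int pool_exch_range, pool_max_index;

		/* Split the XID range evenly across the per-CPU pools. */
		pool_exch_range = (max_xid - min_xid + 1) / (fc_cpu_mask + 1);
		pool_max_index = pool_exch_range - 1;

		/* 3840 XIDs -> 4 pools x 960 exchanges (slot indices 0..959) */
		printf("%u XIDs -> %u pools x %u exchanges (indices 0..%u)\n",
		       max_xid - min_xid + 1, fc_cpu_mask + 1,
		       pool_exch_range, pool_max_index);
		return 0;
	}
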
2483 if ((xid >= ema->mp->min_xid) &&
2484 (xid <= ema->mp->max_xid))
2531 fc_exch_recv_bls(ema->mp, fp);
2534 fc_exch_recv_seq_resp(ema->mp, fp);
2536 fc_exch_recv_resp(ema->mp, fp);
2538 fc_exch_recv_req(lport, ema->mp, fp);
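
Lines 2531-2538 are the tail of the receive path, where an incoming frame is handed to one of the four handlers above. The selection logic is only partly visible in these matches; roughly, BLS frames (ABTS/BA_ACC/BA_RJT) go to fc_exch_recv_bls, and other frames are routed by the F_CTL context bits. A simplified stand-alone classifier (the macro and enum names are my own; the bit values match FC_FC_EX_CTX and FC_FC_SEQ_CTX):

	#include <stdint.h>

	#define F_EX_CTX	(1u << 23)	/* frame was sent by the exchange responder */
	#define F_SEQ_CTX	(1u << 22)	/* frame was sent by the sequence recipient */

	enum handler { RECV_BLS, RECV_SEQ_RESP, RECV_RESP, RECV_REQ };

	enum handler classify(int is_bls, uint32_t f_ctl)
	{
		if (is_bls)
			/* fc_exch_recv_bls() then finds the local exchange by OX_ID
			 * when F_EX_CTX is set, otherwise by RX_ID (line 1722). */
			return RECV_BLS;
		if ((f_ctl & (F_EX_CTX | F_SEQ_CTX)) == F_EX_CTX)
			return RECV_SEQ_RESP;	/* responder opened a new sequence on our exchange */
		if (f_ctl & F_SEQ_CTX)
			return RECV_RESP;	/* reply within a sequence we initiated */
		return RECV_REQ;		/* neither context bit set: a new inbound request */
	}
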