Lines matching references to ic (the struct rds_ib_connection pointer used throughout the RDS IB receive path); each hit is prefixed with its line number in the source file.

46 void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
51 for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
63 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
65 sge->lkey = ic->i_mr->lkey;
70 sge->lkey = ic->i_mr->lkey;
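
The init-ring hits above show the fixed part of each receive slot: a work request with two SGEs, the first pointing at that slot's entry in the connection-wide DMA-mapped header array (i_recv_hdrs_dma), the second reserved for the data fragment, with both SGEs using the connection's lkey (i_mr->lkey). A minimal sketch of that layout, with hypothetical names (recv_slot, hdr_dma, hdr_len, mr_lkey) standing in for the real ic members; hdr_len would be sizeof(struct rds_header) in the listing:

#include <rdma/ib_verbs.h>

/* Hypothetical receive slot mirroring the two-SGE shape in the hits above. */
struct recv_slot {
    struct ib_recv_wr wr;
    struct ib_sge sge[2];    /* [0] = header, [1] = data fragment */
};

static void init_recv_slot(struct recv_slot *slot, u32 i, u64 hdr_dma,
                           u32 hdr_len, u32 mr_lkey)
{
    slot->wr.next = NULL;
    slot->wr.sg_list = slot->sge;
    slot->wr.num_sge = 2;

    /* SGE 0: this slot's header inside the shared, DMA-mapped header array. */
    slot->sge[0].addr = hdr_dma + (u64)i * hdr_len;
    slot->sge[0].length = hdr_len;
    slot->sge[0].lkey = mr_lkey;

    /* SGE 1: address and length are programmed at refill time; only the
     * lkey is fixed up front, as in the listing. */
    slot->sge[1].lkey = mr_lkey;
}
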
120 int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
124 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
126 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
128 free_percpu(ic->i_cache_incs.percpu);
154 void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
162 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
163 rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
164 free_percpu(ic->i_cache_incs.percpu);
172 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
173 rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
174 free_percpu(ic->i_cache_frags.percpu);
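
The cache hits show a pairwise lifecycle: allocate the per-CPU inc cache, then the frag cache, rolling the first back if the second fails; at teardown, each cache's per-CPU lists are spliced onto one local list before the percpu storage is freed. A reduced sketch of the allocation rollback, assuming a hypothetical cache type whose only member is the percpu pointer (the real i_cache_incs/i_cache_frags carry more state):

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/errno.h>

/* Hypothetical stand-in for the per-connection receive caches above. */
struct recv_cache {
    struct list_head __percpu *percpu;
};

static int recv_cache_alloc(struct recv_cache *cache)
{
    cache->percpu = alloc_percpu(struct list_head);
    /* Callers would still INIT_LIST_HEAD() each per-CPU entry before use. */
    return cache->percpu ? 0 : -ENOMEM;
}

/* Mirrors the shape of rds_ib_recv_alloc_caches(): undo the inc cache if the
 * frag cache allocation fails, so the caller never sees a half-built pair. */
static int recv_alloc_caches(struct recv_cache *incs, struct recv_cache *frags)
{
    int ret;

    ret = recv_cache_alloc(incs);
    if (ret)
        return ret;

    ret = recv_cache_alloc(frags);
    if (ret)
        free_percpu(incs->percpu);    /* roll back the first cache */

    return ret;
}
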
190 static void rds_ib_frag_free(struct rds_ib_connection *ic,
195 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
204 struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
211 rds_ib_frag_free(ic, frag);
216 rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
219 static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
227 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
228 rds_ib_frag_free(ic, recv->r_frag);
233 void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
237 for (i = 0; i < ic->i_recv_ring.w_nr; i++)
238 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
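
The clear-ring hits are the inverse of refill: every slot that still holds a fragment gets its scatterlist DMA-unmapped from the device before the fragment (and any partially built inc) goes back to its cache. A small sketch of that per-slot teardown, with hypothetical names and assuming the fragment was mapped with ib_dma_map_sg(..., DMA_FROM_DEVICE):

#include <rdma/ib_verbs.h>

/* Hypothetical slot: a posted receive owns at most one mapped fragment. */
struct posted_recv {
    struct scatterlist *frag_sg;    /* single-entry scatterlist, or NULL */
};

static void clear_recv_ring(struct ib_device *dev, struct posted_recv *slots,
                            u32 nr)
{
    u32 i;

    for (i = 0; i < nr; i++) {
        if (!slots[i].frag_sg)
            continue;    /* never refilled, nothing to undo */

        /* Reverse the DMA_FROM_DEVICE mapping made at refill time; the
         * fragment itself would then be returned to the frag cache, as
         * rds_ib_frag_free() does in the listing. */
        ib_dma_unmap_sg(dev, slots[i].frag_sg, 1, DMA_FROM_DEVICE);
        slots[i].frag_sg = NULL;
    }
}
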
241 static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
248 cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
265 rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
270 static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
277 cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
302 struct rds_ib_connection *ic = conn->c_transport_data;
313 if (!ic->i_cache_incs.ready)
314 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
315 if (!ic->i_cache_frags.ready)
316 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
323 recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
329 recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
333 ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
338 sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
342 sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
343 sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
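
The refill-one hits show how the data half of a slot gets its SGE: the fragment's single-entry scatterlist is mapped with ib_dma_map_sg(), and the resulting bus address and length are read back through the ib_sg_dma_address()/ib_sg_dma_len() accessors used in this era of the code. A hedged sketch of just that step (hypothetical function name; error handling reduced to the mapping failure):

#include <rdma/ib_verbs.h>
#include <linux/errno.h>

static int map_frag_into_data_sge(struct ib_device *dev,
                                  struct scatterlist *frag_sg,
                                  struct ib_sge *data_sge, u32 lkey)
{
    /* Map the fragment so the HCA can write incoming data into it. */
    if (ib_dma_map_sg(dev, frag_sg, 1, DMA_FROM_DEVICE) == 0)
        return -ENOMEM;    /* mapping failed; caller keeps the frag */

    /* Program the data SGE from wherever the fragment landed. */
    data_sge->addr = ib_sg_dma_address(dev, frag_sg);
    data_sge->length = ib_sg_dma_len(dev, frag_sg);
    data_sge->lkey = lkey;

    return 0;
}
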
359 struct rds_ib_connection *ic = conn->c_transport_data;
367 rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
368 if (pos >= ic->i_recv_ring.w_nr) {
374 recv = &ic->i_recvs[pos];
381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
385 ic->i_cm_id->device,
400 if (ic->i_flowctl && posted)
404 rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
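
The refill loop's posting discipline is visible in the hits above: claim one ring slot with rds_ib_ring_alloc(), build the slot, hand it to the HCA with ib_post_recv(), and on failure give the slot back with rds_ib_ring_unalloc() so the ring accounting stays consistent. A sketch of that claim/post/unwind step, using a deliberately simplified ring counter in place of the real ring helpers:

#include <rdma/ib_verbs.h>
#include <linux/errno.h>

/* Simplified ring bookkeeping: just a posted-slot counter, no wrap logic. */
struct simple_ring {
    u32 nr;        /* total slots */
    u32 posted;    /* slots currently owned by the HCA */
};

static int post_one_recv(struct ib_qp *qp, struct simple_ring *ring,
                         struct ib_recv_wr *wr)
{
    struct ib_recv_wr *failed_wr;
    int ret;

    if (ring->posted >= ring->nr)
        return -EAGAIN;    /* ring full, nothing to do */

    ring->posted++;        /* claim the slot first ... */
    ret = ib_post_recv(qp, wr, &failed_wr);
    if (ret)
        ring->posted--;    /* ... and return it if the post fails,
                            * like rds_ib_ring_unalloc() above */
    return ret;
}
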
530 /* ic starts out kzalloc()ed */
531 void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
533 struct ib_send_wr *wr = &ic->i_ack_wr;
534 struct ib_sge *sge = &ic->i_ack_sge;
536 sge->addr = ic->i_ack_dma;
538 sge->lkey = ic->i_mr->lkey;
570 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
575 spin_lock_irqsave(&ic->i_ack_lock, flags);
576 ic->i_ack_next = seq;
578 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
579 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
582 static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
587 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
589 spin_lock_irqsave(&ic->i_ack_lock, flags);
590 seq = ic->i_ack_next;
591 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
596 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
599 atomic64_set(&ic->i_ack_next, seq);
602 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
606 static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
608 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
611 return atomic64_read(&ic->i_ack_next);
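
The two set_ack/get_ack variants above are the same helpers built two ways: on 32-bit (no CONFIG_64BIT) the pending ACK sequence sits behind a spinlock because a 64-bit store can tear, while on 64-bit an atomic64_t makes the load and store tear-free without locking; in both builds the "an ACK is owed" condition lives in a separate flag bit. A sketch of the two shapes, assuming a hypothetical ack_state container; the full helpers also take an ack-required argument and order the flag update with memory barriers, both left out here:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>

#define ACK_REQUESTED 0

/* Hypothetical container for the pending-ACK sequence number. */
struct ack_state {
#ifndef CONFIG_64BIT
    spinlock_t lock;    /* spin_lock_init() this at setup time */
    u64 next;
#else
    atomic64_t next;    /* 64-bit loads/stores are already atomic */
#endif
    unsigned long flags;
};

#ifndef CONFIG_64BIT
static void ack_set(struct ack_state *a, u64 seq)
{
    unsigned long irqflags;

    spin_lock_irqsave(&a->lock, irqflags);
    a->next = seq;    /* the lock keeps the 64-bit store from tearing */
    set_bit(ACK_REQUESTED, &a->flags);
    spin_unlock_irqrestore(&a->lock, irqflags);
}

static u64 ack_get(struct ack_state *a)
{
    unsigned long irqflags;
    u64 seq;

    clear_bit(ACK_REQUESTED, &a->flags);    /* same order as the listing */
    spin_lock_irqsave(&a->lock, irqflags);
    seq = a->next;
    spin_unlock_irqrestore(&a->lock, irqflags);
    return seq;
}
#else
static void ack_set(struct ack_state *a, u64 seq)
{
    atomic64_set(&a->next, seq);
    set_bit(ACK_REQUESTED, &a->flags);
}

static u64 ack_get(struct ack_state *a)
{
    clear_bit(ACK_REQUESTED, &a->flags);
    return atomic64_read(&a->next);
}
#endif
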
616 static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
618 struct rds_header *hdr = ic->i_ack;
623 seq = rds_ib_get_ack(ic);
625 rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
630 ic->i_ack_queued = jiffies;
632 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
637 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
638 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
642 rds_ib_conn_error(ic->conn, "sending ack failed\n");
685 void rds_ib_attempt_ack(struct rds_ib_connection *ic)
689 if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
692 if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
698 if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
700 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
704 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
705 rds_ib_send_ack(ic, adv_credits);
712 void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
714 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
715 rds_ib_attempt_ack(ic);
722 u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
724 if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
726 return rds_ib_get_ack(ic);
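
Taken together, the attempt/complete/piggyback hits describe a small state machine over two flag bits: ACK_REQUESTED means the peer is owed an ACK, ACK_IN_FLIGHT means an explicit ACK work request is currently posted. attempt_ack only sends when REQUESTED is set and it wins the test_and_set on IN_FLIGHT; send completion clears IN_FLIGHT and immediately re-attempts in case demand accumulated; and when an outgoing data message can piggyback the ACK, REQUESTED is simply cleared. A hedged sketch of that flow, with hypothetical names and the credit check plus the actual ACK posting reduced to a stub:

#include <linux/bitops.h>

#define ACK_REQUESTED 0    /* the peer is owed an ACK */
#define ACK_IN_FLIGHT 1    /* an explicit ACK WR is currently posted */

/* Stub standing in for building and ib_post_send()ing the ACK message. */
static int post_ack(unsigned long *flags)
{
    return 0;    /* 0 = posted successfully */
}

static void attempt_ack(unsigned long *flags)
{
    if (!test_bit(ACK_REQUESTED, flags))
        return;    /* nothing owed */

    if (test_and_set_bit(ACK_IN_FLIGHT, flags))
        return;    /* one ACK already in flight */

    clear_bit(ACK_REQUESTED, flags);
    if (post_ack(flags)) {
        /* Post failed: restore both bits so a later pass retries. */
        clear_bit(ACK_IN_FLIGHT, flags);
        set_bit(ACK_REQUESTED, flags);
    }
}

/* ACK send completion: release the in-flight slot, then re-check demand. */
static void ack_send_complete(unsigned long *flags)
{
    clear_bit(ACK_IN_FLIGHT, flags);
    attempt_ack(flags);
}

/* Piggyback path: an outgoing data header will carry the ACK instead. */
static void piggyback_ack(unsigned long *flags)
{
    test_and_clear_bit(ACK_REQUESTED, flags);
}
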
822 struct rds_ib_connection *ic = conn->c_transport_data;
823 struct rds_ib_incoming *ibinc = ic->i_ibinc;
828 rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
841 ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
877 rds_ib_frag_free(ic, recv->r_frag);
891 ic->i_ibinc = ibinc;
895 ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
897 rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
898 ic->i_recv_data_rem, hdr->h_flags);
916 if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
917 ic->i_recv_data_rem -= RDS_FRAG_SIZE;
919 ic->i_recv_data_rem = 0;
920 ic->i_ibinc = NULL;
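
The data-path hits show the reassembly bookkeeping: the first fragment of a message latches the inc on the connection (i_ibinc) and records the total payload length from the RDS header, and every fragment thereafter subtracts one fragment's worth from i_recv_data_rem until it reaches zero and the connection lets go of the inc. A compact sketch of that remainder accounting, with hypothetical names and the fragment size assumed to match RDS_FRAG_SIZE:

#include <linux/types.h>

#define FRAG_SIZE 4096    /* assumed; stands in for RDS_FRAG_SIZE */

/* Hypothetical per-connection reassembly state. */
struct reasm_state {
    void *inc;        /* message currently being reassembled */
    u32 data_rem;     /* payload bytes still expected */
};

/* Called once per received fragment; hdr_len is the h_len field from the
 * message's RDS header. Returns true when the message is complete. */
static bool account_fragment(struct reasm_state *st, void *inc, u32 hdr_len)
{
    if (!st->inc) {
        /* First fragment: latch the inc and the total length. */
        st->inc = inc;
        st->data_rem = hdr_len;
    }

    if (st->data_rem > FRAG_SIZE) {
        st->data_rem -= FRAG_SIZE;    /* more fragments to come */
        return false;
    }

    /* Final fragment: hand st->inc up to the socket layer and reset. */
    st->data_rem = 0;
    st->inc = NULL;
    return true;
}
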
955 struct rds_ib_connection *ic = conn->c_transport_data;
961 tasklet_schedule(&ic->i_recv_tasklet);
964 static inline void rds_poll_cq(struct rds_ib_connection *ic,
967 struct rds_connection *conn = ic->conn;
971 while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
978 recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
980 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
1004 rds_ib_ring_free(&ic->i_recv_ring, 1);
1010 struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
1011 struct rds_connection *conn = ic->conn;
1014 rds_poll_cq(ic, &state);
1015 ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
1016 rds_poll_cq(ic, &state);
1019 rds_ib_set_ack(ic, state.ack_next, state.ack_required);
1020 if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
1022 ic->i_ack_recv = state.ack_recv;
1025 rds_ib_attempt_ack(ic);
1030 if (rds_ib_ring_empty(&ic->i_recv_ring))
1033 if (rds_ib_ring_low(&ic->i_recv_ring))
1039 struct rds_ib_connection *ic = conn->c_transport_data;
1044 rds_ib_attempt_ack(ic);
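
The completion-path hits at the end of the listing show the usual split between hard-IRQ and tasklet work, and inside the tasklet the "poll, re-arm, poll again" drain: the interrupt handler only schedules the tasklet; the tasklet empties the CQ, re-arms notification with ib_req_notify_cq(), then polls once more to catch any completion that slipped in between the last poll and the re-arm, and afterwards pushes the accumulated ACK state back to the connection and, judging by the ring-low check, triggers a refill when too few receives remain posted. A sketch of the drain skeleton, assuming the old-style tasklet API and leaving the per-completion work (unmap, header processing, ring free) as a comment:

#include <linux/interrupt.h>
#include <rdma/ib_verbs.h>

/* Drain whatever is currently in the CQ, one completion at a time. */
static void drain_recv_cq(struct ib_cq *cq)
{
    struct ib_wc wc;

    while (ib_poll_cq(cq, 1, &wc) > 0) {
        /* ... look up the oldest posted slot, ib_dma_unmap_sg() its
         * fragment, process the header/data, free the ring slot ... */
    }
}

/* Tasklet body: closes the race between "CQ looked empty" and "re-armed". */
static void recv_tasklet_fn(unsigned long data)
{
    struct ib_cq *cq = (struct ib_cq *)data;

    drain_recv_cq(cq);                         /* drain what is there now */
    ib_req_notify_cq(cq, IB_CQ_SOLICITED);     /* ask for the next interrupt */
    drain_recv_cq(cq);                         /* catch anything that raced in */
}

/* CQ completion callback (runs in hard-IRQ context): defer to the tasklet.
 * Assumes the tasklet was registered as the CQ's context, e.g.
 * tasklet_init(&t, recv_tasklet_fn, (unsigned long)cq). */
static void recv_cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
    struct tasklet_struct *t = cq_context;

    tasklet_schedule(t);
}
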