Lines matching refs:ic (uses of the rds_iw_connection pointer "ic" in net/rds/iw_recv.c; the number at the start of each line is that line's position in the source file)

69 static void rds_iw_recv_unmap_page(struct rds_iw_connection *ic,
76 ib_dma_unmap_page(ic->i_cm_id->device,
82 void rds_iw_recv_init_ring(struct rds_iw_connection *ic)
87 for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
98 sge = rds_iw_data_sge(ic, recv->r_sge);
103 sge = rds_iw_header_sge(ic, recv->r_sge);
104 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
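
The init loop at lines 87-104 gives every ring slot two scatter/gather entries: the data SGE is left empty until a page fragment is mapped into it at refill time, while the header SGE is computed once as the base DMA address of a contiguous header array plus the slot index. A minimal user-space sketch of that addressing; sge_model, recv_model, and the 48-byte header size are stand-ins, not the kernel's types:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_NR    8        /* stands in for ic->i_recv_ring.w_nr */
    #define FRAG_SIZE  4096     /* stands in for RDS_FRAG_SIZE */
    #define HDR_SIZE   48       /* stands in for sizeof(struct rds_header) */

    struct sge_model { uint64_t addr; uint32_t length; };
    struct recv_model { struct sge_model data_sge, hdr_sge; };

    int main(void)
    {
            struct recv_model recvs[RING_NR];
            uint64_t hdrs_dma = 0x10000;   /* pretend DMA base of the header array */
            unsigned int i;

            /* Mirrors rds_iw_recv_init_ring(): the data SGE waits for a
             * fragment, the header SGE is pinned to this slot's entry in
             * one pre-mapped array of rds_header structs. */
            for (i = 0; i < RING_NR; i++) {
                    recvs[i].data_sge.addr = 0;
                    recvs[i].data_sge.length = FRAG_SIZE;
                    recvs[i].hdr_sge.addr = hdrs_dma + (uint64_t)i * HDR_SIZE;
                    recvs[i].hdr_sge.length = HDR_SIZE;
            }

            printf("slot 3 header lands at 0x%llx\n",
                   (unsigned long long)recvs[3].hdr_sge.addr);
            return 0;
    }
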
110 static void rds_iw_recv_clear_one(struct rds_iw_connection *ic,
118 rds_iw_recv_unmap_page(ic, recv);
126 void rds_iw_recv_clear_ring(struct rds_iw_connection *ic)
130 for (i = 0; i < ic->i_recv_ring.w_nr; i++)
131 rds_iw_recv_clear_one(ic, &ic->i_recvs[i]);
133 if (ic->i_frag.f_page)
134 rds_iw_frag_drop_page(&ic->i_frag);
141 struct rds_iw_connection *ic = conn->c_transport_data;
169 if (!ic->i_frag.f_page) {
170 ic->i_frag.f_page = alloc_page(page_gfp);
171 if (!ic->i_frag.f_page)
173 ic->i_frag.f_offset = 0;
176 dma_addr = ib_dma_map_page(ic->i_cm_id->device,
177 ic->i_frag.f_page,
178 ic->i_frag.f_offset,
181 if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
189 recv->r_frag->f_page = ic->i_frag.f_page;
190 recv->r_frag->f_offset = ic->i_frag.f_offset;
193 sge = rds_iw_data_sge(ic, recv->r_sge);
197 sge = rds_iw_header_sge(ic, recv->r_sge);
198 sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
203 if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
204 ic->i_frag.f_offset += RDS_FRAG_SIZE;
206 put_page(ic->i_frag.f_page);
207 ic->i_frag.f_page = NULL;
208 ic->i_frag.f_offset = 0;
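
Lines 169-208 show the page-fragment allocator behind refill: one page is carved into RDS_FRAG_SIZE pieces, f_offset walks forward one fragment at a time, and the connection drops its page reference once the fragment at RDS_PAGE_LAST_OFF has been handed out. A user-space model with a plain refcount standing in for get_page()/put_page(); a 16K page size is assumed so the page holds several fragments (on 4K-page systems every fragment gets its own page):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SZ       16384                 /* e.g. a 16K-page architecture */
    #define FRAG_SZ        4096                 /* stands in for RDS_FRAG_SIZE */
    #define PAGE_LAST_OFF (PAGE_SZ - FRAG_SZ)   /* stands in for RDS_PAGE_LAST_OFF */

    struct page_model { int refs; };

    static void put_page_model(struct page_model *p)
    {
            if (--p->refs == 0) {
                    free(p);
                    puts("page freed");
            }
    }

    /* Carve one fragment off the current page: allocate a fresh page when
     * none is cached, hand the fragment its own reference, then either
     * advance the offset or drop the cursor's reference once the page has
     * been fully carved up (the f_offset logic at lines 203-208). */
    static void take_frag(struct page_model **page, unsigned int *off)
    {
            struct page_model *p;

            if (!*page) {
                    *page = malloc(sizeof(**page));
                    (*page)->refs = 1;          /* the cursor's reference */
                    *off = 0;
            }
            p = *page;
            p->refs++;                          /* the new fragment's reference */
            printf("fragment at offset %u\n", *off);

            if (*off < PAGE_LAST_OFF) {
                    *off += FRAG_SZ;
            } else {
                    put_page_model(p);          /* cursor lets go of a full page */
                    *page = NULL;
            }

            put_page_model(p);                  /* pretend the fragment was consumed */
    }

    int main(void)
    {
            struct page_model *page = NULL;
            unsigned int off = 0;
            int i;

            for (i = 0; i < 5; i++)
                    take_frag(&page, &off);
            if (page)
                    put_page_model(page);       /* release the leftover cursor ref */
            return 0;
    }
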
227 struct rds_iw_connection *ic = conn->c_transport_data;
235 rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
236 if (pos >= ic->i_recv_ring.w_nr) {
243 recv = &ic->i_recvs[pos];
251 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
268 if (ic->i_flowctl && posted)
272 rds_iw_ring_unalloc(&ic->i_recv_ring, 1);
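
The refill loop at lines 227-272 reserves a ring slot before each ib_post_recv() and gives the slot back with rds_iw_ring_unalloc() if the post fails, so the ring's accounting never drifts from what the hardware actually holds. A sketch of that reserve/try/release shape; ring_model and post_recv_stub are hypothetical, and the real ring keeps separate alloc and free counters rather than a single count:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring_model { unsigned int w_nr, allocated; };

    /* Stand-ins for rds_iw_ring_alloc()/rds_iw_ring_unalloc(). */
    static bool ring_alloc(struct ring_model *r, unsigned int *pos)
    {
            if (r->allocated >= r->w_nr)
                    return false;              /* ring full */
            *pos = r->allocated++;
            return true;
    }

    static void ring_unalloc(struct ring_model *r, unsigned int n)
    {
            r->allocated -= n;                 /* hand the slots back */
    }

    static int post_recv_stub(unsigned int pos)
    {
            return pos == 2 ? -1 : 0;          /* pretend the third post fails */
    }

    int main(void)
    {
            struct ring_model ring = { .w_nr = 4, .allocated = 0 };
            unsigned int pos;

            while (ring_alloc(&ring, &pos)) {
                    if (post_recv_stub(pos)) {
                            fprintf(stderr, "post failed, slot %u returned\n", pos);
                            ring_unalloc(&ring, 1);   /* mirrors line 272 */
                            break;
                    }
                    printf("posted slot %u\n", pos);
            }
            return 0;
    }
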
361 /* ic starts out kzalloc()ed */
362 void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
364 struct ib_send_wr *wr = &ic->i_ack_wr;
365 struct ib_sge *sge = &ic->i_ack_sge;
367 sge->addr = ic->i_ack_dma;
369 sge->lkey = rds_iw_local_dma_lkey(ic);
401 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
406 spin_lock_irqsave(&ic->i_ack_lock, flags);
407 ic->i_ack_next = seq;
409 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
410 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
413 static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
418 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
420 spin_lock_irqsave(&ic->i_ack_lock, flags);
421 seq = ic->i_ack_next;
422 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
427 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
430 atomic64_set(&ic->i_ack_next, seq);
433 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
437 static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
439 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
442 return atomic64_read(&ic->i_ack_next);
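
Lines 401-442 show the same two entry points twice because the file carries two implementations: one guards i_ack_next with i_ack_lock for builds without usable 64-bit atomics, the other stores it in an atomic64_t and drops the lock entirely (the compile-time guard itself is not visible in this listing). A user-space rendering of both, with a pthread mutex and C11 atomics standing in for the spinlock and atomic64_t:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Variant 1: models the spin_lock_irqsave() build, where a u64
     * cannot be loaded or stored atomically and needs a lock. */
    struct ack_locked {
            pthread_mutex_t lock;
            uint64_t next;
            atomic_bool requested;
    };

    static void set_ack_locked(struct ack_locked *a, uint64_t seq)
    {
            pthread_mutex_lock(&a->lock);
            a->next = seq;
            pthread_mutex_unlock(&a->lock);
            atomic_store(&a->requested, true);
    }

    static uint64_t get_ack_locked(struct ack_locked *a)
    {
            uint64_t seq;

            atomic_store(&a->requested, false);   /* clear before reading */
            pthread_mutex_lock(&a->lock);
            seq = a->next;
            pthread_mutex_unlock(&a->lock);
            return seq;
    }

    /* Variant 2: models the atomic64_t build, where a 64-bit
     * load/store is already indivisible. */
    struct ack_atomic {
            _Atomic uint64_t next;
            atomic_bool requested;
    };

    static void set_ack_atomic(struct ack_atomic *a, uint64_t seq)
    {
            atomic_store(&a->next, seq);
            atomic_store(&a->requested, true);
    }

    static uint64_t get_ack_atomic(struct ack_atomic *a)
    {
            atomic_store(&a->requested, false);
            return atomic_load(&a->next);
    }

    int main(void)
    {
            struct ack_locked l = { PTHREAD_MUTEX_INITIALIZER, 0, false };
            struct ack_atomic t = { 0, false };

            set_ack_locked(&l, 42);
            set_ack_atomic(&t, 42);
            printf("%llu %llu\n", (unsigned long long)get_ack_locked(&l),
                   (unsigned long long)get_ack_atomic(&t));
            return 0;
    }
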
447 static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
449 struct rds_header *hdr = ic->i_ack;
454 seq = rds_iw_get_ack(ic);
456 rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
461 ic->i_ack_queued = jiffies;
463 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
468 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
469 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
473 rds_iw_conn_error(ic->conn, "sending ack failed\n");
516 void rds_iw_attempt_ack(struct rds_iw_connection *ic)
520 if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
523 if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
529 if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
531 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
535 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
536 rds_iw_send_ack(ic, adv_credits);
543 void rds_iw_ack_send_complete(struct rds_iw_connection *ic)
545 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
546 rds_iw_attempt_ack(ic);
553 u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic)
555 if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
557 return rds_iw_get_ack(ic);
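
Lines 516-557 spell out the two-bit ack protocol: IB_ACK_REQUESTED means the peer is owed an ack, IB_ACK_IN_FLIGHT means an ack work request is already posted, and rds_iw_piggyb_ack() lets a data message carry the ack by clearing REQUESTED on its way out. A sketch of the race-free shape of attempt/complete, with C11 atomics standing in for the kernel's bit operations (grab_credit and post_ack are stubs):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool ack_requested;      /* stands in for IB_ACK_REQUESTED */
    static atomic_bool ack_in_flight;      /* stands in for IB_ACK_IN_FLIGHT */

    static bool grab_credit(void) { return true; }  /* rds_iw_send_grab_credits() stub */
    static void post_ack(void)    { puts("ack posted"); }

    /* Mirrors rds_iw_attempt_ack(): at most one ack work request may be
     * outstanding, so IN_FLIGHT is taken with an atomic exchange; a loser
     * of that race just leaves and trusts the completion path to retry. */
    static void attempt_ack(void)
    {
            if (!atomic_load(&ack_requested))
                    return;
            if (atomic_exchange(&ack_in_flight, true))
                    return;                /* an ack is already on the wire */
            if (!grab_credit()) {
                    atomic_store(&ack_in_flight, false);
                    return;                /* retry once credits come back */
            }
            atomic_store(&ack_requested, false);
            post_ack();
    }

    /* Mirrors rds_iw_ack_send_complete(): drop IN_FLIGHT, then re-check,
     * since a new ack may have been requested while ours was in flight. */
    static void ack_send_complete(void)
    {
            atomic_store(&ack_in_flight, false);
            attempt_ack();
    }

    int main(void)
    {
            atomic_store(&ack_requested, true);
            attempt_ack();                        /* posts the first ack */
            atomic_store(&ack_requested, true);   /* peer needs another one */
            ack_send_complete();                  /* completion kicks it out */
            return 0;
    }
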
653 struct rds_iw_connection *ic = conn->c_transport_data;
654 struct rds_iw_incoming *iwinc = ic->i_iwinc;
659 rdsdebug("ic %p iwinc %p recv %p byte len %u\n", ic, iwinc, recv,
672 ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
722 ic->i_iwinc = iwinc;
726 ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
728 rdsdebug("ic %p iwinc %p rem %u flag 0x%x\n", ic, iwinc,
729 ic->i_recv_data_rem, hdr->h_flags);
747 if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
748 ic->i_recv_data_rem -= RDS_FRAG_SIZE;
750 ic->i_recv_data_rem = 0;
751 ic->i_iwinc = NULL;
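
Lines 722-751 account for message reassembly: the first fragment's header sets i_recv_data_rem from h_len, and every later fragment subtracts RDS_FRAG_SIZE until the remainder fits in one fragment, at which point i_iwinc is reset for the next message. The arithmetic in miniature:

    #include <stdio.h>

    #define FRAG_SZ 4096                   /* stands in for RDS_FRAG_SIZE */

    int main(void)
    {
            unsigned int data_rem = 10000; /* pretend be32_to_cpu(hdr->h_len) */
            unsigned int frags = 0;

            while (data_rem) {
                    frags++;
                    if (data_rem > FRAG_SZ)
                            data_rem -= FRAG_SZ;
                    else
                            data_rem = 0;  /* last fragment: message complete */
            }
            printf("10000-byte message spans %u fragments\n", frags);
            return 0;
    }
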
786 struct rds_iw_connection *ic = conn->c_transport_data;
792 tasklet_schedule(&ic->i_recv_tasklet);
795 static inline void rds_poll_cq(struct rds_iw_connection *ic,
798 struct rds_connection *conn = ic->conn;
802 while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
808 recv = &ic->i_recvs[rds_iw_ring_oldest(&ic->i_recv_ring)];
810 rds_iw_recv_unmap_page(ic, recv);
829 rds_iw_ring_free(&ic->i_recv_ring, 1);
835 struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
836 struct rds_connection *conn = ic->conn;
839 rds_poll_cq(ic, &state);
840 ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
841 rds_poll_cq(ic, &state);
844 rds_iw_set_ack(ic, state.ack_next, state.ack_required);
845 if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
847 ic->i_ack_recv = state.ack_recv;
850 rds_iw_attempt_ack(ic);
855 if (rds_iw_ring_empty(&ic->i_recv_ring))
861 if (rds_iw_ring_low(&ic->i_recv_ring))
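
The tasklet body at lines 839-841 drains the completion queue, re-arms it with ib_req_notify_cq(), and then drains again: a completion that lands between the first drain and the re-arm raises no interrupt, so only the second poll can catch it. The idiom modeled with a plain counter; poll_one() and req_notify() are stand-ins for ib_poll_cq() and ib_req_notify_cq():

    #include <stdio.h>

    static int pending;                    /* completions waiting in the CQ */

    static int poll_one(void)              /* stand-in for ib_poll_cq(cq, 1, &wc) */
    {
            if (!pending)
                    return 0;
            pending--;
            return 1;
    }

    static void drain(void)
    {
            while (poll_one())
                    puts("handled completion");
    }

    static void req_notify(void)           /* stand-in for ib_req_notify_cq() */
    {
            puts("CQ re-armed");
    }

    /* Mirrors the tasklet body at lines 839-841. */
    static void tasklet_body(void)
    {
            drain();                       /* 1: drain what is already queued */
            pending++;                     /* a completion sneaks in here...  */
            req_notify();                  /* 2: re-arm for future interrupts */
            drain();                       /* 3: ...and only this poll sees it */
    }

    int main(void)
    {
            pending = 2;
            tasklet_body();
            return 0;
    }
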
867 struct rds_iw_connection *ic = conn->c_transport_data;
876 mutex_lock(&ic->i_recv_mutex);
881 mutex_unlock(&ic->i_recv_mutex);
884 rds_iw_attempt_ack(ic);