Lines matching references to conn (net/rds/send.c)

64 void rds_send_reset(struct rds_connection *conn)
69 if (conn->c_xmit_rm) {
70 rm = conn->c_xmit_rm;
71 conn->c_xmit_rm = NULL;
80 conn->c_xmit_sg = 0;
81 conn->c_xmit_hdr_off = 0;
82 conn->c_xmit_data_off = 0;
83 conn->c_xmit_atomic_sent = 0;
84 conn->c_xmit_rdma_sent = 0;
85 conn->c_xmit_data_sent = 0;
87 conn->c_map_queued = 0;
89 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
90 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
93 spin_lock_irqsave(&conn->c_lock, flags);
94 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
98 list_splice_init(&conn->c_retrans, &conn->c_send_queue);
99 spin_unlock_irqrestore(&conn->c_lock, flags);
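
The rds_send_reset() fragments above show the two halves of a connection reset: zero the per-connection transmit cursor (the c_xmit_* fields) and, under c_lock, splice the retransmit list back onto the head of the send queue so that unacked messages go out again once the connection is back up. Below is an analogous userspace sketch of that splice step over a minimal circular doubly linked list; the types, names and four-message setup are illustrative only, not RDS code.

    #include <stdio.h>

    /* Minimal circular doubly linked list in the style of the kernel's
     * struct list_head; illustrative only. */
    struct node { struct node *prev, *next; int seq; };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void list_add_tail(struct node *n, struct node *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }

    /* Move every entry of 'src' to the head of 'dst' and leave 'src' empty:
     * the effect of list_splice_init(&conn->c_retrans, &conn->c_send_queue). */
    static void splice_init(struct node *src, struct node *dst)
    {
            if (src->next == src)
                    return;                 /* src is empty */
            src->next->prev = dst;
            src->prev->next = dst->next;
            dst->next->prev = src->prev;
            dst->next = src->next;
            list_init(src);
    }

    int main(void)
    {
            struct node send_queue, retrans, msgs[4];
            struct node *n;
            int i;

            list_init(&send_queue);
            list_init(&retrans);
            for (i = 0; i < 4; i++) {
                    msgs[i].seq = i;
                    /* 0 and 1 were sent once already; 2 and 3 are still queued */
                    list_add_tail(&msgs[i], i < 2 ? &retrans : &send_queue);
            }

            splice_init(&retrans, &send_queue);
            for (n = send_queue.next; n != &send_queue; n = n->next)
                    printf("seq %d\n", n->seq); /* 0 1 2 3: retransmits first */
            return 0;
    }
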
102 static int acquire_in_xmit(struct rds_connection *conn)
104 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
107 static void release_in_xmit(struct rds_connection *conn)
109 clear_bit(RDS_IN_XMIT, &conn->c_flags);
117 if (waitqueue_active(&conn->c_waitq))
118 wake_up_all(&conn->c_waitq);
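
A note on the acquire_in_xmit()/release_in_xmit() pair above: RDS allows only one caller at a time inside the transmit path and takes ownership with an atomic test_and_set_bit() on RDS_IN_XMIT, so a caller that loses the race can back off instead of blocking; release is a clear_bit() followed by the waitqueue_active()/wake_up_all() pair so that anyone waiting for the bit (for example, connection shutdown) is woken. A minimal userspace analogue of the same ownership pattern, using C11 atomics (all names here are illustrative, not RDS symbols):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative stand-in for the flag word holding RDS_IN_XMIT. */
    static atomic_uint xmit_flags;
    #define IN_XMIT 0x1u

    /* Nonzero if we now own the transmit path (the bit was clear before). */
    static int acquire_in_xmit(void)
    {
            return (atomic_fetch_or(&xmit_flags, IN_XMIT) & IN_XMIT) == 0;
    }

    /* Drop ownership; the kernel code follows this with wake_up_all() on
     * conn->c_waitq so waiters notice the bit is clear again. */
    static void release_in_xmit(void)
    {
            atomic_fetch_and(&xmit_flags, ~IN_XMIT);
    }

    int main(void)
    {
            if (acquire_in_xmit()) {
                    printf("got the transmit path\n");
                    release_in_xmit();
            } else {
                    printf("someone else is already transmitting\n");
            }
            return 0;
    }
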
126 * - reassembly is optional and easily done by transports per conn
135 int rds_send_xmit(struct rds_connection *conn)
153 if (!acquire_in_xmit(conn)) {
160 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
163 if (!rds_conn_up(conn)) {
164 release_in_xmit(conn);
169 if (conn->c_trans->xmit_prepare)
170 conn->c_trans->xmit_prepare(conn);
178 rm = conn->c_xmit_rm;
184 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
185 rm = rds_cong_update_alloc(conn);
192 conn->c_xmit_rm = rm;
205 spin_lock_irqsave(&conn->c_lock, flags);
207 if (!list_empty(&conn->c_send_queue)) {
208 rm = list_entry(conn->c_send_queue.next,
217 list_move_tail(&rm->m_conn_item, &conn->c_retrans);
220 spin_unlock_irqrestore(&conn->c_lock, flags);
234 spin_lock_irqsave(&conn->c_lock, flags);
237 spin_unlock_irqrestore(&conn->c_lock, flags);
243 if (conn->c_unacked_packets == 0 ||
244 conn->c_unacked_bytes < len) {
247 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
248 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
251 conn->c_unacked_bytes -= len;
252 conn->c_unacked_packets--;
255 conn->c_xmit_rm = rm;
259 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
261 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
264 conn->c_xmit_rdma_sent = 1;
271 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
273 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
276 conn->c_xmit_atomic_sent = 1;
305 if (rm->data.op_active && !conn->c_xmit_data_sent) {
307 ret = conn->c_trans->xmit(conn, rm,
308 conn->c_xmit_hdr_off,
309 conn->c_xmit_sg,
310 conn->c_xmit_data_off);
314 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
317 conn->c_xmit_hdr_off);
318 conn->c_xmit_hdr_off += tmp;
322 sg = &rm->data.op_sg[conn->c_xmit_sg];
325 conn->c_xmit_data_off);
326 conn->c_xmit_data_off += tmp;
328 if (conn->c_xmit_data_off == sg->length) {
329 conn->c_xmit_data_off = 0;
331 conn->c_xmit_sg++;
333 conn->c_xmit_sg == rm->data.op_nents);
337 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
338 (conn->c_xmit_sg == rm->data.op_nents))
339 conn->c_xmit_data_sent = 1;
347 if (!rm->data.op_active || conn->c_xmit_data_sent) {
348 conn->c_xmit_rm = NULL;
349 conn->c_xmit_sg = 0;
350 conn->c_xmit_hdr_off = 0;
351 conn->c_xmit_data_off = 0;
352 conn->c_xmit_rdma_sent = 0;
353 conn->c_xmit_atomic_sent = 0;
354 conn->c_xmit_data_sent = 0;
360 if (conn->c_trans->xmit_complete)
361 conn->c_trans->xmit_complete(conn);
363 release_in_xmit(conn);
386 if (!list_empty(&conn->c_send_queue)) {
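
The rds_send_xmit() fragments above are dominated by the transmit cursor kept on the connection: c_xmit_hdr_off counts header bytes already handed to the transport, c_xmit_sg and c_xmit_data_off track progress through the message's scatterlist, and the c_xmit_*_sent flags record which parts (rdma, atomic, data) have gone out, so a partially transmitted message can be resumed on the next pass. A self-contained sketch of just that bookkeeping, with a fake transport that accepts a limited number of bytes per call; the sizes and helper names are made up for the example:

    #include <stdio.h>

    #define HDR_LEN 48U             /* stand-in for sizeof(struct rds_header) */

    struct seg { unsigned int length; };

    /* One message: a header plus a toy three-entry scatterlist. */
    static struct seg sgs[] = { { 100 }, { 60 }, { 200 } };
    #define NENTS (sizeof(sgs) / sizeof(sgs[0]))

    /* Per-connection transmit cursor, mirroring conn->c_xmit_*. */
    static unsigned int hdr_off, xmit_sg, data_off;

    /* Fake transport: takes at most '*budget' bytes, returns bytes consumed. */
    static unsigned int fake_xmit(unsigned int want, unsigned int *budget)
    {
            unsigned int n = want < *budget ? want : *budget;
            *budget -= n;
            return n;
    }

    int main(void)
    {
            while (hdr_off < HDR_LEN || xmit_sg < NENTS) {
                    unsigned int budget = 64;  /* what the wire takes this pass */

                    /* Finish the header first, as the c_xmit_hdr_off logic does. */
                    if (hdr_off < HDR_LEN)
                            hdr_off += fake_xmit(HDR_LEN - hdr_off, &budget);

                    /* Then walk the scatterlist via c_xmit_sg/c_xmit_data_off. */
                    while (budget && xmit_sg < NENTS) {
                            data_off += fake_xmit(sgs[xmit_sg].length - data_off,
                                                  &budget);
                            if (data_off == sgs[xmit_sg].length) {
                                    data_off = 0;
                                    xmit_sg++;
                            }
                    }

                    printf("pass done: hdr_off=%u sg=%u data_off=%u\n",
                           hdr_off, xmit_sg, data_off);
            }
            return 0;               /* header and all segments are out */
    }
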
524 struct rds_message *rds_send_get_message(struct rds_connection *conn,
530 spin_lock_irqsave(&conn->c_lock, flags);
532 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
540 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
549 spin_unlock_irqrestore(&conn->c_lock, flags);
645 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
652 spin_lock_irqsave(&conn->c_lock, flags);
654 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
666 spin_unlock_irqrestore(&conn->c_lock, flags);
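
rds_send_drop_acked() walks conn->c_retrans under c_lock and collects every message covered by the peer's cumulative acknowledgement, so the heavier per-message completion work can run after the lock is dropped. A userspace sketch of that collect-then-complete shape over a plain array; the seq_acked() helper is illustrative (the kernel version delegates the test to a caller-supplied predicate on the sequence number from the message header):

    #include <stdint.h>
    #include <stdio.h>

    /* A queued message and its transmit sequence number. */
    struct msg { uint64_t seq; int acked; };

    /* True if a cumulative ack 'ack' covers 'seq'. Illustrative only. */
    static int seq_acked(uint64_t seq, uint64_t ack)
    {
            return seq <= ack;
    }

    int main(void)
    {
            struct msg retrans[] = { { 10, 0 }, { 11, 0 }, { 12, 0 }, { 13, 0 } };
            uint64_t ack = 11;      /* peer has everything up to seq 11 */
            int i;

            /* Pass 1: under the lock, just mark/unlink what the ack covers. */
            for (i = 0; i < 4; i++)
                    if (seq_acked(retrans[i].seq, ack))
                            retrans[i].acked = 1;

            /* Pass 2: outside the lock, do the per-message completion work. */
            for (i = 0; i < 4; i++)
                    printf("seq %llu: %s\n",
                           (unsigned long long)retrans[i].seq,
                           retrans[i].acked ? "completed" : "still pending");
            return 0;
    }
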
676 struct rds_connection *conn;
701 /* Remove the messages from the conn */
704 conn = rm->m_inc.i_conn;
706 spin_lock_irqsave(&conn->c_lock, flags);
708 * Maybe someone else beat us to removing rm from the conn.
713 spin_unlock_irqrestore(&conn->c_lock, flags);
717 spin_unlock_irqrestore(&conn->c_lock, flags);
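
The fragments around the 'Maybe someone else beat us to removing rm from the conn' comment show another common idiom in this file: a decision made without the lock is re-checked under c_lock before acting on it, because another CPU may have removed the message from the connection in the meantime. A userspace sketch of that re-check-under-the-lock pattern (types and helper are invented for the example):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t c_lock = PTHREAD_MUTEX_INITIALIZER;

    /* A message that may or may not still be on the connection's list. */
    struct msg { int on_conn; };

    /* Remove 'm' from the connection unless someone else already did.
     * The test is repeated under c_lock because the unlocked observation
     * that brought us here may be stale by the time the lock is taken. */
    static int remove_from_conn(struct msg *m)
    {
            int removed = 0;

            pthread_mutex_lock(&c_lock);
            if (m->on_conn) {
                    m->on_conn = 0;         /* list_del_init() in the kernel */
                    removed = 1;
            }
            pthread_mutex_unlock(&c_lock);
            return removed;
    }

    int main(void)
    {
            struct msg m = { 1 };

            printf("first caller removed: %d\n", remove_from_conn(&m));
            printf("second caller removed: %d\n", remove_from_conn(&m));
            return 0;
    }
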
751 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
794 rm->m_inc.i_conn = conn;
797 spin_lock(&conn->c_lock);
798 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
799 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
801 spin_unlock(&conn->c_lock);
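
rds_send_queue_rm() assigns the message its wire sequence number (c_next_tx_seq) and appends it to c_send_queue inside a single c_lock critical section, which is what keeps queue order and sequence order identical. A userspace sketch of that invariant with a pthread mutex standing in for the spinlock; the names are illustrative:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Per-connection state: the next sequence number and its lock,
     * standing in for conn->c_next_tx_seq under conn->c_lock. */
    static pthread_mutex_t c_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t next_tx_seq = 1;

    /* Assign a sequence number and "queue" the message in one critical
     * section, so queue order always matches sequence order. */
    static uint64_t queue_message(const char *what)
    {
            uint64_t seq;

            pthread_mutex_lock(&c_lock);
            seq = next_tx_seq++;
            printf("queued %s as seq %llu\n", what, (unsigned long long)seq);
            pthread_mutex_unlock(&c_lock);
            return seq;
    }

    int main(void)
    {
            queue_message("request");
            queue_message("follow-up");
            return 0;
    }
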
929 struct rds_connection *conn;
991 * Caching the conn in the socket helps a lot. */
993 conn = rs->rs_conn;
995 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
998 if (IS_ERR(conn)) {
999 ret = PTR_ERR(conn);
1002 rs->rs_conn = conn;
1010 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1011 printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1012 &rm->rdma, conn->c_trans->xmit_rdma);
1017 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1018 printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1019 &rm->atomic, conn->c_trans->xmit_atomic);
1024 rds_conn_connect_if_down(conn);
1026 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1032 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1046 rds_send_queue_rm(rs, conn, rm,
1067 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1068 rds_send_xmit(conn);
1089 rds_send_pong(struct rds_connection *conn, __be16 dport)
1101 rm->m_daddr = conn->c_faddr;
1104 rds_conn_connect_if_down(conn);
1106 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1110 spin_lock_irqsave(&conn->c_lock, flags);
1111 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1114 rm->m_inc.i_conn = conn;
1117 conn->c_next_tx_seq);
1118 conn->c_next_tx_seq++;
1119 spin_unlock_irqrestore(&conn->c_lock, flags);
1124 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1125 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
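
Finally, rds_sendmsg() and rds_send_pong() finish the same way: the message is already on c_send_queue, and an immediate push is attempted only when RDS_LL_SEND_FULL is not set; rds_sendmsg() calls rds_send_xmit() directly, while rds_send_pong() queues the send worker with no delay. A userspace sketch of that queue-first, kick-if-not-full shape (the helpers are stand-ins, not RDS functions):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for conn->c_flags and the RDS_LL_SEND_FULL bit. */
    static atomic_uint conn_flags;
    #define LL_SEND_FULL 0x1u

    static void send_xmit(void)           /* rds_send_xmit() in the listing  */
    {
            printf("pushing the send queue from the caller's context\n");
    }

    static void queue_send_worker(void)   /* queue_delayed_work(rds_wq, ...) */
    {
            printf("kicking the send worker\n");
    }

    /* The message is already queued; this only decides whether to try an
     * immediate transmit.  If a previous pass found the connection full,
     * skip the kick and leave the message for a later pass. */
    static void kick_transmit(int inline_ok)
    {
            if (atomic_load(&conn_flags) & LL_SEND_FULL)
                    return;
            if (inline_ok)
                    send_xmit();              /* rds_sendmsg() style   */
            else
                    queue_send_worker();      /* rds_send_pong() style */
    }

    int main(void)
    {
            kick_transmit(1);                           /* data path           */
            kick_transmit(0);                           /* pong path           */
            atomic_fetch_or(&conn_flags, LL_SEND_FULL); /* transport backed up */
            kick_transmit(1);                           /* nothing happens     */
            return 0;
    }
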