send.c revision a63273d4992603979ddb181b6a8f07082839b39f
1/*
2 * Copyright (c) 2006 Oracle.  All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/gfp.h>
35#include <net/sock.h>
36#include <linux/in.h>
37#include <linux/list.h>
38
39#include "rds.h"
40
41/* When transmitting messages in rds_send_xmit, we need to emerge from
42 * time to time and briefly release the CPU. Otherwise the softlock watchdog
43 * will kick our shin.
44 * Also, it seems fairer to not let one busy connection stall all the
45 * others.
46 *
47 * send_batch_count is the number of times we'll loop in send_xmit. Setting
48 * it to 0 will restore the old behavior (where we looped until we had
49 * drained the queue).
50 */
51static int send_batch_count = 64;
52module_param(send_batch_count, int, 0444);
53MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
54
55/*
56 * Reset the send state. Caller must hold c_send_lock when calling here.
57 */
58void rds_send_reset(struct rds_connection *conn)
59{
60	struct rds_message *rm, *tmp;
61	unsigned long flags;
62
63	if (conn->c_xmit_rm) {
64		/* Tell the user the RDMA op is no longer mapped by the
65		 * transport. This isn't entirely true (it's flushed out
66		 * independently) but as the connection is down, there's
67		 * no ongoing RDMA to/from that memory */
68		rds_message_unmapped(conn->c_xmit_rm);
69		rds_message_put(conn->c_xmit_rm);
70		conn->c_xmit_rm = NULL;
71	}
72	conn->c_xmit_sg = 0;
73	conn->c_xmit_hdr_off = 0;
74	conn->c_xmit_data_off = 0;
75	conn->c_xmit_rdma_sent = 0;
76
77	conn->c_map_queued = 0;
78
79	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
80	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
81
82	/* Mark messages as retransmissions, and move them to the send q */
83	spin_lock_irqsave(&conn->c_lock, flags);
84	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
85		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
86		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
87	}
88	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
89	spin_unlock_irqrestore(&conn->c_lock, flags);
90}
91
92/*
93 * We're making the concious trade-off here to only send one message
94 * down the connection at a time.
95 *   Pro:
96 *      - tx queueing is a simple fifo list
97 *   	- reassembly is optional and easily done by transports per conn
98 *      - no per flow rx lookup at all, straight to the socket
99 *   	- less per-frag memory and wire overhead
100 *   Con:
101 *      - queued acks can be delayed behind large messages
102 *   Depends:
103 *      - small message latency is higher behind queued large messages
104 *      - large message latency isn't starved by intervening small sends
105 */
106int rds_send_xmit(struct rds_connection *conn)
107{
108	struct rds_message *rm;
109	unsigned long flags;
110	unsigned int tmp;
111	unsigned int send_quota = send_batch_count;
112	struct scatterlist *sg;
113	int ret = 0;
114	int was_empty = 0;
115	LIST_HEAD(to_be_dropped);
116
117	/*
118	 * sendmsg calls here after having queued its message on the send
119	 * queue.  We only have one task feeding the connection at a time.  If
120	 * another thread is already feeding the queue then we back off.  This
121	 * avoids blocking the caller and trading per-connection data between
122	 * caches per message.
123	 *
124	 * The sem holder will issue a retry if they notice that someone queued
125	 * a message after they stopped walking the send queue but before they
126	 * dropped the sem.
127	 */
128	if (!mutex_trylock(&conn->c_send_lock)) {
129		rds_stats_inc(s_send_sem_contention);
130		ret = -ENOMEM;
131		goto out;
132	}
133
134	if (conn->c_trans->xmit_prepare)
135		conn->c_trans->xmit_prepare(conn);
136
137	/*
138	 * spin trying to push headers and data down the connection until
139	 * the connection doens't make forward progress.
140	 */
141	while (--send_quota) {
142		/*
143		 * See if need to send a congestion map update if we're
144		 * between sending messages.  The send_sem protects our sole
145		 * use of c_map_offset and _bytes.
146		 * Note this is used only by transports that define a special
147		 * xmit_cong_map function. For all others, we create allocate
148		 * a cong_map message and treat it just like any other send.
149		 */
150		if (conn->c_map_bytes) {
151			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
152						conn->c_map_offset);
153			if (ret <= 0)
154				break;
155
156			conn->c_map_offset += ret;
157			conn->c_map_bytes -= ret;
158			if (conn->c_map_bytes)
159				continue;
160		}
161
162		/* If we're done sending the current message, clear the
163		 * offset and S/G temporaries.
164		 */
165		rm = conn->c_xmit_rm;
166		if (rm &&
167		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
168		    conn->c_xmit_sg == rm->data.m_nents) {
169			conn->c_xmit_rm = NULL;
170			conn->c_xmit_sg = 0;
171			conn->c_xmit_hdr_off = 0;
172			conn->c_xmit_data_off = 0;
173			conn->c_xmit_rdma_sent = 0;
174
175			/* Release the reference to the previous message. */
176			rds_message_put(rm);
177			rm = NULL;
178		}
179
180		/* If we're asked to send a cong map update, do so.
181		 */
182		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
183			if (conn->c_trans->xmit_cong_map) {
184				conn->c_map_offset = 0;
185				conn->c_map_bytes = sizeof(struct rds_header) +
186					RDS_CONG_MAP_BYTES;
187				continue;
188			}
189
190			rm = rds_cong_update_alloc(conn);
191			if (IS_ERR(rm)) {
192				ret = PTR_ERR(rm);
193				break;
194			}
195
196			conn->c_xmit_rm = rm;
197		}
198
199		/*
200		 * Grab the next message from the send queue, if there is one.
201		 *
202		 * c_xmit_rm holds a ref while we're sending this message down
203		 * the connction.  We can use this ref while holding the
204		 * send_sem.. rds_send_reset() is serialized with it.
205		 */
206		if (!rm) {
207			unsigned int len;
208
209			spin_lock_irqsave(&conn->c_lock, flags);
210
211			if (!list_empty(&conn->c_send_queue)) {
212				rm = list_entry(conn->c_send_queue.next,
213						struct rds_message,
214						m_conn_item);
215				rds_message_addref(rm);
216
217				/*
218				 * Move the message from the send queue to the retransmit
219				 * list right away.
220				 */
221				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
222			}
223
224			spin_unlock_irqrestore(&conn->c_lock, flags);
225
226			if (!rm) {
227				was_empty = 1;
228				break;
229			}
230
231			/* Unfortunately, the way Infiniband deals with
232			 * RDMA to a bad MR key is by moving the entire
233			 * queue pair to error state. We cold possibly
234			 * recover from that, but right now we drop the
235			 * connection.
236			 * Therefore, we never retransmit messages with RDMA ops.
237			 */
238			if (rm->rdma.m_rdma_op.r_active &&
239			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
240				spin_lock_irqsave(&conn->c_lock, flags);
241				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
242					list_move(&rm->m_conn_item, &to_be_dropped);
243				spin_unlock_irqrestore(&conn->c_lock, flags);
244				rds_message_put(rm);
245				continue;
246			}
247
248			/* Require an ACK every once in a while */
249			len = ntohl(rm->m_inc.i_hdr.h_len);
250			if (conn->c_unacked_packets == 0 ||
251			    conn->c_unacked_bytes < len) {
252				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
253
254				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
255				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
256				rds_stats_inc(s_send_ack_required);
257			} else {
258				conn->c_unacked_bytes -= len;
259				conn->c_unacked_packets--;
260			}
261
262			conn->c_xmit_rm = rm;
263		}
264
265		/*
266		 * Try and send an rdma message.  Let's see if we can
267		 * keep this simple and require that the transport either
268		 * send the whole rdma or none of it.
269		 */
270		if (rm->rdma.m_rdma_op.r_active && !conn->c_xmit_rdma_sent) {
271			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma.m_rdma_op);
272			if (ret)
273				break;
274			conn->c_xmit_rdma_sent = 1;
275			/* The transport owns the mapped memory for now.
276			 * You can't unmap it while it's on the send queue */
277			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
278		}
279
280		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
281		    conn->c_xmit_sg < rm->data.m_nents) {
282			ret = conn->c_trans->xmit(conn, rm,
283						  conn->c_xmit_hdr_off,
284						  conn->c_xmit_sg,
285						  conn->c_xmit_data_off);
286			if (ret <= 0)
287				break;
288
289			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
290				tmp = min_t(int, ret,
291					    sizeof(struct rds_header) -
292					    conn->c_xmit_hdr_off);
293				conn->c_xmit_hdr_off += tmp;
294				ret -= tmp;
295			}
296
297			sg = &rm->data.m_sg[conn->c_xmit_sg];
298			while (ret) {
299				tmp = min_t(int, ret, sg->length -
300						      conn->c_xmit_data_off);
301				conn->c_xmit_data_off += tmp;
302				ret -= tmp;
303				if (conn->c_xmit_data_off == sg->length) {
304					conn->c_xmit_data_off = 0;
305					sg++;
306					conn->c_xmit_sg++;
307					BUG_ON(ret != 0 &&
308					       conn->c_xmit_sg == rm->data.m_nents);
309				}
310			}
311		}
312	}
313
314	/* Nuke any messages we decided not to retransmit. */
315	if (!list_empty(&to_be_dropped))
316		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
317
318	if (conn->c_trans->xmit_complete)
319		conn->c_trans->xmit_complete(conn);
320
321	/*
322	 * We might be racing with another sender who queued a message but
323	 * backed off on noticing that we held the c_send_lock.  If we check
324	 * for queued messages after dropping the sem then either we'll
325	 * see the queued message or the queuer will get the sem.  If we
326	 * notice the queued message then we trigger an immediate retry.
327	 *
328	 * We need to be careful only to do this when we stopped processing
329	 * the send queue because it was empty.  It's the only way we
330	 * stop processing the loop when the transport hasn't taken
331	 * responsibility for forward progress.
332	 */
333	mutex_unlock(&conn->c_send_lock);
334
335	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
336		/* We exhausted the send quota, but there's work left to
337		 * do. Return and (re-)schedule the send worker.
338		 */
339		ret = -EAGAIN;
340	}
341
342	if (ret == 0 && was_empty) {
343		/* A simple bit test would be way faster than taking the
344		 * spin lock */
345		spin_lock_irqsave(&conn->c_lock, flags);
346		if (!list_empty(&conn->c_send_queue)) {
347			rds_stats_inc(s_send_sem_queue_raced);
348			ret = -EAGAIN;
349		}
350		spin_unlock_irqrestore(&conn->c_lock, flags);
351	}
352out:
353	return ret;
354}
355
356static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
357{
358	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
359
360	assert_spin_locked(&rs->rs_lock);
361
362	BUG_ON(rs->rs_snd_bytes < len);
363	rs->rs_snd_bytes -= len;
364
365	if (rs->rs_snd_bytes == 0)
366		rds_stats_inc(s_send_queue_empty);
367}
368
369static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
370				    is_acked_func is_acked)
371{
372	if (is_acked)
373		return is_acked(rm, ack);
374	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
375}
376
377/*
378 * Returns true if there are no messages on the send and retransmit queues
379 * which have a sequence number greater than or equal to the given sequence
380 * number.
381 */
382int rds_send_acked_before(struct rds_connection *conn, u64 seq)
383{
384	struct rds_message *rm, *tmp;
385	int ret = 1;
386
387	spin_lock(&conn->c_lock);
388
389	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
390		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
391			ret = 0;
392		break;
393	}
394
395	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
396		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
397			ret = 0;
398		break;
399	}
400
401	spin_unlock(&conn->c_lock);
402
403	return ret;
404}
405
406/*
407 * This is pretty similar to what happens below in the ACK
408 * handling code - except that we call here as soon as we get
409 * the IB send completion on the RDMA op and the accompanying
410 * message.
411 */
412void rds_rdma_send_complete(struct rds_message *rm, int status)
413{
414	struct rds_sock *rs = NULL;
415	struct rds_rdma_op *ro;
416	struct rds_notifier *notifier;
417	unsigned long flags;
418
419	spin_lock_irqsave(&rm->m_rs_lock, flags);
420
421	ro = &rm->rdma.m_rdma_op;
422	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
423	    ro->r_active && ro->r_notify && ro->r_notifier) {
424		notifier = ro->r_notifier;
425		rs = rm->m_rs;
426		sock_hold(rds_rs_to_sk(rs));
427
428		notifier->n_status = status;
429		spin_lock(&rs->rs_lock);
430		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
431		spin_unlock(&rs->rs_lock);
432
433		ro->r_notifier = NULL;
434	}
435
436	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
437
438	if (rs) {
439		rds_wake_sk_sleep(rs);
440		sock_put(rds_rs_to_sk(rs));
441	}
442}
443EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
444
445/*
446 * This is the same as rds_rdma_send_complete except we
447 * don't do any locking - we have all the ingredients (message,
448 * socket, socket lock) and can just move the notifier.
449 */
450static inline void
451__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
452{
453	struct rds_rdma_op *ro;
454
455	ro = &rm->rdma.m_rdma_op;
456	if (ro->r_active && ro->r_notify && ro->r_notifier) {
457		ro->r_notifier->n_status = status;
458		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
459		ro->r_notifier = NULL;
460	}
461
462	/* No need to wake the app - caller does this */
463}
464
465/*
466 * This is called from the IB send completion when we detect
467 * a RDMA operation that failed with remote access error.
468 * So speed is not an issue here.
469 */
470struct rds_message *rds_send_get_message(struct rds_connection *conn,
471					 struct rds_rdma_op *op)
472{
473	struct rds_message *rm, *tmp, *found = NULL;
474	unsigned long flags;
475
476	spin_lock_irqsave(&conn->c_lock, flags);
477
478	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
479		if (&rm->rdma.m_rdma_op == op) {
480			atomic_inc(&rm->m_refcount);
481			found = rm;
482			goto out;
483		}
484	}
485
486	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
487		if (&rm->rdma.m_rdma_op == op) {
488			atomic_inc(&rm->m_refcount);
489			found = rm;
490			break;
491		}
492	}
493
494out:
495	spin_unlock_irqrestore(&conn->c_lock, flags);
496
497	return found;
498}
499EXPORT_SYMBOL_GPL(rds_send_get_message);
500
501/*
502 * This removes messages from the socket's list if they're on it.  The list
503 * argument must be private to the caller, we must be able to modify it
504 * without locks.  The messages must have a reference held for their
505 * position on the list.  This function will drop that reference after
506 * removing the messages from the 'messages' list regardless of if it found
507 * the messages on the socket list or not.
508 */
509void rds_send_remove_from_sock(struct list_head *messages, int status)
510{
511	unsigned long flags;
512	struct rds_sock *rs = NULL;
513	struct rds_message *rm;
514
515	while (!list_empty(messages)) {
516		int was_on_sock = 0;
517
518		rm = list_entry(messages->next, struct rds_message,
519				m_conn_item);
520		list_del_init(&rm->m_conn_item);
521
522		/*
523		 * If we see this flag cleared then we're *sure* that someone
524		 * else beat us to removing it from the sock.  If we race
525		 * with their flag update we'll get the lock and then really
526		 * see that the flag has been cleared.
527		 *
528		 * The message spinlock makes sure nobody clears rm->m_rs
529		 * while we're messing with it. It does not prevent the
530		 * message from being removed from the socket, though.
531		 */
532		spin_lock_irqsave(&rm->m_rs_lock, flags);
533		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
534			goto unlock_and_drop;
535
536		if (rs != rm->m_rs) {
537			if (rs) {
538				rds_wake_sk_sleep(rs);
539				sock_put(rds_rs_to_sk(rs));
540			}
541			rs = rm->m_rs;
542			sock_hold(rds_rs_to_sk(rs));
543		}
544		spin_lock(&rs->rs_lock);
545
546		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
547			struct rds_rdma_op *ro = &rm->rdma.m_rdma_op;
548			struct rds_notifier *notifier;
549
550			list_del_init(&rm->m_sock_item);
551			rds_send_sndbuf_remove(rs, rm);
552
553			if (ro->r_active && ro->r_notifier &&
554			    (ro->r_notify || (ro->r_recverr && status))) {
555				notifier = ro->r_notifier;
556				list_add_tail(&notifier->n_list,
557						&rs->rs_notify_queue);
558				if (!notifier->n_status)
559					notifier->n_status = status;
560				rm->rdma.m_rdma_op.r_notifier = NULL;
561			}
562			was_on_sock = 1;
563			rm->m_rs = NULL;
564		}
565		spin_unlock(&rs->rs_lock);
566
567unlock_and_drop:
568		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
569		rds_message_put(rm);
570		if (was_on_sock)
571			rds_message_put(rm);
572	}
573
574	if (rs) {
575		rds_wake_sk_sleep(rs);
576		sock_put(rds_rs_to_sk(rs));
577	}
578}
579
580/*
581 * Transports call here when they've determined that the receiver queued
582 * messages up to, and including, the given sequence number.  Messages are
583 * moved to the retrans queue when rds_send_xmit picks them off the send
584 * queue. This means that in the TCP case, the message may not have been
585 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
586 * checks the RDS_MSG_HAS_ACK_SEQ bit.
587 *
588 * XXX It's not clear to me how this is safely serialized with socket
589 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
590 */
591void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
592			 is_acked_func is_acked)
593{
594	struct rds_message *rm, *tmp;
595	unsigned long flags;
596	LIST_HEAD(list);
597
598	spin_lock_irqsave(&conn->c_lock, flags);
599
600	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
601		if (!rds_send_is_acked(rm, ack, is_acked))
602			break;
603
604		list_move(&rm->m_conn_item, &list);
605		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
606	}
607
608	/* order flag updates with spin locks */
609	if (!list_empty(&list))
610		smp_mb__after_clear_bit();
611
612	spin_unlock_irqrestore(&conn->c_lock, flags);
613
614	/* now remove the messages from the sock list as needed */
615	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
616}
617EXPORT_SYMBOL_GPL(rds_send_drop_acked);
618
619void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
620{
621	struct rds_message *rm, *tmp;
622	struct rds_connection *conn;
623	unsigned long flags;
624	LIST_HEAD(list);
625
626	/* get all the messages we're dropping under the rs lock */
627	spin_lock_irqsave(&rs->rs_lock, flags);
628
629	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
630		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
631			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
632			continue;
633
634		list_move(&rm->m_sock_item, &list);
635		rds_send_sndbuf_remove(rs, rm);
636		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
637	}
638
639	/* order flag updates with the rs lock */
640	smp_mb__after_clear_bit();
641
642	spin_unlock_irqrestore(&rs->rs_lock, flags);
643
644	if (list_empty(&list))
645		return;
646
647	/* Remove the messages from the conn */
648	list_for_each_entry(rm, &list, m_sock_item) {
649
650		conn = rm->m_inc.i_conn;
651
652		spin_lock_irqsave(&conn->c_lock, flags);
653		/*
654		 * Maybe someone else beat us to removing rm from the conn.
655		 * If we race with their flag update we'll get the lock and
656		 * then really see that the flag has been cleared.
657		 */
658		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
659			spin_unlock_irqrestore(&conn->c_lock, flags);
660			continue;
661		}
662		list_del_init(&rm->m_conn_item);
663		spin_unlock_irqrestore(&conn->c_lock, flags);
664
665		/*
666		 * Couldn't grab m_rs_lock in top loop (lock ordering),
667		 * but we can now.
668		 */
669		spin_lock_irqsave(&rm->m_rs_lock, flags);
670
671		spin_lock(&rs->rs_lock);
672		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
673		spin_unlock(&rs->rs_lock);
674
675		rm->m_rs = NULL;
676		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
677
678		rds_message_put(rm);
679	}
680
681	rds_wake_sk_sleep(rs);
682
683	while (!list_empty(&list)) {
684		rm = list_entry(list.next, struct rds_message, m_sock_item);
685		list_del_init(&rm->m_sock_item);
686
687		rds_message_wait(rm);
688		rds_message_put(rm);
689	}
690}
691
692/*
693 * we only want this to fire once so we use the callers 'queued'.  It's
694 * possible that another thread can race with us and remove the
695 * message from the flow with RDS_CANCEL_SENT_TO.
696 */
697static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
698			     struct rds_message *rm, __be16 sport,
699			     __be16 dport, int *queued)
700{
701	unsigned long flags;
702	u32 len;
703
704	if (*queued)
705		goto out;
706
707	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
708
709	/* this is the only place which holds both the socket's rs_lock
710	 * and the connection's c_lock */
711	spin_lock_irqsave(&rs->rs_lock, flags);
712
713	/*
714	 * If there is a little space in sndbuf, we don't queue anything,
715	 * and userspace gets -EAGAIN. But poll() indicates there's send
716	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
717	 * freed up by incoming acks. So we check the *old* value of
718	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
719	 * and poll() now knows no more data can be sent.
720	 */
721	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
722		rs->rs_snd_bytes += len;
723
724		/* let recv side know we are close to send space exhaustion.
725		 * This is probably not the optimal way to do it, as this
726		 * means we set the flag on *all* messages as soon as our
727		 * throughput hits a certain threshold.
728		 */
729		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
730			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
731
732		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
733		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
734		rds_message_addref(rm);
735		rm->m_rs = rs;
736
737		/* The code ordering is a little weird, but we're
738		   trying to minimize the time we hold c_lock */
739		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
740		rm->m_inc.i_conn = conn;
741		rds_message_addref(rm);
742
743		spin_lock(&conn->c_lock);
744		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
745		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
746		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
747		spin_unlock(&conn->c_lock);
748
749		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
750			 rm, len, rs, rs->rs_snd_bytes,
751			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
752
753		*queued = 1;
754	}
755
756	spin_unlock_irqrestore(&rs->rs_lock, flags);
757out:
758	return *queued;
759}
760
761/*
762 * rds_message is getting to be quite complicated, and we'd like to allocate
763 * it all in one go. This figures out how big it needs to be up front.
764 */
765static int rds_rm_size(struct msghdr *msg, int data_len)
766{
767	struct cmsghdr *cmsg;
768	int size = 0;
769	int retval;
770
771	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
772		if (!CMSG_OK(msg, cmsg))
773			return -EINVAL;
774
775		if (cmsg->cmsg_level != SOL_RDS)
776			continue;
777
778		switch (cmsg->cmsg_type) {
779		case RDS_CMSG_RDMA_ARGS:
780			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
781			if (retval < 0)
782				return retval;
783			size += retval;
784			break;
785
786		case RDS_CMSG_RDMA_DEST:
787		case RDS_CMSG_RDMA_MAP:
788			/* these are valid but do no add any size */
789			break;
790
791		default:
792			return -EINVAL;
793		}
794
795	}
796
797	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
798
799	return size;
800}
801
802static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
803			 struct msghdr *msg, int *allocated_mr)
804{
805	struct cmsghdr *cmsg;
806	int ret = 0;
807
808	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
809		if (!CMSG_OK(msg, cmsg))
810			return -EINVAL;
811
812		if (cmsg->cmsg_level != SOL_RDS)
813			continue;
814
815		/* As a side effect, RDMA_DEST and RDMA_MAP will set
816		 * rm->m_rdma_cookie and rm->m_rdma_mr.
817		 */
818		switch (cmsg->cmsg_type) {
819		case RDS_CMSG_RDMA_ARGS:
820			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
821			break;
822
823		case RDS_CMSG_RDMA_DEST:
824			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
825			break;
826
827		case RDS_CMSG_RDMA_MAP:
828			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
829			if (!ret)
830				*allocated_mr = 1;
831			break;
832
833		default:
834			return -EINVAL;
835		}
836
837		if (ret)
838			break;
839	}
840
841	return ret;
842}
843
844int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
845		size_t payload_len)
846{
847	struct sock *sk = sock->sk;
848	struct rds_sock *rs = rds_sk_to_rs(sk);
849	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
850	__be32 daddr;
851	__be16 dport;
852	struct rds_message *rm = NULL;
853	struct rds_connection *conn;
854	int ret = 0;
855	int queued = 0, allocated_mr = 0;
856	int nonblock = msg->msg_flags & MSG_DONTWAIT;
857	long timeo = sock_sndtimeo(sk, nonblock);
858
859	/* Mirror Linux UDP mirror of BSD error message compatibility */
860	/* XXX: Perhaps MSG_MORE someday */
861	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
862		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
863		ret = -EOPNOTSUPP;
864		goto out;
865	}
866
867	if (msg->msg_namelen) {
868		/* XXX fail non-unicast destination IPs? */
869		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
870			ret = -EINVAL;
871			goto out;
872		}
873		daddr = usin->sin_addr.s_addr;
874		dport = usin->sin_port;
875	} else {
876		/* We only care about consistency with ->connect() */
877		lock_sock(sk);
878		daddr = rs->rs_conn_addr;
879		dport = rs->rs_conn_port;
880		release_sock(sk);
881	}
882
883	/* racing with another thread binding seems ok here */
884	if (daddr == 0 || rs->rs_bound_addr == 0) {
885		ret = -ENOTCONN; /* XXX not a great errno */
886		goto out;
887	}
888
889	/* size of rm including all sgs */
890	ret = rds_rm_size(msg, payload_len);
891	if (ret < 0)
892		goto out;
893
894	rm = rds_message_alloc(ret, GFP_KERNEL);
895	if (!rm) {
896		ret = -ENOMEM;
897		goto out;
898	}
899
900	rm->data.m_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
901	/* XXX fix this to not allocate memory */
902	ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
903	if (ret)
904		goto out;
905
906	rm->m_daddr = daddr;
907
908	/* rds_conn_create has a spinlock that runs with IRQ off.
909	 * Caching the conn in the socket helps a lot. */
910	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
911		conn = rs->rs_conn;
912	else {
913		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
914					rs->rs_transport,
915					sock->sk->sk_allocation);
916		if (IS_ERR(conn)) {
917			ret = PTR_ERR(conn);
918			goto out;
919		}
920		rs->rs_conn = conn;
921	}
922
923	/* Parse any control messages the user may have included. */
924	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
925	if (ret)
926		goto out;
927
928	if ((rm->m_rdma_cookie || rm->rdma.m_rdma_op.r_active) &&
929	    !conn->c_trans->xmit_rdma) {
930		if (printk_ratelimit())
931			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
932				&rm->rdma.m_rdma_op, conn->c_trans->xmit_rdma);
933		ret = -EOPNOTSUPP;
934		goto out;
935	}
936
937	/* If the connection is down, trigger a connect. We may
938	 * have scheduled a delayed reconnect however - in this case
939	 * we should not interfere.
940	 */
941	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
942	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
943		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
944
945	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
946	if (ret) {
947		rs->rs_seen_congestion = 1;
948		goto out;
949	}
950
951	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
952				  dport, &queued)) {
953		rds_stats_inc(s_send_queue_full);
954		/* XXX make sure this is reasonable */
955		if (payload_len > rds_sk_sndbuf(rs)) {
956			ret = -EMSGSIZE;
957			goto out;
958		}
959		if (nonblock) {
960			ret = -EAGAIN;
961			goto out;
962		}
963
964		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
965					rds_send_queue_rm(rs, conn, rm,
966							  rs->rs_bound_port,
967							  dport,
968							  &queued),
969					timeo);
970		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
971		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
972			continue;
973
974		ret = timeo;
975		if (ret == 0)
976			ret = -ETIMEDOUT;
977		goto out;
978	}
979
980	/*
981	 * By now we've committed to the send.  We reuse rds_send_worker()
982	 * to retry sends in the rds thread if the transport asks us to.
983	 */
984	rds_stats_inc(s_send_queued);
985
986	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
987		rds_send_worker(&conn->c_send_w.work);
988
989	rds_message_put(rm);
990	return payload_len;
991
992out:
993	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
994	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
995	 * or in any other way, we need to destroy the MR again */
996	if (allocated_mr)
997		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
998
999	if (rm)
1000		rds_message_put(rm);
1001	return ret;
1002}
1003
1004/*
1005 * Reply to a ping packet.
1006 */
1007int
1008rds_send_pong(struct rds_connection *conn, __be16 dport)
1009{
1010	struct rds_message *rm;
1011	unsigned long flags;
1012	int ret = 0;
1013
1014	rm = rds_message_alloc(0, GFP_ATOMIC);
1015	if (!rm) {
1016		ret = -ENOMEM;
1017		goto out;
1018	}
1019
1020	rm->m_daddr = conn->c_faddr;
1021
1022	/* If the connection is down, trigger a connect. We may
1023	 * have scheduled a delayed reconnect however - in this case
1024	 * we should not interfere.
1025	 */
1026	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
1027	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
1028		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
1029
1030	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1031	if (ret)
1032		goto out;
1033
1034	spin_lock_irqsave(&conn->c_lock, flags);
1035	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1036	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1037	rds_message_addref(rm);
1038	rm->m_inc.i_conn = conn;
1039
1040	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1041				    conn->c_next_tx_seq);
1042	conn->c_next_tx_seq++;
1043	spin_unlock_irqrestore(&conn->c_lock, flags);
1044
1045	rds_stats_inc(s_send_queued);
1046	rds_stats_inc(s_send_pong);
1047
1048	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
1049	rds_message_put(rm);
1050	return 0;
1051
1052out:
1053	if (rm)
1054		rds_message_put(rm);
1055	return ret;
1056}
1057