send.c revision 0f4b1c7e89e699f588807a914ec6e6396c851a72
1/*
2 * Copyright (c) 2006 Oracle.  All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/gfp.h>
35#include <net/sock.h>
36#include <linux/in.h>
37#include <linux/list.h>
38
39#include "rds.h"
40
41/* When transmitting messages in rds_send_xmit, we need to emerge from
42 * time to time and briefly release the CPU. Otherwise the soft lockup watchdog
43 * will kick us in the shins.
44 * Also, it seems fairer to not let one busy connection stall all the
45 * others.
46 *
47 * send_batch_count is the number of times we'll loop in rds_send_xmit(). Setting
48 * it to 0 will restore the old behavior (where we looped until we had
49 * drained the queue).
50 */
51static int send_batch_count = 64;
52module_param(send_batch_count, int, 0444);
53MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
54
55/*
56 * Reset the send state.  Callers must ensure that this doesn't race with
57 * rds_send_xmit().
58 */
59void rds_send_reset(struct rds_connection *conn)
60{
61	struct rds_message *rm, *tmp;
62	unsigned long flags;
63
64	if (conn->c_xmit_rm) {
65		rm = conn->c_xmit_rm;
66		conn->c_xmit_rm = NULL;
67		/* Tell the user the RDMA op is no longer mapped by the
68		 * transport. This isn't entirely true (it's flushed out
69		 * independently) but as the connection is down, there's
70		 * no ongoing RDMA to/from that memory */
71		rds_message_unmapped(rm);
72		rds_message_put(rm);
73	}
74
75	conn->c_xmit_sg = 0;
76	conn->c_xmit_hdr_off = 0;
77	conn->c_xmit_data_off = 0;
78	conn->c_xmit_atomic_sent = 0;
79	conn->c_xmit_rdma_sent = 0;
80	conn->c_xmit_data_sent = 0;
81
82	conn->c_map_queued = 0;
83
84	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
85	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
86
87	/* Mark messages as retransmissions, and move them to the send q */
88	spin_lock_irqsave(&conn->c_lock, flags);
89	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
90		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
91		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
92	}
93	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
94	spin_unlock_irqrestore(&conn->c_lock, flags);
95}
96
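/*
 * Try to become the one task transmitting on this connection.  Returns
 * nonzero if we took ownership of RDS_IN_XMIT, 0 if another task already
 * holds it.
 */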
97static int acquire_in_xmit(struct rds_connection *conn)
98{
99	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
100}
101
102static void release_in_xmit(struct rds_connection *conn)
103{
104	clear_bit(RDS_IN_XMIT, &conn->c_flags);
105	smp_mb__after_clear_bit();
106	/*
107	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
108	 * hot path and finding waiters is very rare.  We don't want to walk
109	 * the system-wide hashed waitqueue buckets in the fast path only to
110	 * almost never find waiters.
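	 * The barrier after clearing the bit pairs with the waiter adding
	 * itself to c_waitq before re-testing RDS_IN_XMIT, so either we see
	 * the waiter here or it sees the bit already cleared.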
111	 */
112	if (waitqueue_active(&conn->c_waitq))
113		wake_up_all(&conn->c_waitq);
114}
115
116/*
117 * We're making the conscious trade-off here to only send one message
118 * down the connection at a time.
119 *   Pro:
120 *      - tx queueing is a simple fifo list
121 *      - reassembly is optional and easily done by transports per conn
122 *      - no per flow rx lookup at all, straight to the socket
123 *      - less per-frag memory and wire overhead
124 *   Con:
125 *      - queued acks can be delayed behind large messages
126 *   Depends:
127 *      - small message latency is higher behind queued large messages
128 *      - large message latency isn't starved by intervening small sends
129 */
130int rds_send_xmit(struct rds_connection *conn)
131{
132	struct rds_message *rm;
133	unsigned long flags;
134	unsigned int tmp;
135	struct scatterlist *sg;
136	int ret = 0;
137	LIST_HEAD(to_be_dropped);
138
139restart:
140
141	/*
142	 * sendmsg calls here after having queued its message on the send
143	 * queue.  We only have one task feeding the connection at a time.  If
144	 * another thread is already feeding the queue then we back off.  This
145	 * avoids blocking the caller and trading per-connection data between
146	 * caches per message.
147	 */
148	if (!acquire_in_xmit(conn)) {
149		rds_stats_inc(s_send_lock_contention);
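		/*
		 * Returning nonzero tells callers such as rds_send_worker() to
		 * back off and retry later; the message we queued will be sent
		 * by whoever holds RDS_IN_XMIT (see the re-check of the send
		 * queue after release_in_xmit() below).
		 */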
150		ret = -ENOMEM;
151		goto out;
152	}
153
154	/*
155	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT;
156	 * we test in the opposite order to avoid racing with it.
157	 */
158	if (!rds_conn_up(conn)) {
159		release_in_xmit(conn);
160		ret = 0;
161		goto out;
162	}
163
164	if (conn->c_trans->xmit_prepare)
165		conn->c_trans->xmit_prepare(conn);
166
167	/*
168	 * spin trying to push headers and data down the connection until
169	 * the connection doesn't make forward progress.
170	 */
171	while (1) {
172
173		rm = conn->c_xmit_rm;
174
175		/*
176		 * If we are between messages, send any pending congestion
177		 * map update.
178		 */
179		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
180			rm = rds_cong_update_alloc(conn);
181			if (IS_ERR(rm)) {
182				ret = PTR_ERR(rm);
183				break;
184			}
185			rm->data.op_active = 1;
186
187			conn->c_xmit_rm = rm;
188		}
189
190		/*
191		 * If not already working on one, grab the next message.
192		 *
193		 * c_xmit_rm holds a ref while we're sending this message down
194		 * the connection.  We can use this ref while we hold RDS_IN_XMIT,
195		 * since rds_send_reset() is serialized against it.
196		 */
197		if (!rm) {
198			unsigned int len;
199
200			spin_lock_irqsave(&conn->c_lock, flags);
201
202			if (!list_empty(&conn->c_send_queue)) {
203				rm = list_entry(conn->c_send_queue.next,
204						struct rds_message,
205						m_conn_item);
206				rds_message_addref(rm);
207
208				/*
209				 * Move the message from the send queue to the retransmit
210				 * list right away.
211				 */
212				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
213			}
214
215			spin_unlock_irqrestore(&conn->c_lock, flags);
216
217			if (!rm)
218				break;
219
220			/* Unfortunately, the way InfiniBand deals with
221			 * RDMA to a bad MR key is by moving the entire
222			 * queue pair to the error state. We could possibly
223			 * recover from that, but right now we drop the
224			 * connection.
225			 * Therefore, we never retransmit messages with RDMA ops.
226			 */
227			if (rm->rdma.op_active &&
228			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
229				spin_lock_irqsave(&conn->c_lock, flags);
230				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
231					list_move(&rm->m_conn_item, &to_be_dropped);
232				spin_unlock_irqrestore(&conn->c_lock, flags);
233				continue;
234			}
235
236			/* Require an ACK every once in a while */
237			len = ntohl(rm->m_inc.i_hdr.h_len);
238			if (conn->c_unacked_packets == 0 ||
239			    conn->c_unacked_bytes < len) {
240				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
241
242				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
243				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
244				rds_stats_inc(s_send_ack_required);
245			} else {
246				conn->c_unacked_bytes -= len;
247				conn->c_unacked_packets--;
248			}
249
250			conn->c_xmit_rm = rm;
251		}
252
253		/* The transport either sends the whole rdma or none of it */
254		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
255			rm->m_final_op = &rm->rdma;
256			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
257			if (ret)
258				break;
259			conn->c_xmit_rdma_sent = 1;
260
261			/* The transport owns the mapped memory for now.
262			 * You can't unmap it while it's on the send queue */
263			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
264		}
265
266		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
267			rm->m_final_op = &rm->atomic;
268			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
269			if (ret)
270				break;
271			conn->c_xmit_atomic_sent = 1;
272
273			/* The transport owns the mapped memory for now.
274			 * You can't unmap it while it's on the send queue */
275			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
276		}
277
278		/*
279		 * A number of cases require an RDS header to be sent
280		 * even if there is no data.
281		 * We permit 0-byte sends; rds-ping depends on this.
282		 * However, if the message consists solely of silent ops (and no
283		 * RDMA cookie), we skip the hdr/data send so it stays silent.
284		 */
285		if (rm->data.op_nents == 0) {
286			int ops_present;
287			int all_ops_are_silent = 1;
288
289			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
290			if (rm->atomic.op_active && !rm->atomic.op_silent)
291				all_ops_are_silent = 0;
292			if (rm->rdma.op_active && !rm->rdma.op_silent)
293				all_ops_are_silent = 0;
294
295			if (ops_present && all_ops_are_silent
296			    && !rm->m_rdma_cookie)
297				rm->data.op_active = 0;
298		}
299
300		if (rm->data.op_active && !conn->c_xmit_data_sent) {
301			rm->m_final_op = &rm->data;
302			ret = conn->c_trans->xmit(conn, rm,
303						  conn->c_xmit_hdr_off,
304						  conn->c_xmit_sg,
305						  conn->c_xmit_data_off);
306			if (ret <= 0)
307				break;
308
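			/*
			 * ret is the total number of bytes the transport sent,
			 * header bytes first.  Consume any remaining header
			 * bytes before walking the data scatterlist.
			 */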
309			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
310				tmp = min_t(int, ret,
311					    sizeof(struct rds_header) -
312					    conn->c_xmit_hdr_off);
313				conn->c_xmit_hdr_off += tmp;
314				ret -= tmp;
315			}
316
317			sg = &rm->data.op_sg[conn->c_xmit_sg];
318			while (ret) {
319				tmp = min_t(int, ret, sg->length -
320						      conn->c_xmit_data_off);
321				conn->c_xmit_data_off += tmp;
322				ret -= tmp;
323				if (conn->c_xmit_data_off == sg->length) {
324					conn->c_xmit_data_off = 0;
325					sg++;
326					conn->c_xmit_sg++;
327					BUG_ON(ret != 0 &&
328					       conn->c_xmit_sg == rm->data.op_nents);
329				}
330			}
331
332			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
333			    (conn->c_xmit_sg == rm->data.op_nents))
334				conn->c_xmit_data_sent = 1;
335		}
336
337		/*
338		 * An rm only takes multiple passes through this loop if it has
339		 * a data op. Thus, once the data is sent (or there was none),
340		 * we're done with the rm.
341		 */
342		if (!rm->data.op_active || conn->c_xmit_data_sent) {
343			conn->c_xmit_rm = NULL;
344			conn->c_xmit_sg = 0;
345			conn->c_xmit_hdr_off = 0;
346			conn->c_xmit_data_off = 0;
347			conn->c_xmit_rdma_sent = 0;
348			conn->c_xmit_atomic_sent = 0;
349			conn->c_xmit_data_sent = 0;
350
351			rds_message_put(rm);
352		}
353	}
354
355	if (conn->c_trans->xmit_complete)
356		conn->c_trans->xmit_complete(conn);
357
358	release_in_xmit(conn);
359
360	/* Nuke any messages we decided not to retransmit. */
361	if (!list_empty(&to_be_dropped)) {
362		/* irqs on here, so we can put(), unlike above */
363		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
364			rds_message_put(rm);
365		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
366	}
367
368	/*
369	 * Other senders can queue a message after we last test the send queue
370	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
371	 * not try and send their newly queued message.  We need to check the
372	 * send queue after having cleared RDS_IN_XMIT so that their message
373	 * doesn't get stuck on the send queue.
374	 *
375	 * If the transport cannot continue (i.e. ret != 0), then it must
376	 * call us when more room is available, such as from the tx
377	 * completion handler.
378	 */
379	if (ret == 0) {
380		smp_mb();
381		if (!list_empty(&conn->c_send_queue)) {
382			rds_stats_inc(s_send_lock_queue_raced);
383			goto restart;
384		}
385	}
386out:
387	return ret;
388}
389
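/*
 * Give back the send-buffer space consumed by rm.  Called with rs_lock
 * held whenever a message is taken off the socket's send queue.
 */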
390static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
391{
392	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
393
394	assert_spin_locked(&rs->rs_lock);
395
396	BUG_ON(rs->rs_snd_bytes < len);
397	rs->rs_snd_bytes -= len;
398
399	if (rs->rs_snd_bytes == 0)
400		rds_stats_inc(s_send_queue_empty);
401}
402
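/*
 * Transports may supply their own is_acked callback (RDS/TCP, for one,
 * compares against the byte sequence it records at transmit time); with
 * none we simply compare the header sequence number against the ack.
 */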
403static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
404				    is_acked_func is_acked)
405{
406	if (is_acked)
407		return is_acked(rm, ack);
408	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
409}
410
411/*
412 * This is pretty similar to what happens below in the ACK
413 * handling code - except that we call here as soon as we get
414 * the IB send completion on the RDMA op and the accompanying
415 * message.
416 */
417void rds_rdma_send_complete(struct rds_message *rm, int status)
418{
419	struct rds_sock *rs = NULL;
420	struct rm_rdma_op *ro;
421	struct rds_notifier *notifier;
422	unsigned long flags;
423
424	spin_lock_irqsave(&rm->m_rs_lock, flags);
425
426	ro = &rm->rdma;
427	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
428	    ro->op_active && ro->op_notify && ro->op_notifier) {
429		notifier = ro->op_notifier;
430		rs = rm->m_rs;
431		sock_hold(rds_rs_to_sk(rs));
432
433		notifier->n_status = status;
434		spin_lock(&rs->rs_lock);
435		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
436		spin_unlock(&rs->rs_lock);
437
438		ro->op_notifier = NULL;
439	}
440
441	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
442
443	if (rs) {
444		rds_wake_sk_sleep(rs);
445		sock_put(rds_rs_to_sk(rs));
446	}
447}
448EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
449
450/*
451 * Just like above, except looks at atomic op
452 */
453void rds_atomic_send_complete(struct rds_message *rm, int status)
454{
455	struct rds_sock *rs = NULL;
456	struct rm_atomic_op *ao;
457	struct rds_notifier *notifier;
458	unsigned long flags;
459
460	spin_lock_irqsave(&rm->m_rs_lock, flags);
461
462	ao = &rm->atomic;
463	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
464	    && ao->op_active && ao->op_notify && ao->op_notifier) {
465		notifier = ao->op_notifier;
466		rs = rm->m_rs;
467		sock_hold(rds_rs_to_sk(rs));
468
469		notifier->n_status = status;
470		spin_lock(&rs->rs_lock);
471		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
472		spin_unlock(&rs->rs_lock);
473
474		ao->op_notifier = NULL;
475	}
476
477	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
478
479	if (rs) {
480		rds_wake_sk_sleep(rs);
481		sock_put(rds_rs_to_sk(rs));
482	}
483}
484EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
485
486/*
487 * This is the same as rds_rdma_send_complete except we
488 * don't do any locking - we have all the ingredients (message,
489 * socket, socket lock) and can just move the notifier.
490 */
491static inline void
492__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
493{
494	struct rm_rdma_op *ro;
495	struct rm_atomic_op *ao;
496
497	ro = &rm->rdma;
498	if (ro->op_active && ro->op_notify && ro->op_notifier) {
499		ro->op_notifier->n_status = status;
500		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
501		ro->op_notifier = NULL;
502	}
503
504	ao = &rm->atomic;
505	if (ao->op_active && ao->op_notify && ao->op_notifier) {
506		ao->op_notifier->n_status = status;
507		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
508		ao->op_notifier = NULL;
509	}
510
511	/* No need to wake the app - caller does this */
512}
513
514/*
515 * This is called from the IB send completion when we detect
516 * an RDMA operation that failed with a remote access error.
517 * So speed is not an issue here.
518 */
519struct rds_message *rds_send_get_message(struct rds_connection *conn,
520					 struct rm_rdma_op *op)
521{
522	struct rds_message *rm, *tmp, *found = NULL;
523	unsigned long flags;
524
525	spin_lock_irqsave(&conn->c_lock, flags);
526
527	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
528		if (&rm->rdma == op) {
529			atomic_inc(&rm->m_refcount);
530			found = rm;
531			goto out;
532		}
533	}
534
535	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
536		if (&rm->rdma == op) {
537			atomic_inc(&rm->m_refcount);
538			found = rm;
539			break;
540		}
541	}
542
543out:
544	spin_unlock_irqrestore(&conn->c_lock, flags);
545
546	return found;
547}
548EXPORT_SYMBOL_GPL(rds_send_get_message);
549
550/*
551 * This removes messages from the socket's list if they're on it.  The list
552 * argument must be private to the caller; we must be able to modify it
553 * without locks.  The messages must have a reference held for their
554 * position on the list.  This function will drop that reference after
555 * removing the messages from the 'messages' list, regardless of whether it
556 * found the messages on the socket list or not.
557 */
558void rds_send_remove_from_sock(struct list_head *messages, int status)
559{
560	unsigned long flags;
561	struct rds_sock *rs = NULL;
562	struct rds_message *rm;
563
564	while (!list_empty(messages)) {
565		int was_on_sock = 0;
566
567		rm = list_entry(messages->next, struct rds_message,
568				m_conn_item);
569		list_del_init(&rm->m_conn_item);
570
571		/*
572		 * If we see this flag cleared then we're *sure* that someone
573		 * else beat us to removing it from the sock.  If we race
574		 * with their flag update we'll get the lock and then really
575		 * see that the flag has been cleared.
576		 *
577		 * The message spinlock makes sure nobody clears rm->m_rs
578		 * while we're messing with it. It does not prevent the
579		 * message from being removed from the socket, though.
580		 */
581		spin_lock_irqsave(&rm->m_rs_lock, flags);
582		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
583			goto unlock_and_drop;
584
585		if (rs != rm->m_rs) {
586			if (rs) {
587				rds_wake_sk_sleep(rs);
588				sock_put(rds_rs_to_sk(rs));
589			}
590			rs = rm->m_rs;
591			sock_hold(rds_rs_to_sk(rs));
592		}
593		spin_lock(&rs->rs_lock);
594
595		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
596			struct rm_rdma_op *ro = &rm->rdma;
597			struct rds_notifier *notifier;
598
599			list_del_init(&rm->m_sock_item);
600			rds_send_sndbuf_remove(rs, rm);
601
602			if (ro->op_active && ro->op_notifier &&
603			       (ro->op_notify || (ro->op_recverr && status))) {
604				notifier = ro->op_notifier;
605				list_add_tail(&notifier->n_list,
606						&rs->rs_notify_queue);
607				if (!notifier->n_status)
608					notifier->n_status = status;
609				rm->rdma.op_notifier = NULL;
610			}
611			was_on_sock = 1;
612			rm->m_rs = NULL;
613		}
614		spin_unlock(&rs->rs_lock);
615
616unlock_and_drop:
617		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
618		rds_message_put(rm);
619		if (was_on_sock)
620			rds_message_put(rm);
621	}
622
623	if (rs) {
624		rds_wake_sk_sleep(rs);
625		sock_put(rds_rs_to_sk(rs));
626	}
627}
628
629/*
630 * Transports call here when they've determined that the receiver queued
631 * messages up to, and including, the given sequence number.  Messages are
632 * moved to the retrans queue when rds_send_xmit picks them off the send
633 * queue. This means that in the TCP case, the message may not have been
634 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
635 * checks the RDS_MSG_HAS_ACK_SEQ bit.
636 *
637 * XXX It's not clear to me how this is safely serialized with socket
638 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
639 */
640void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
641			 is_acked_func is_acked)
642{
643	struct rds_message *rm, *tmp;
644	unsigned long flags;
645	LIST_HEAD(list);
646
647	spin_lock_irqsave(&conn->c_lock, flags);
648
649	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
650		if (!rds_send_is_acked(rm, ack, is_acked))
651			break;
652
653		list_move(&rm->m_conn_item, &list);
654		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
655	}
656
657	/* order flag updates with spin locks */
658	if (!list_empty(&list))
659		smp_mb__after_clear_bit();
660
661	spin_unlock_irqrestore(&conn->c_lock, flags);
662
663	/* now remove the messages from the sock list as needed */
664	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
665}
666EXPORT_SYMBOL_GPL(rds_send_drop_acked);
667
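/*
 * Drop every message this socket has queued to 'dest' (or to any
 * destination if dest is NULL), completing pending RDMA/atomic notifiers
 * with RDS_RDMA_CANCELED.  Used when sends are cancelled or the socket
 * is going away.
 */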
668void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
669{
670	struct rds_message *rm, *tmp;
671	struct rds_connection *conn;
672	unsigned long flags;
673	LIST_HEAD(list);
674
675	/* get all the messages we're dropping under the rs lock */
676	spin_lock_irqsave(&rs->rs_lock, flags);
677
678	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
679		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
680			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
681			continue;
682
683		list_move(&rm->m_sock_item, &list);
684		rds_send_sndbuf_remove(rs, rm);
685		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
686	}
687
688	/* order flag updates with the rs lock */
689	smp_mb__after_clear_bit();
690
691	spin_unlock_irqrestore(&rs->rs_lock, flags);
692
693	if (list_empty(&list))
694		return;
695
696	/* Remove the messages from the conn */
697	list_for_each_entry(rm, &list, m_sock_item) {
698
699		conn = rm->m_inc.i_conn;
700
701		spin_lock_irqsave(&conn->c_lock, flags);
702		/*
703		 * Maybe someone else beat us to removing rm from the conn.
704		 * If we race with their flag update we'll get the lock and
705		 * then really see that the flag has been cleared.
706		 */
707		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
708			spin_unlock_irqrestore(&conn->c_lock, flags);
709			continue;
710		}
711		list_del_init(&rm->m_conn_item);
712		spin_unlock_irqrestore(&conn->c_lock, flags);
713
714		/*
715		 * Couldn't grab m_rs_lock in top loop (lock ordering),
716		 * but we can now.
717		 */
718		spin_lock_irqsave(&rm->m_rs_lock, flags);
719
720		spin_lock(&rs->rs_lock);
721		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
722		spin_unlock(&rs->rs_lock);
723
724		rm->m_rs = NULL;
725		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
726
727		rds_message_put(rm);
728	}
729
730	rds_wake_sk_sleep(rs);
731
732	while (!list_empty(&list)) {
733		rm = list_entry(list.next, struct rds_message, m_sock_item);
734		list_del_init(&rm->m_sock_item);
735
736		rds_message_wait(rm);
737		rds_message_put(rm);
738	}
739}
740
741/*
742 * we only want this to fire once so we use the caller's 'queued'.  It's
743 * possible that another thread can race with us and remove the
744 * message from the flow with RDS_CANCEL_SENT_TO.
745 */
746static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
747			     struct rds_message *rm, __be16 sport,
748			     __be16 dport, int *queued)
749{
750	unsigned long flags;
751	u32 len;
752
753	if (*queued)
754		goto out;
755
756	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
757
758	/* this is the only place which holds both the socket's rs_lock
759	 * and the connection's c_lock */
760	spin_lock_irqsave(&rs->rs_lock, flags);
761
762	/*
763	 * If we refused to queue once only a little sndbuf space was left,
764	 * userspace would get -EAGAIN while poll() still reported send room,
765	 * which can lead to bad behavior (spinning) if snd_bytes isn't freed
766	 * up by incoming acks. So we check the *old* value of rs_snd_bytes
767	 * here, allowing the last msg to exceed the buffer; after that, poll()
768	 * knows no more data can be sent.
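	 *
	 * As an illustration (sizes are made up): with rds_sk_sndbuf() at 64K
	 * and rs_snd_bytes already at 60K, a 16K message is still queued since
	 * the old value is under the limit; rs_snd_bytes then sits at 76K and
	 * poll() stops reporting send room until acks drain it back down.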
769	 */
770	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
771		rs->rs_snd_bytes += len;
772
773		/* let recv side know we are close to send space exhaustion.
774		 * This is probably not the optimal way to do it, as it
775		 * means we set the flag on *all* messages once the send
776		 * buffer is more than half full.
777		 */
778		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
779			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
780
781		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
782		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
783		rds_message_addref(rm);
784		rm->m_rs = rs;
785
786		/* The code ordering is a little weird, but we're
787		   trying to minimize the time we hold c_lock */
788		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
789		rm->m_inc.i_conn = conn;
790		rds_message_addref(rm);
791
792		spin_lock(&conn->c_lock);
793		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
794		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
795		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
796		spin_unlock(&conn->c_lock);
797
798		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
799			 rm, len, rs, rs->rs_snd_bytes,
800			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
801
802		*queued = 1;
803	}
804
805	spin_unlock_irqrestore(&rs->rs_lock, flags);
806out:
807	return *queued;
808}
809
810/*
811 * rds_message is getting to be quite complicated, and we'd like to allocate
812 * it all in one go. This figures out how big it needs to be up front.
813 */
814static int rds_rm_size(struct msghdr *msg, int data_len)
815{
816	struct cmsghdr *cmsg;
817	int size = 0;
818	int cmsg_groups = 0;
819	int retval;
820
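	/*
	 * cmsg_groups: bit 0 marks cmsgs that build ops inside the rm
	 * (RDMA_ARGS and the atomics), bit 1 marks cookie-only cmsgs
	 * (RDMA_DEST, RDMA_MAP).  Mixing the two groups in one message is
	 * rejected below.
	 */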
821	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
822		if (!CMSG_OK(msg, cmsg))
823			return -EINVAL;
824
825		if (cmsg->cmsg_level != SOL_RDS)
826			continue;
827
828		switch (cmsg->cmsg_type) {
829		case RDS_CMSG_RDMA_ARGS:
830			cmsg_groups |= 1;
831			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
832			if (retval < 0)
833				return retval;
834			size += retval;
835
836			break;
837
838		case RDS_CMSG_RDMA_DEST:
839		case RDS_CMSG_RDMA_MAP:
840			cmsg_groups |= 2;
841			/* these are valid but do not add any size */
842			break;
843
844		case RDS_CMSG_ATOMIC_CSWP:
845		case RDS_CMSG_ATOMIC_FADD:
846			cmsg_groups |= 1;
847			size += sizeof(struct scatterlist);
848			break;
849
850		default:
851			return -EINVAL;
852		}
853
854	}
855
856	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
857
858	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
859	if (cmsg_groups == 3)
860		return -EINVAL;
861
862	return size;
863}
864
865static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
866			 struct msghdr *msg, int *allocated_mr)
867{
868	struct cmsghdr *cmsg;
869	int ret = 0;
870
871	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
872		if (!CMSG_OK(msg, cmsg))
873			return -EINVAL;
874
875		if (cmsg->cmsg_level != SOL_RDS)
876			continue;
877
878		/* As a side effect, RDMA_DEST and RDMA_MAP will set
879		 * rm->m_rdma_cookie and the MR used for this message.
880		 */
881		switch (cmsg->cmsg_type) {
882		case RDS_CMSG_RDMA_ARGS:
883			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
884			break;
885
886		case RDS_CMSG_RDMA_DEST:
887			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
888			break;
889
890		case RDS_CMSG_RDMA_MAP:
891			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
892			if (!ret)
893				*allocated_mr = 1;
894			break;
895		case RDS_CMSG_ATOMIC_CSWP:
896		case RDS_CMSG_ATOMIC_FADD:
897			ret = rds_cmsg_atomic(rs, rm, cmsg);
898			break;
899
900		default:
901			return -EINVAL;
902		}
903
904		if (ret)
905			break;
906	}
907
908	return ret;
909}
910
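/*
 * sendmsg() entry point.  Builds an rds_message from the user's iovec and
 * control messages, queues it on the socket and the connection, and kicks
 * the send path.  Returns payload_len on success or a negative errno.
 */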
911int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
912		size_t payload_len)
913{
914	struct sock *sk = sock->sk;
915	struct rds_sock *rs = rds_sk_to_rs(sk);
916	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
917	__be32 daddr;
918	__be16 dport;
919	struct rds_message *rm = NULL;
920	struct rds_connection *conn;
921	int ret = 0;
922	int queued = 0, allocated_mr = 0;
923	int nonblock = msg->msg_flags & MSG_DONTWAIT;
924	long timeo = sock_sndtimeo(sk, nonblock);
925
926	/* Mirror how Linux UDP handles BSD error message compatibility */
927	/* XXX: Perhaps MSG_MORE someday */
928	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
929		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
930		ret = -EOPNOTSUPP;
931		goto out;
932	}
933
934	if (msg->msg_namelen) {
935		/* XXX fail non-unicast destination IPs? */
936		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
937			ret = -EINVAL;
938			goto out;
939		}
940		daddr = usin->sin_addr.s_addr;
941		dport = usin->sin_port;
942	} else {
943		/* We only care about consistency with ->connect() */
944		lock_sock(sk);
945		daddr = rs->rs_conn_addr;
946		dport = rs->rs_conn_port;
947		release_sock(sk);
948	}
949
950	/* racing with another thread binding seems ok here */
951	if (daddr == 0 || rs->rs_bound_addr == 0) {
952		ret = -ENOTCONN; /* XXX not a great errno */
953		goto out;
954	}
955
956	/* size of rm including all sgs */
957	ret = rds_rm_size(msg, payload_len);
958	if (ret < 0)
959		goto out;
960
961	rm = rds_message_alloc(ret, GFP_KERNEL);
962	if (!rm) {
963		ret = -ENOMEM;
964		goto out;
965	}
966
967	/* Attach data to the rm */
968	if (payload_len) {
969		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
970		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
971		if (ret)
972			goto out;
973	}
974	rm->data.op_active = 1;
975
976	rm->m_daddr = daddr;
977
978	/* rds_conn_create has a spinlock that runs with IRQ off.
979	 * Caching the conn in the socket helps a lot. */
980	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
981		conn = rs->rs_conn;
982	else {
983		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
984					rs->rs_transport,
985					sock->sk->sk_allocation);
986		if (IS_ERR(conn)) {
987			ret = PTR_ERR(conn);
988			goto out;
989		}
990		rs->rs_conn = conn;
991	}
992
993	/* Parse any control messages the user may have included. */
994	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
995	if (ret)
996		goto out;
997
998	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
999		if (printk_ratelimit())
1000			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1001			       &rm->rdma, conn->c_trans->xmit_rdma);
1002		ret = -EOPNOTSUPP;
1003		goto out;
1004	}
1005
1006	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1007		if (printk_ratelimit())
1008			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1009			       &rm->atomic, conn->c_trans->xmit_atomic);
1010		ret = -EOPNOTSUPP;
1011		goto out;
1012	}
1013
1014	rds_conn_connect_if_down(conn);
1015
1016	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1017	if (ret) {
1018		rs->rs_seen_congestion = 1;
1019		goto out;
1020	}
1021
1022	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1023				  dport, &queued)) {
1024		rds_stats_inc(s_send_queue_full);
1025		/* XXX make sure this is reasonable */
1026		if (payload_len > rds_sk_sndbuf(rs)) {
1027			ret = -EMSGSIZE;
1028			goto out;
1029		}
1030		if (nonblock) {
1031			ret = -EAGAIN;
1032			goto out;
1033		}
1034
1035		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1036					rds_send_queue_rm(rs, conn, rm,
1037							  rs->rs_bound_port,
1038							  dport,
1039							  &queued),
1040					timeo);
1041		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1042		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1043			continue;
1044
1045		ret = timeo;
1046		if (ret == 0)
1047			ret = -ETIMEDOUT;
1048		goto out;
1049	}
1050
1051	/*
1052	 * By now we've committed to the send.  We reuse rds_send_worker()
1053	 * to retry sends in the rds thread if the transport asks us to.
1054	 */
1055	rds_stats_inc(s_send_queued);
1056
1057	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1058		rds_send_xmit(conn);
1059
1060	rds_message_put(rm);
1061	return payload_len;
1062
1063out:
1064	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
1065	 * If the sendmsg goes through, we keep the MR. If it fails, whether with
1066	 * EAGAIN or in any other way, we need to destroy the MR again. */
1067	if (allocated_mr)
1068		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1069
1070	if (rm)
1071		rds_message_put(rm);
1072	return ret;
1073}
1074
1075/*
1076 * Reply to a ping packet.
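 * A ping is a zero-length message sent to port zero; the pong is likewise
 * zero-length, sent from port zero back to the pinger's source port
 * (dport here).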
1077 */
1078int
1079rds_send_pong(struct rds_connection *conn, __be16 dport)
1080{
1081	struct rds_message *rm;
1082	unsigned long flags;
1083	int ret = 0;
1084
1085	rm = rds_message_alloc(0, GFP_ATOMIC);
1086	if (!rm) {
1087		ret = -ENOMEM;
1088		goto out;
1089	}
1090
1091	rm->m_daddr = conn->c_faddr;
1092	rm->data.op_active = 1;
1093
1094	rds_conn_connect_if_down(conn);
1095
1096	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1097	if (ret)
1098		goto out;
1099
1100	spin_lock_irqsave(&conn->c_lock, flags);
1101	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1102	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1103	rds_message_addref(rm);
1104	rm->m_inc.i_conn = conn;
1105
1106	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1107				    conn->c_next_tx_seq);
1108	conn->c_next_tx_seq++;
1109	spin_unlock_irqrestore(&conn->c_lock, flags);
1110
1111	rds_stats_inc(s_send_queued);
1112	rds_stats_inc(s_send_pong);
1113
1114	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1115		rds_send_xmit(conn);
1116
1117	rds_message_put(rm);
1118	return 0;
1119
1120out:
1121	if (rm)
1122		rds_message_put(rm);
1123	return ret;
1124}
1125