ipoib_cm.c revision 26574401fef6766f6c3ca25b5c13febe662d2a32
1/*
2 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_cm.h>
34#include <rdma/ib_cache.h>
35#include <net/dst.h>
36#include <net/icmp.h>
37#include <linux/icmpv6.h>
38#include <linux/delay.h>
39#include <linux/vmalloc.h>
40
41#include "ipoib.h"
42
43int ipoib_max_conn_qp = 128;
44
45module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
46MODULE_PARM_DESC(max_nonsrq_conn_qp,
47		 "Max number of connected-mode QPs per interface "
48		 "(applied only if shared receive queue is not available)");
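/*
 * Usage sketch (assumes the standard ib_ipoib module name; the value 64
 * is only an example):
 *
 *   modprobe ib_ipoib max_nonsrq_conn_qp=64
 *
 * caps each interface at 64 connected-mode QPs when no SRQ is available.
 */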
49
50#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
51static int data_debug_level;
52
53module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
54MODULE_PARM_DESC(cm_data_debug_level,
55		 "Enable data path debug tracing for connected mode if > 0");
56#endif
57
58#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
59
60#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
61#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
62#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
63#define IPOIB_CM_RX_UPDATE_MASK (0x3)
64
65static struct ib_qp_attr ipoib_cm_err_attr = {
66	.qp_state = IB_QPS_ERR
67};
68
69#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
70
71static struct ib_send_wr ipoib_cm_rx_drain_wr = {
72	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
73	.opcode = IB_WR_SEND,
74};
75
76static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
77			       struct ib_cm_event *event);
78
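/*
 * Unmap one connected-mode receive buffer: the IPOIB_CM_HEAD_SIZE head
 * buffer plus "frags" following page-sized fragments.
 */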
79static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
80				  u64 mapping[IPOIB_CM_RX_SG])
81{
82	int i;
83
84	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
85
86	for (i = 0; i < frags; ++i)
87		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
88}
89
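/*
 * Repost receive buffer "id" to the shared receive queue; on failure the
 * buffer is unmapped and its skb freed.
 */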
90static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
91{
92	struct ipoib_dev_priv *priv = netdev_priv(dev);
93	struct ib_recv_wr *bad_wr;
94	int i, ret;
95
96	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
97
98	for (i = 0; i < priv->cm.num_frags; ++i)
99		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
100
101	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
102	if (unlikely(ret)) {
103		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
104		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
105				      priv->cm.srq_ring[id].mapping);
106		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
107		priv->cm.srq_ring[id].skb = NULL;
108	}
109
110	return ret;
111}
112
113static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
114					struct ipoib_cm_rx *rx,
115					struct ib_recv_wr *wr,
116					struct ib_sge *sge, int id)
117{
118	struct ipoib_dev_priv *priv = netdev_priv(dev);
119	struct ib_recv_wr *bad_wr;
120	int i, ret;
121
122	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
123
124	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
125		sge[i].addr = rx->rx_ring[id].mapping[i];
126
127	ret = ib_post_recv(rx->qp, wr, &bad_wr);
128	if (unlikely(ret)) {
129		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
130		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
131				      rx->rx_ring[id].mapping);
132		dev_kfree_skb_any(rx->rx_ring[id].skb);
133		rx->rx_ring[id].skb = NULL;
134	}
135
136	return ret;
137}
138
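/*
 * Allocate and DMA-map a connected-mode receive buffer: a linear head of
 * IPOIB_CM_HEAD_SIZE bytes plus "frags" page fragments.  On partial
 * failure, everything mapped so far is unmapped and the skb is freed.
 */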
139static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
140					     struct ipoib_cm_rx_buf *rx_ring,
141					     int id, int frags,
142					     u64 mapping[IPOIB_CM_RX_SG])
143{
144	struct ipoib_dev_priv *priv = netdev_priv(dev);
145	struct sk_buff *skb;
146	int i;
147
148	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
149	if (unlikely(!skb))
150		return NULL;
151
152	/*
153	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
154	 * IP header to a multiple of 16.
155	 */
156	skb_reserve(skb, 12);
157
158	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
159				       DMA_FROM_DEVICE);
160	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
161		dev_kfree_skb_any(skb);
162		return NULL;
163	}
164
165	for (i = 0; i < frags; i++) {
166		struct page *page = alloc_page(GFP_ATOMIC);
167
168		if (!page)
169			goto partial_error;
170		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
171
172		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
173						 0, PAGE_SIZE, DMA_FROM_DEVICE);
174		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
175			goto partial_error;
176	}
177
178	rx_ring[id].skb = skb;
179	return skb;
180
181partial_error:
182
183	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
184
185	for (; i > 0; --i)
186		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
187
188	dev_kfree_skb_any(skb);
189	return NULL;
190}
191
192static void ipoib_cm_free_rx_ring(struct net_device *dev,
193				  struct ipoib_cm_rx_buf *rx_ring)
194{
195	struct ipoib_dev_priv *priv = netdev_priv(dev);
196	int i;
197
198	for (i = 0; i < ipoib_recvq_size; ++i)
199		if (rx_ring[i].skb) {
200			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
201					      rx_ring[i].mapping);
202			dev_kfree_skb_any(rx_ring[i].skb);
203		}
204
205	vfree(rx_ring);
206}
207
208static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
209{
210	struct ib_send_wr *bad_wr;
211	struct ipoib_cm_rx *p;
212
213	/* We only reserved 1 extra slot in CQ for drain WRs, so
214	 * make sure we have at most 1 outstanding WR. */
215	if (list_empty(&priv->cm.rx_flush_list) ||
216	    !list_empty(&priv->cm.rx_drain_list))
217		return;
218
219	/*
220	 * QPs on the flush list are in the error state.  This way, a "flush
221	 * error" WC will be immediately generated for each WR we post.
222	 */
223	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
224	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
225		ipoib_warn(priv, "failed to post drain wr\n");
226
227	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
228}
229
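/*
 * QP event handler for passive (RX) connections.  On
 * IB_EVENT_QP_LAST_WQE_REACHED, move the connection to the flush list and
 * kick the drain logic so its outstanding receives can be reaped.
 */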
230static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
231{
232	struct ipoib_cm_rx *p = ctx;
233	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
234	unsigned long flags;
235
236	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
237		return;
238
239	spin_lock_irqsave(&priv->lock, flags);
240	list_move(&p->list, &priv->cm.rx_flush_list);
241	p->state = IPOIB_CM_RX_FLUSH;
242	ipoib_cm_start_rx_drain(priv);
243	spin_unlock_irqrestore(&priv->lock, flags);
244}
245
246static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
247					   struct ipoib_cm_rx *p)
248{
249	struct ipoib_dev_priv *priv = netdev_priv(dev);
250	struct ib_qp_init_attr attr = {
251		.event_handler = ipoib_cm_rx_event_handler,
252		.send_cq = priv->recv_cq, /* For drain WR */
253		.recv_cq = priv->recv_cq,
254		.srq = priv->cm.srq,
255		.cap.max_send_wr = 1, /* For drain WR */
256		.cap.max_send_sge = 1, /* FIXME: 0 seems not to work */
257		.sq_sig_type = IB_SIGNAL_ALL_WR,
258		.qp_type = IB_QPT_RC,
259		.qp_context = p,
260	};
261
262	if (!ipoib_cm_has_srq(dev)) {
263		attr.cap.max_recv_wr  = ipoib_recvq_size;
264		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
265	}
266
267	return ib_create_qp(priv->pd, &attr);
268}
269
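/*
 * Walk a passive RX QP through INIT -> RTR -> RTS using attributes
 * supplied by the IB CM.  Failure to reach RTS is tolerated; see the
 * firmware work-around comment below.
 */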
270static int ipoib_cm_modify_rx_qp(struct net_device *dev,
271				 struct ib_cm_id *cm_id, struct ib_qp *qp,
272				 unsigned psn)
273{
274	struct ipoib_dev_priv *priv = netdev_priv(dev);
275	struct ib_qp_attr qp_attr;
276	int qp_attr_mask, ret;
277
278	qp_attr.qp_state = IB_QPS_INIT;
279	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
280	if (ret) {
281		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
282		return ret;
283	}
284	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
285	if (ret) {
286		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
287		return ret;
288	}
289	qp_attr.qp_state = IB_QPS_RTR;
290	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
291	if (ret) {
292		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
293		return ret;
294	}
295	qp_attr.rq_psn = psn;
296	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
297	if (ret) {
298		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
299		return ret;
300	}
301
302	/*
303	 * Current Mellanox HCA firmware won't generate completions
304	 * with error for drain WRs unless the QP has been moved to
305	 * RTS first. This work-around leaves a window where a QP has
306	 * moved to error asynchronously, but this will eventually get
307	 * fixed in firmware, so let's not error out if modify QP
308	 * fails.
309	 */
310	qp_attr.qp_state = IB_QPS_RTS;
311	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
312	if (ret) {
313		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
314		return 0;
315	}
316	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
317	if (ret) {
318		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
319		return 0;
320	}
321
322	return 0;
323}
324
325static void ipoib_cm_init_rx_wr(struct net_device *dev,
326				struct ib_recv_wr *wr,
327				struct ib_sge *sge)
328{
329	struct ipoib_dev_priv *priv = netdev_priv(dev);
330	int i;
331
332	for (i = 0; i < priv->cm.num_frags; ++i)
333		sge[i].lkey = priv->mr->lkey;
334
335	sge[0].length = IPOIB_CM_HEAD_SIZE;
336	for (i = 1; i < priv->cm.num_frags; ++i)
337		sge[i].length = PAGE_SIZE;
338
339	wr->next    = NULL;
340	wr->sg_list = sge;
341	wr->num_sge = priv->cm.num_frags;
342}
343
344static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
345				   struct ipoib_cm_rx *rx)
346{
347	struct ipoib_dev_priv *priv = netdev_priv(dev);
348	struct {
349		struct ib_recv_wr wr;
350		struct ib_sge sge[IPOIB_CM_RX_SG];
351	} *t;
352	int ret;
353	int i;
354
355	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
356	if (!rx->rx_ring) {
357		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
358		       priv->ca->name, ipoib_recvq_size);
359		return -ENOMEM;
360	}
361
362	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
363
364	t = kmalloc(sizeof *t, GFP_KERNEL);
365	if (!t) {
366		ret = -ENOMEM;
367		goto err_free;
368	}
369
370	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
371
372	spin_lock_irq(&priv->lock);
373
374	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
375		spin_unlock_irq(&priv->lock);
376		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
377		ret = -EINVAL;
378		goto err_free;
379	} else
380		++priv->cm.nonsrq_conn_qp;
381
382	spin_unlock_irq(&priv->lock);
383
384	for (i = 0; i < ipoib_recvq_size; ++i) {
385		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
386					   rx->rx_ring[i].mapping)) {
387			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
388			ret = -ENOMEM;
389			goto err_count;
390		}
391		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
392		if (ret) {
393			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
394				   "failed for buf %d\n", i);
395			ret = -EIO;
396			goto err_count;
397		}
398	}
399
400	rx->recv_count = ipoib_recvq_size;
401
402	kfree(t);
403
404	return 0;
405
406err_count:
407	spin_lock_irq(&priv->lock);
408	--priv->cm.nonsrq_conn_qp;
409	spin_unlock_irq(&priv->lock);
410
411err_free:
412	kfree(t);
413	ipoib_cm_free_rx_ring(dev, rx->rx_ring);
414
415	return ret;
416}
417
418static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
419			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
420			     unsigned psn)
421{
422	struct ipoib_dev_priv *priv = netdev_priv(dev);
423	struct ipoib_cm_data data = {};
424	struct ib_cm_rep_param rep = {};
425
426	data.qpn = cpu_to_be32(priv->qp->qp_num);
427	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
428
429	rep.private_data = &data;
430	rep.private_data_len = sizeof data;
431	rep.flow_control = 0;
432	rep.rnr_retry_count = req->rnr_retry_count;
433	rep.srq = ipoib_cm_has_srq(dev);
434	rep.qp_num = qp->qp_num;
435	rep.starting_psn = psn;
436	return ib_send_cm_rep(cm_id, &rep);
437}
438
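/*
 * Handle an incoming connection request (REQ): create and bring up a
 * receive QP, set up a per-connection receive ring if there is no SRQ,
 * and reply with a REP carrying our datagram QPN and buffer size.
 */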
439static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
440{
441	struct net_device *dev = cm_id->context;
442	struct ipoib_dev_priv *priv = netdev_priv(dev);
443	struct ipoib_cm_rx *p;
444	unsigned psn;
445	int ret;
446
447	ipoib_dbg(priv, "REQ arrived\n");
448	p = kzalloc(sizeof *p, GFP_KERNEL);
449	if (!p)
450		return -ENOMEM;
451	p->dev = dev;
452	p->id = cm_id;
453	cm_id->context = p;
454	p->state = IPOIB_CM_RX_LIVE;
455	p->jiffies = jiffies;
456	INIT_LIST_HEAD(&p->list);
457
458	p->qp = ipoib_cm_create_rx_qp(dev, p);
459	if (IS_ERR(p->qp)) {
460		ret = PTR_ERR(p->qp);
461		goto err_qp;
462	}
463
464	psn = random32() & 0xffffff;
465	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
466	if (ret)
467		goto err_modify;
468
469	if (!ipoib_cm_has_srq(dev)) {
470		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
471		if (ret)
472			goto err_modify;
473	}
474
475	spin_lock_irq(&priv->lock);
476	queue_delayed_work(ipoib_workqueue,
477			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
478	/* Add this entry to the passive_ids list head, but do not re-add it
479	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
480	p->jiffies = jiffies;
481	if (p->state == IPOIB_CM_RX_LIVE)
482		list_move(&p->list, &priv->cm.passive_ids);
483	spin_unlock_irq(&priv->lock);
484
485	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
486	if (ret) {
487		ipoib_warn(priv, "failed to send REP: %d\n", ret);
488		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
489			ipoib_warn(priv, "unable to move qp to error state\n");
490	}
491	return 0;
492
493err_modify:
494	ib_destroy_qp(p->qp);
495err_qp:
496	kfree(p);
497	return ret;
498}
499
500static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
501			       struct ib_cm_event *event)
502{
503	struct ipoib_cm_rx *p;
504	struct ipoib_dev_priv *priv;
505
506	switch (event->event) {
507	case IB_CM_REQ_RECEIVED:
508		return ipoib_cm_req_handler(cm_id, event);
509	case IB_CM_DREQ_RECEIVED:
510		p = cm_id->context;
511		ib_send_cm_drep(cm_id, NULL, 0);
512		/* Fall through */
513	case IB_CM_REJ_RECEIVED:
514		p = cm_id->context;
515		priv = netdev_priv(p->dev);
516		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
517			ipoib_warn(priv, "unable to move qp to error state\n");
518		/* Fall through */
519	default:
520		return 0;
521	}
522}
523/* Adjust length of skb with fragments to match received data */
524static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
525			  unsigned int length, struct sk_buff *toskb)
526{
527	int i, num_frags;
528	unsigned int size;
529
530	/* put header into skb */
531	size = min(length, hdr_space);
532	skb->tail += size;
533	skb->len += size;
534	length -= size;
535
536	num_frags = skb_shinfo(skb)->nr_frags;
537	for (i = 0; i < num_frags; i++) {
538		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
539
540		if (length == 0) {
541			/* don't need this page */
542			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
543			--skb_shinfo(skb)->nr_frags;
544		} else {
545			size = min(length, (unsigned) PAGE_SIZE);
546
547			frag->size = size;
548			skb->data_len += size;
549			skb->truesize += size;
550			skb->len += size;
551			length -= size;
552		}
553	}
554}
555
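/*
 * Connected-mode receive completion handler: recognizes drain-WR
 * completions, copies small packets into a fresh skb (copybreak),
 * otherwise swaps in a newly allocated buffer, and finally reposts to
 * the SRQ or the per-connection ring.
 */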
556void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
557{
558	struct ipoib_dev_priv *priv = netdev_priv(dev);
559	struct ipoib_cm_rx_buf *rx_ring;
560	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
561	struct sk_buff *skb, *newskb;
562	struct ipoib_cm_rx *p;
563	unsigned long flags;
564	u64 mapping[IPOIB_CM_RX_SG];
565	int frags;
566	int has_srq;
567	struct sk_buff *small_skb;
568
569	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
570		       wr_id, wc->status);
571
572	if (unlikely(wr_id >= ipoib_recvq_size)) {
573		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
574			spin_lock_irqsave(&priv->lock, flags);
575			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
576			ipoib_cm_start_rx_drain(priv);
577			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
578			spin_unlock_irqrestore(&priv->lock, flags);
579		} else
580			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
581				   wr_id, ipoib_recvq_size);
582		return;
583	}
584
585	p = wc->qp->qp_context;
586
587	has_srq = ipoib_cm_has_srq(dev);
588	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
589
590	skb = rx_ring[wr_id].skb;
591
592	if (unlikely(wc->status != IB_WC_SUCCESS)) {
593		ipoib_dbg(priv, "cm recv error "
594			   "(status=%d, wrid=%d vend_err %x)\n",
595			   wc->status, wr_id, wc->vendor_err);
596		++dev->stats.rx_dropped;
597		if (has_srq)
598			goto repost;
599		else {
600			if (!--p->recv_count) {
601				spin_lock_irqsave(&priv->lock, flags);
602				list_move(&p->list, &priv->cm.rx_reap_list);
603				spin_unlock_irqrestore(&priv->lock, flags);
604				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
605			}
606			return;
607		}
608	}
609
610	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
611		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
612			spin_lock_irqsave(&priv->lock, flags);
613			p->jiffies = jiffies;
614			/* Move this entry to the list head, but do not re-add it
615			 * if it has been moved off the list. */
616			if (p->state == IPOIB_CM_RX_LIVE)
617				list_move(&p->list, &priv->cm.passive_ids);
618			spin_unlock_irqrestore(&priv->lock, flags);
619		}
620	}
621
622	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
623		int dlen = wc->byte_len;
624
625		small_skb = dev_alloc_skb(dlen + 12);
626		if (small_skb) {
627			skb_reserve(small_skb, 12);
628			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
629						   dlen, DMA_FROM_DEVICE);
630			skb_copy_from_linear_data(skb, small_skb->data, dlen);
631			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
632						      dlen, DMA_FROM_DEVICE);
633			skb_put(small_skb, dlen);
634			skb = small_skb;
635			goto copied;
636		}
637	}
638
639	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
640					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
641
642	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
643	if (unlikely(!newskb)) {
644		/*
645		 * If we can't allocate a new RX buffer, dump
646		 * this packet and reuse the old buffer.
647		 */
648		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
649		++dev->stats.rx_dropped;
650		goto repost;
651	}
652
653	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
654	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
655
656	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
657		       wc->byte_len, wc->slid);
658
659	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
660
661copied:
662	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
663	skb_reset_mac_header(skb);
664	skb_pull(skb, IPOIB_ENCAP_LEN);
665
666	dev->last_rx = jiffies;
667	++dev->stats.rx_packets;
668	dev->stats.rx_bytes += skb->len;
669
670	skb->dev = dev;
671	/* XXX get correct PACKET_ type here */
672	skb->pkt_type = PACKET_HOST;
673	netif_receive_skb(skb);
674
675repost:
676	if (has_srq) {
677		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
678			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
679				   "for buf %d\n", wr_id);
680	} else {
681		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
682							  &priv->cm.rx_wr,
683							  priv->cm.rx_sge,
684							  wr_id))) {
685			--p->recv_count;
686			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
687				   "for buf %d\n", wr_id);
688		}
689	}
690}
691
692static inline int post_send(struct ipoib_dev_priv *priv,
693			    struct ipoib_cm_tx *tx,
694			    unsigned int wr_id,
695			    u64 addr, int len)
696{
697	struct ib_send_wr *bad_wr;
698
699	priv->tx_sge[0].addr          = addr;
700	priv->tx_sge[0].length        = len;
701
702	priv->tx_wr.num_sge	= 1;
703	priv->tx_wr.wr_id	= wr_id | IPOIB_OP_CM;
704
705	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
706}
707
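/*
 * Transmit one skb on a connected-mode QP.  The packet must fit within
 * the negotiated MTU; the skb is mapped as a single buffer, and the net
 * queue is stopped once the TX ring fills up.
 */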
708void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
709{
710	struct ipoib_dev_priv *priv = netdev_priv(dev);
711	struct ipoib_cm_tx_buf *tx_req;
712	u64 addr;
713
714	if (unlikely(skb->len > tx->mtu)) {
715		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
716			   skb->len, tx->mtu);
717		++dev->stats.tx_dropped;
718		++dev->stats.tx_errors;
719		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
720		return;
721	}
722
723	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
724		       tx->tx_head, skb->len, tx->qp->qp_num);
725
726	/*
727	 * We put the skb into the tx_ring _before_ we call post_send()
728	 * because it's entirely possible that the completion handler will
729	 * run before we execute anything after the post_send().  That
730	 * means we have to make sure everything is properly recorded and
731	 * our state is consistent before we call post_send().
732	 */
733	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
734	tx_req->skb = skb;
735	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
736	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
737		++dev->stats.tx_errors;
738		dev_kfree_skb_any(skb);
739		return;
740	}
741
742	tx_req->mapping = addr;
743
744	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
745			       addr, skb->len))) {
746		ipoib_warn(priv, "post_send failed\n");
747		++dev->stats.tx_errors;
748		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
749		dev_kfree_skb_any(skb);
750	} else {
751		dev->trans_start = jiffies;
752		++tx->tx_head;
753
754		if (++priv->tx_outstanding == ipoib_sendq_size) {
755			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
756				  tx->qp->qp_num);
757			netif_stop_queue(dev);
758		}
759	}
760}
761
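/*
 * Connected-mode send completion handler: unmap and free the skb, wake
 * the net queue if it was stopped, and tear the connection down on any
 * error other than a flush.
 */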
762void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
763{
764	struct ipoib_dev_priv *priv = netdev_priv(dev);
765	struct ipoib_cm_tx *tx = wc->qp->qp_context;
766	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
767	struct ipoib_cm_tx_buf *tx_req;
768	unsigned long flags;
769
770	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
771		       wr_id, wc->status);
772
773	if (unlikely(wr_id >= ipoib_sendq_size)) {
774		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
775			   wr_id, ipoib_sendq_size);
776		return;
777	}
778
779	tx_req = &tx->tx_ring[wr_id];
780
781	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
782
783	/* FIXME: is this right? Shouldn't we only increment on success? */
784	++dev->stats.tx_packets;
785	dev->stats.tx_bytes += tx_req->skb->len;
786
787	dev_kfree_skb_any(tx_req->skb);
788
789	netif_tx_lock(dev);
790
791	++tx->tx_tail;
792	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
793	    netif_queue_stopped(dev) &&
794	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
795		netif_wake_queue(dev);
796
797	if (wc->status != IB_WC_SUCCESS &&
798	    wc->status != IB_WC_WR_FLUSH_ERR) {
799		struct ipoib_neigh *neigh;
800
801		ipoib_dbg(priv, "failed cm send event "
802			   "(status=%d, wrid=%d vend_err %x)\n",
803			   wc->status, wr_id, wc->vendor_err);
804
805		spin_lock_irqsave(&priv->lock, flags);
806		neigh = tx->neigh;
807
808		if (neigh) {
809			neigh->cm = NULL;
810			list_del(&neigh->list);
811			if (neigh->ah)
812				ipoib_put_ah(neigh->ah);
813			ipoib_neigh_free(dev, neigh);
814
815			tx->neigh = NULL;
816		}
817
818		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
819			list_move(&tx->list, &priv->cm.reap_list);
820			queue_work(ipoib_workqueue, &priv->cm.reap_task);
821		}
822
823		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
824
825		spin_unlock_irqrestore(&priv->lock, flags);
826	}
827
828	netif_tx_unlock(dev);
829}
830
831int ipoib_cm_dev_open(struct net_device *dev)
832{
833	struct ipoib_dev_priv *priv = netdev_priv(dev);
834	int ret;
835
836	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
837		return 0;
838
839	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
840	if (IS_ERR(priv->cm.id)) {
841		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
842		ret = PTR_ERR(priv->cm.id);
843		goto err_cm;
844	}
845
846	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
847			   0, NULL);
848	if (ret) {
849		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
850		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
851		goto err_listen;
852	}
853
854	return 0;
855
856err_listen:
857	ib_destroy_cm_id(priv->cm.id);
858err_cm:
859	priv->cm.id = NULL;
860	return ret;
861}
862
863static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
864{
865	struct ipoib_dev_priv *priv = netdev_priv(dev);
866	struct ipoib_cm_rx *rx, *n;
867	LIST_HEAD(list);
868
869	spin_lock_irq(&priv->lock);
870	list_splice_init(&priv->cm.rx_reap_list, &list);
871	spin_unlock_irq(&priv->lock);
872
873	list_for_each_entry_safe(rx, n, &list, list) {
874		ib_destroy_cm_id(rx->id);
875		ib_destroy_qp(rx->qp);
876		if (!ipoib_cm_has_srq(dev)) {
877			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
878			spin_lock_irq(&priv->lock);
879			--priv->cm.nonsrq_conn_qp;
880			spin_unlock_irq(&priv->lock);
881		}
882		kfree(rx);
883	}
884}
885
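/*
 * Tear down connected mode on an interface: stop listening, move all
 * passive QPs to the error state, wait up to five seconds for their
 * receives to drain, then reap everything.
 */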
886void ipoib_cm_dev_stop(struct net_device *dev)
887{
888	struct ipoib_dev_priv *priv = netdev_priv(dev);
889	struct ipoib_cm_rx *p;
890	unsigned long begin;
891	int ret;
892
893	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
894		return;
895
896	ib_destroy_cm_id(priv->cm.id);
897	priv->cm.id = NULL;
898
899	spin_lock_irq(&priv->lock);
900	while (!list_empty(&priv->cm.passive_ids)) {
901		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
902		list_move(&p->list, &priv->cm.rx_error_list);
903		p->state = IPOIB_CM_RX_ERROR;
904		spin_unlock_irq(&priv->lock);
905		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
906		if (ret)
907			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
908		spin_lock_irq(&priv->lock);
909	}
910
911	/* Wait for all RX to be drained */
912	begin = jiffies;
913
914	while (!list_empty(&priv->cm.rx_error_list) ||
915	       !list_empty(&priv->cm.rx_flush_list) ||
916	       !list_empty(&priv->cm.rx_drain_list)) {
917		if (time_after(jiffies, begin + 5 * HZ)) {
918			ipoib_warn(priv, "RX drain timing out\n");
919
920			/*
921			 * assume the HW is wedged and just free up everything.
922			 */
923			list_splice_init(&priv->cm.rx_flush_list,
924					 &priv->cm.rx_reap_list);
925			list_splice_init(&priv->cm.rx_error_list,
926					 &priv->cm.rx_reap_list);
927			list_splice_init(&priv->cm.rx_drain_list,
928					 &priv->cm.rx_reap_list);
929			break;
930		}
931		spin_unlock_irq(&priv->lock);
932		msleep(1);
933		ipoib_drain_cq(dev);
934		spin_lock_irq(&priv->lock);
935	}
936
937	spin_unlock_irq(&priv->lock);
938
939	ipoib_cm_free_rx_reap_list(dev);
940
941	cancel_delayed_work(&priv->cm.stale_task);
942}
943
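/*
 * Active-side REP handler: record the peer's buffer size as this
 * connection's send limit, move the TX QP to RTR and then RTS, resubmit
 * any packets that were queued while the connection was being set up,
 * and acknowledge with an RTU.
 */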
944static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
945{
946	struct ipoib_cm_tx *p = cm_id->context;
947	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
948	struct ipoib_cm_data *data = event->private_data;
949	struct sk_buff_head skqueue;
950	struct ib_qp_attr qp_attr;
951	int qp_attr_mask, ret;
952	struct sk_buff *skb;
953
954	p->mtu = be32_to_cpu(data->mtu);
955
956	if (p->mtu <= IPOIB_ENCAP_LEN) {
957		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
958			   p->mtu, IPOIB_ENCAP_LEN);
959		return -EINVAL;
960	}
961
962	qp_attr.qp_state = IB_QPS_RTR;
963	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
964	if (ret) {
965		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
966		return ret;
967	}
968
969	qp_attr.rq_psn = 0 /* FIXME */;
970	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
971	if (ret) {
972		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
973		return ret;
974	}
975
976	qp_attr.qp_state = IB_QPS_RTS;
977	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
978	if (ret) {
979		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
980		return ret;
981	}
982	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
983	if (ret) {
984		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
985		return ret;
986	}
987
988	skb_queue_head_init(&skqueue);
989
990	spin_lock_irq(&priv->lock);
991	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
992	if (p->neigh)
993		while ((skb = __skb_dequeue(&p->neigh->queue)))
994			__skb_queue_tail(&skqueue, skb);
995	spin_unlock_irq(&priv->lock);
996
997	while ((skb = __skb_dequeue(&skqueue))) {
998		skb->dev = p->dev;
999		if (dev_queue_xmit(skb))
1000			ipoib_warn(priv, "dev_queue_xmit failed "
1001				   "to requeue packet\n");
1002	}
1003
1004	ret = ib_send_cm_rtu(cm_id, NULL, 0);
1005	if (ret) {
1006		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
1007		return ret;
1008	}
1009	return 0;
1010}
1011
1012static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
1013{
1014	struct ipoib_dev_priv *priv = netdev_priv(dev);
1015	struct ib_qp_init_attr attr = {
1016		.send_cq		= priv->recv_cq,
1017		.recv_cq		= priv->recv_cq,
1018		.srq			= priv->cm.srq,
1019		.cap.max_send_wr	= ipoib_sendq_size,
1020		.cap.max_send_sge	= 1,
1021		.sq_sig_type		= IB_SIGNAL_ALL_WR,
1022		.qp_type		= IB_QPT_RC,
1023		.qp_context		= tx
1024	};
1025
1026	return ib_create_qp(priv->pd, &attr);
1027}
1028
1029static int ipoib_cm_send_req(struct net_device *dev,
1030			     struct ib_cm_id *id, struct ib_qp *qp,
1031			     u32 qpn,
1032			     struct ib_sa_path_rec *pathrec)
1033{
1034	struct ipoib_dev_priv *priv = netdev_priv(dev);
1035	struct ipoib_cm_data data = {};
1036	struct ib_cm_req_param req = {};
1037
1038	data.qpn = cpu_to_be32(priv->qp->qp_num);
1039	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
1040
1041	req.primary_path		= pathrec;
1042	req.alternate_path		= NULL;
1043	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
1044	req.qp_num			= qp->qp_num;
1045	req.qp_type			= qp->qp_type;
1046	req.private_data		= &data;
1047	req.private_data_len		= sizeof data;
1048	req.flow_control		= 0;
1049
1050	req.starting_psn		= 0; /* FIXME */
1051
1052	/*
1053	 * Pick some arbitrary defaults here; we could make these
1054	 * module parameters if anyone cared about setting them.
1055	 */
1056	req.responder_resources		= 4;
1057	req.remote_cm_response_timeout	= 20;
1058	req.local_cm_response_timeout	= 20;
1059	req.retry_count			= 0; /* RFC draft warns against retries */
1060	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
1061	req.max_cm_retries		= 15;
1062	req.srq				= ipoib_cm_has_srq(dev);
1063	return ib_send_cm_req(id, &req);
1064}
1065
1066static int ipoib_cm_modify_tx_init(struct net_device *dev,
1067				  struct ib_cm_id *cm_id, struct ib_qp *qp)
1068{
1069	struct ipoib_dev_priv *priv = netdev_priv(dev);
1070	struct ib_qp_attr qp_attr;
1071	int qp_attr_mask, ret;
1072	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
1073	if (ret) {
1074		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
1075		return ret;
1076	}
1077
1078	qp_attr.qp_state = IB_QPS_INIT;
1079	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1080	qp_attr.port_num = priv->port;
1081	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
1082
1083	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1084	if (ret) {
1085		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
1086		return ret;
1087	}
1088	return 0;
1089}
1090
1091static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1092			    struct ib_sa_path_rec *pathrec)
1093{
1094	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
1095	int ret;
1096
1097	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
1098	if (!p->tx_ring) {
1099		ipoib_warn(priv, "failed to allocate tx ring\n");
1100		ret = -ENOMEM;
1101		goto err_tx;
1102	}
1103	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
1104
1105	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
1106	if (IS_ERR(p->qp)) {
1107		ret = PTR_ERR(p->qp);
1108		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
1109		goto err_qp;
1110	}
1111
1112	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
1113	if (IS_ERR(p->id)) {
1114		ret = PTR_ERR(p->id);
1115		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
1116		goto err_id;
1117	}
1118
1119	ret = ipoib_cm_modify_tx_init(p->dev, p->id,  p->qp);
1120	if (ret) {
1121		ipoib_warn(priv, "failed to modify tx qp to INIT: %d\n", ret);
1122		goto err_modify;
1123	}
1124
1125	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
1126	if (ret) {
1127		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
1128		goto err_send_cm;
1129	}
1130
1131	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
1132		  p->qp->qp_num, pathrec->dgid.raw, qpn);
1133
1134	return 0;
1135
1136err_send_cm:
1137err_modify:
1138	ib_destroy_cm_id(p->id);
1139err_id:
1140	p->id = NULL;
1141	ib_destroy_qp(p->qp);
1142err_qp:
1143	p->qp = NULL;
1144	vfree(p->tx_ring);
1145err_tx:
1146	return ret;
1147}
1148
1149static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1150{
1151	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
1152	struct ipoib_cm_tx_buf *tx_req;
1153	unsigned long begin;
1154
1155	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
1156		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
1157
1158	if (p->id)
1159		ib_destroy_cm_id(p->id);
1160
1161	if (p->tx_ring) {
1162		/* Wait for all sends to complete */
1163		begin = jiffies;
1164		while ((int) p->tx_tail - (int) p->tx_head < 0) {
1165			if (time_after(jiffies, begin + 5 * HZ)) {
1166				ipoib_warn(priv, "timing out; %d sends not completed\n",
1167					   p->tx_head - p->tx_tail);
1168				goto timeout;
1169			}
1170
1171			msleep(1);
1172		}
1173	}
1174
1175timeout:
1176
1177	while ((int) p->tx_tail - (int) p->tx_head < 0) {
1178		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
1179		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
1180				    DMA_TO_DEVICE);
1181		dev_kfree_skb_any(tx_req->skb);
1182		++p->tx_tail;
1183		netif_tx_lock_bh(p->dev);
1184		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
1185		    netif_queue_stopped(p->dev) &&
1186		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1187			netif_wake_queue(p->dev);
1188		netif_tx_unlock_bh(p->dev);
1189	}
1190
1191	if (p->qp)
1192		ib_destroy_qp(p->qp);
1193
1194	vfree(p->tx_ring);
1195	kfree(p);
1196}
1197
1198static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1199			       struct ib_cm_event *event)
1200{
1201	struct ipoib_cm_tx *tx = cm_id->context;
1202	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
1203	struct net_device *dev = priv->dev;
1204	struct ipoib_neigh *neigh;
1205	unsigned long flags;
1206	int ret;
1207
1208	switch (event->event) {
1209	case IB_CM_DREQ_RECEIVED:
1210		ipoib_dbg(priv, "DREQ received.\n");
1211		ib_send_cm_drep(cm_id, NULL, 0);
1212		break;
1213	case IB_CM_REP_RECEIVED:
1214		ipoib_dbg(priv, "REP received.\n");
1215		ret = ipoib_cm_rep_handler(cm_id, event);
1216		if (ret)
1217			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1218				       NULL, 0, NULL, 0);
1219		break;
1220	case IB_CM_REQ_ERROR:
1221	case IB_CM_REJ_RECEIVED:
1222	case IB_CM_TIMEWAIT_EXIT:
1223		ipoib_dbg(priv, "CM error %d.\n", event->event);
1224		netif_tx_lock_bh(dev);
1225		spin_lock_irqsave(&priv->lock, flags);
1226		neigh = tx->neigh;
1227
1228		if (neigh) {
1229			neigh->cm = NULL;
1230			list_del(&neigh->list);
1231			if (neigh->ah)
1232				ipoib_put_ah(neigh->ah);
1233			ipoib_neigh_free(dev, neigh);
1234
1235			tx->neigh = NULL;
1236		}
1237
1238		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1239			list_move(&tx->list, &priv->cm.reap_list);
1240			queue_work(ipoib_workqueue, &priv->cm.reap_task);
1241		}
1242
1243		spin_unlock_irqrestore(&priv->lock, flags);
1244		netif_tx_unlock_bh(dev);
1245		break;
1246	default:
1247		break;
1248	}
1249
1250	return 0;
1251}
1252
1253struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
1254				       struct ipoib_neigh *neigh)
1255{
1256	struct ipoib_dev_priv *priv = netdev_priv(dev);
1257	struct ipoib_cm_tx *tx;
1258
1259	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
1260	if (!tx)
1261		return NULL;
1262
1263	neigh->cm = tx;
1264	tx->neigh = neigh;
1265	tx->path = path;
1266	tx->dev = dev;
1267	list_add(&tx->list, &priv->cm.start_list);
1268	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
1269	queue_work(ipoib_workqueue, &priv->cm.start_task);
1270	return tx;
1271}
1272
1273void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
1274{
1275	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
1276	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1277		list_move(&tx->list, &priv->cm.reap_list);
1278		queue_work(ipoib_workqueue, &priv->cm.reap_task);
1279		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
1280			  tx->neigh->dgid.raw);
1281		tx->neigh = NULL;
1282	}
1283}
1284
1285static void ipoib_cm_tx_start(struct work_struct *work)
1286{
1287	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1288						   cm.start_task);
1289	struct net_device *dev = priv->dev;
1290	struct ipoib_neigh *neigh;
1291	struct ipoib_cm_tx *p;
1292	unsigned long flags;
1293	int ret;
1294
1295	struct ib_sa_path_rec pathrec;
1296	u32 qpn;
1297
1298	netif_tx_lock_bh(dev);
1299	spin_lock_irqsave(&priv->lock, flags);
1300
1301	while (!list_empty(&priv->cm.start_list)) {
1302		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
1303		list_del_init(&p->list);
1304		neigh = p->neigh;
1305		qpn = IPOIB_QPN(neigh->neighbour->ha);
1306		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
1307
1308		spin_unlock_irqrestore(&priv->lock, flags);
1309		netif_tx_unlock_bh(dev);
1310
1311		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
1312
1313		netif_tx_lock_bh(dev);
1314		spin_lock_irqsave(&priv->lock, flags);
1315
1316		if (ret) {
1317			neigh = p->neigh;
1318			if (neigh) {
1319				neigh->cm = NULL;
1320				list_del(&neigh->list);
1321				if (neigh->ah)
1322					ipoib_put_ah(neigh->ah);
1323				ipoib_neigh_free(dev, neigh);
1324			}
1325			list_del(&p->list);
1326			kfree(p);
1327		}
1328	}
1329
1330	spin_unlock_irqrestore(&priv->lock, flags);
1331	netif_tx_unlock_bh(dev);
1332}
1333
1334static void ipoib_cm_tx_reap(struct work_struct *work)
1335{
1336	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1337						   cm.reap_task);
1338	struct net_device *dev = priv->dev;
1339	struct ipoib_cm_tx *p;
1340	unsigned long flags;
1341
1342	netif_tx_lock_bh(dev);
1343	spin_lock_irqsave(&priv->lock, flags);
1344
1345	while (!list_empty(&priv->cm.reap_list)) {
1346		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
1347		list_del(&p->list);
1348		spin_unlock_irqrestore(&priv->lock, flags);
1349		netif_tx_unlock_bh(dev);
1350		ipoib_cm_tx_destroy(p);
1351		netif_tx_lock_bh(dev);
1352		spin_lock_irqsave(&priv->lock, flags);
1353	}
1354
1355	spin_unlock_irqrestore(&priv->lock, flags);
1356	netif_tx_unlock_bh(dev);
1357}
1358
1359static void ipoib_cm_skb_reap(struct work_struct *work)
1360{
1361	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1362						   cm.skb_task);
1363	struct net_device *dev = priv->dev;
1364	struct sk_buff *skb;
1365	unsigned long flags;
1366	unsigned mtu = priv->mcast_mtu;
1367
1368	netif_tx_lock_bh(dev);
1369	spin_lock_irqsave(&priv->lock, flags);
1370
1371	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
1372		spin_unlock_irqrestore(&priv->lock, flags);
1373		netif_tx_unlock_bh(dev);
1374
1375		if (skb->protocol == htons(ETH_P_IP))
1376			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1377#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1378		else if (skb->protocol == htons(ETH_P_IPV6))
1379			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
1380#endif
1381		dev_kfree_skb_any(skb);
1382
1383		netif_tx_lock_bh(dev);
1384		spin_lock_irqsave(&priv->lock, flags);
1385	}
1386
1387	spin_unlock_irqrestore(&priv->lock, flags);
1388	netif_tx_unlock_bh(dev);
1389}
1390
1391void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
1392			   unsigned int mtu)
1393{
1394	struct ipoib_dev_priv *priv = netdev_priv(dev);
1395	int e = skb_queue_empty(&priv->cm.skb_queue);
1396
1397	if (skb->dst)
1398		skb->dst->ops->update_pmtu(skb->dst, mtu);
1399
1400	skb_queue_tail(&priv->cm.skb_queue, skb);
1401	if (e)
1402		queue_work(ipoib_workqueue, &priv->cm.skb_task);
1403}
1404
1405static void ipoib_cm_rx_reap(struct work_struct *work)
1406{
1407	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
1408						cm.rx_reap_task)->dev);
1409}
1410
1411static void ipoib_cm_stale_task(struct work_struct *work)
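/*
 * Periodic work item: walk the LRU-ordered passive_ids list from the
 * tail and move connections idle for longer than IPOIB_CM_RX_TIMEOUT to
 * the error list, putting their QPs in the error state.
 */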
1412{
1413	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1414						   cm.stale_task.work);
1415	struct ipoib_cm_rx *p;
1416	int ret;
1417
1418	spin_lock_irq(&priv->lock);
1419	while (!list_empty(&priv->cm.passive_ids)) {
1420		/* List is sorted by LRU, start from tail,
1421		 * stop when we see a recently used entry */
1422		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
1423		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
1424			break;
1425		list_move(&p->list, &priv->cm.rx_error_list);
1426		p->state = IPOIB_CM_RX_ERROR;
1427		spin_unlock_irq(&priv->lock);
1428		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
1429		if (ret)
1430			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
1431		spin_lock_irq(&priv->lock);
1432	}
1433
1434	if (!list_empty(&priv->cm.passive_ids))
1435		queue_delayed_work(ipoib_workqueue,
1436				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
1437	spin_unlock_irq(&priv->lock);
1438}
1439
1440
1441static ssize_t show_mode(struct device *d, struct device_attribute *attr,
1442			 char *buf)
1443{
1444	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
1445
1446	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
1447		return sprintf(buf, "connected\n");
1448	else
1449		return sprintf(buf, "datagram\n");
1450}
1451
1452static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1453			const char *buf, size_t count)
1454{
1455	struct net_device *dev = to_net_dev(d);
1456	struct ipoib_dev_priv *priv = netdev_priv(dev);
1457
1458	if (!rtnl_trylock())
1459		return restart_syscall();
1460
1461	/* flush paths if we switch modes so that connections are restarted */
1462	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
1463		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
1464		ipoib_warn(priv, "enabling connected mode "
1465			   "will cause multicast packet drops\n");
1466
1467		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
1468		rtnl_unlock();
1469		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
1470
1471		ipoib_flush_paths(dev);
1472		return count;
1473	}
1474
1475	if (!strcmp(buf, "datagram\n")) {
1476		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
1477
1478		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
1479			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1480			if (priv->hca_caps & IB_DEVICE_UD_TSO)
1481				dev->features |= NETIF_F_TSO;
1482		}
1483		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
1484		rtnl_unlock();
1485		ipoib_flush_paths(dev);
1486
1487		return count;
1488	}
1489	rtnl_unlock();
1490
1491	return -EINVAL;
1492}
1493
1494static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
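/*
 * Usage sketch (the interface name ib0 is only an example):
 *
 *   echo connected > /sys/class/net/ib0/mode
 *   echo datagram  > /sys/class/net/ib0/mode
 */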
1495
1496int ipoib_cm_add_mode_attr(struct net_device *dev)
1497{
1498	return device_create_file(&dev->dev, &dev_attr_mode);
1499}
1500
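/*
 * Create the shared receive queue and its ring.  If the HCA does not
 * support SRQs (ib_create_srq() returns -ENOSYS), connected mode quietly
 * falls back to per-connection receive rings.
 */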
1501static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
1502{
1503	struct ipoib_dev_priv *priv = netdev_priv(dev);
1504	struct ib_srq_init_attr srq_init_attr = {
1505		.attr = {
1506			.max_wr  = ipoib_recvq_size,
1507			.max_sge = max_sge
1508		}
1509	};
1510
1511	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
1512	if (IS_ERR(priv->cm.srq)) {
1513		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
1514			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
1515			       priv->ca->name, PTR_ERR(priv->cm.srq));
1516		priv->cm.srq = NULL;
1517		return;
1518	}
1519
1520	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1521	if (!priv->cm.srq_ring) {
1522		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
1523		       priv->ca->name, ipoib_recvq_size);
1524		ib_destroy_srq(priv->cm.srq);
1525		priv->cm.srq = NULL;
1526		return;
1527	}
1528
1529	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1530}
1531
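/*
 * Per-device connected-mode setup: initialize the connection lists and
 * work items, size the receive S/G list from the HCA's max_srq_sge,
 * create the SRQ if possible, and pre-post the SRQ receive ring.
 */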
1532int ipoib_cm_dev_init(struct net_device *dev)
1533{
1534	struct ipoib_dev_priv *priv = netdev_priv(dev);
1535	int i, ret;
1536	struct ib_device_attr attr;
1537
1538	INIT_LIST_HEAD(&priv->cm.passive_ids);
1539	INIT_LIST_HEAD(&priv->cm.reap_list);
1540	INIT_LIST_HEAD(&priv->cm.start_list);
1541	INIT_LIST_HEAD(&priv->cm.rx_error_list);
1542	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
1543	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
1544	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
1545	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
1546	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
1547	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
1548	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
1549	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
1550
1551	skb_queue_head_init(&priv->cm.skb_queue);
1552
1553	ret = ib_query_device(priv->ca, &attr);
1554	if (ret) {
1555		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
1556		return ret;
1557	}
1558
1559	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);
1560
1561	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
1562	ipoib_cm_create_srq(dev, attr.max_srq_sge);
1563	if (ipoib_cm_has_srq(dev)) {
1564		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
1565		priv->cm.num_frags  = attr.max_srq_sge;
1566		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
1567			  priv->cm.max_cm_mtu, priv->cm.num_frags);
1568	} else {
1569		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
1570		priv->cm.num_frags  = IPOIB_CM_RX_SG;
1571	}
1572
1573	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
1574
1575	if (ipoib_cm_has_srq(dev)) {
1576		for (i = 0; i < ipoib_recvq_size; ++i) {
1577			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
1578						   priv->cm.num_frags - 1,
1579						   priv->cm.srq_ring[i].mapping)) {
1580				ipoib_warn(priv, "failed to allocate "
1581					   "receive buffer %d\n", i);
1582				ipoib_cm_dev_cleanup(dev);
1583				return -ENOMEM;
1584			}
1585
1586			if (ipoib_cm_post_receive_srq(dev, i)) {
1587				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
1588					   "failed for buf %d\n", i);
1589				ipoib_cm_dev_cleanup(dev);
1590				return -EIO;
1591			}
1592		}
1593	}
1594
1595	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
1596	return 0;
1597}
1598
1599void ipoib_cm_dev_cleanup(struct net_device *dev)
1600{
1601	struct ipoib_dev_priv *priv = netdev_priv(dev);
1602	int ret;
1603
1604	if (!priv->cm.srq)
1605		return;
1606
1607	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
1608
1609	ret = ib_destroy_srq(priv->cm.srq);
1610	if (ret)
1611		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
1612
1613	priv->cm.srq = NULL;
1614	if (!priv->cm.srq_ring)
1615		return;
1616
1617	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
1618	priv->cm.srq_ring = NULL;
1619}
1620