cm.c revision 227eca83690da7dcbd698d3268e29402e0571723
/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;
	wait_queue_head_t wait;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

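/*
 * Reference counting: each outstanding MAD allocated by cm_alloc_msg() and
 * each received work item holds a reference on the cm_id_private.
 * ib_destroy_cm_id() drops the creation reference and then sleeps on ->wait
 * until cm_deref_id() releases the last reference.
 */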
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		wake_up(&cm_id_priv->wait);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				 void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
{
	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = dlid;
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
				    struct ib_wc *wc, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
		       wc->sl, wc->dlid_path_bits);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
		       be16_to_cpu(path->dlid), path->sl,
		       be16_to_cpu(path->slid) & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

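/*
 * idr_get_new_above() fails with -EAGAIN whenever the idr's preallocated
 * layer cache is empty, so the allocation is retried after refilling the
 * cache with idr_pre_get(), which may sleep and therefore runs outside
 * the spinlock.
 */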
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

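/*
 * The listen tree is ordered by (device, service_id).  A match applies
 * both sides' service masks, so a listener may wildcard a range of
 * service IDs on a single device; overlapping listens are refused.
 */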
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_waitqueue_head(&cm_id_priv->wait);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

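/*
 * For example, an IBA time of 14 converts to 1 << (14 - 8) = 64 ms,
 * versus the exact value of 4.096 us * 2^14 ~= 67 ms.
 */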
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

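/*
 * Tear down a cm_id from any state: remove a listen from the service
 * tree, cancel and reject an in-progress REQ/REP exchange, disconnect an
 * established connection with DREQ/DREP, then wait for all references
 * (outstanding MADs and queued work) to be released before freeing.
 */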
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	atomic_dec(&cm_id_priv->refcount);
	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

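/*
 * Example (illustrative only): listen for connection requests on a fixed
 * service ID, matching it exactly:
 *
 *	cm_id = ib_create_cm_id(device, req_handler, ctx);
 *	if (!IS_ERR(cm_id))
 *		ret = ib_cm_listen(cm_id, cpu_to_be64(0x1234ULL), 0);
 *
 * req_handler and ctx are the caller's; a zero service_mask is treated
 * as an exact match (all mask bits set).
 */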
int ib_cm_listen(struct ib_cm_id *cm_id,
		 __be64 service_id,
		 __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	BUG_ON(cm_id->state != IB_CM_IDLE);

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

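/*
 * A CM TID carries the MAD agent's hi_tid in the upper 32 bits and the
 * local communication ID in the lower 32 bits, with the message sequence
 * number folded into bits 30-31 so that concurrent exchanges (e.g. a REQ
 * and a DREQ) on the same cm_id use distinct TIDs.
 */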
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

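/*
 * Example (illustrative only): initiate a connection once a path record
 * has been resolved; path_rec, qp and psn are the caller's:
 *
 *	struct ib_cm_req_param param = {
 *		.primary_path		= &path_rec,
 *		.service_id		= service_id,
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= IB_QPT_RC,
 *		.starting_psn		= psn,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(cm_id, &param);
 */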
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}
	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

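/*
 * The active side of a connection is the one with the larger CA GUID,
 * with the QPNs breaking the tie when both GUIDs are equal.
 */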
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

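/*
 * Path records in a REQ are written from the sender's point of view, so
 * the local and remote fields are swapped here to yield the paths as
 * seen by the receiving side.
 */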
static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
					    struct ib_sa_path_rec *primary_path,
					    struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

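/*
 * work_count starts at -1.  The receive handler that increments it to
 * zero owns event delivery; handlers that lose the race queue their work
 * on work_list, and the owner drains that list here until
 * atomic_add_negative() drives the count negative again.
 */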
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

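/*
 * An incoming REQ is first checked against the timewait tables: a remote
 * comm ID/CA GUID already present indicates a duplicate REQ, and a
 * remote QPN/CA GUID already present indicates a stale connection.
 * Otherwise the REQ is matched against the listen tree by service ID.
 */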
1209static struct cm_id_private * cm_match_req(struct cm_work *work,
1210					   struct cm_id_private *cm_id_priv)
1211{
1212	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1213	struct cm_timewait_info *timewait_info;
1214	struct cm_req_msg *req_msg;
1215	unsigned long flags;
1216
1217	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1218
1219	/* Check for duplicate REQ and stale connections. */
1220	spin_lock_irqsave(&cm.lock, flags);
1221	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1222	if (!timewait_info)
1223		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1224
1225	if (timewait_info) {
1226		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1227					   timewait_info->work.remote_id);
1228		spin_unlock_irqrestore(&cm.lock, flags);
1229		if (cur_cm_id_priv) {
1230			cm_dup_req_handler(work, cur_cm_id_priv);
1231			cm_deref_id(cur_cm_id_priv);
1232		} else
1233			cm_issue_rej(work->port, work->mad_recv_wc,
1234				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1235				     NULL, 0);
1236		goto error;
1237	}
1238
1239	/* Find matching listen request. */
1240	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1241					   req_msg->service_id);
1242	if (!listen_cm_id_priv) {
1243		spin_unlock_irqrestore(&cm.lock, flags);
1244		cm_issue_rej(work->port, work->mad_recv_wc,
1245			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1246			     NULL, 0);
1247		goto error;
1248	}
1249	atomic_inc(&listen_cm_id_priv->refcount);
1250	atomic_inc(&cm_id_priv->refcount);
1251	cm_id_priv->id.state = IB_CM_REQ_RCVD;
1252	atomic_inc(&cm_id_priv->work_count);
1253	spin_unlock_irqrestore(&cm.lock, flags);
1254	return listen_cm_id_priv;
1255
1256error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
1257	return NULL;
1258}
1259
1260static int cm_req_handler(struct cm_work *work)
1261{
1262	struct ib_cm_id *cm_id;
1263	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1264	struct cm_req_msg *req_msg;
1265	int ret;
1266
1267	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1268
1269	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
1270	if (IS_ERR(cm_id))
1271		return PTR_ERR(cm_id);
1272
1273	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1274	cm_id_priv->id.remote_id = req_msg->local_comm_id;
1275	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1276				&cm_id_priv->av);
1277	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1278							    id.local_id);
1279	if (IS_ERR(cm_id_priv->timewait_info)) {
1280		ret = PTR_ERR(cm_id_priv->timewait_info);
1281		goto error1;
1282	}
1283	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1284	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1285	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1286
1287	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1288	if (!listen_cm_id_priv) {
1289		ret = -EINVAL;
1290		goto error2;
1291	}
1292
1293	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1294	cm_id_priv->id.context = listen_cm_id_priv->id.context;
1295	cm_id_priv->id.service_id = req_msg->service_id;
1296	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
1297
1298	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1299	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1300	if (ret)
1301		goto error3;
1302	if (req_msg->alt_local_lid) {
1303		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1304		if (ret)
1305			goto error3;
1306	}
1307	cm_id_priv->tid = req_msg->hdr.tid;
1308	cm_id_priv->timeout_ms = cm_convert_to_ms(
1309					cm_req_get_local_resp_timeout(req_msg));
1310	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1311	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1312	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1313	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1314	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1315	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1316	cm_id_priv->local_ack_timeout =
1317				cm_req_get_primary_local_ack_timeout(req_msg);
1318	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1319	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1320	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1321
1322	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1323	cm_process_work(cm_id_priv, work);
1324	cm_deref_id(listen_cm_id_priv);
1325	return 0;
1326
1327error3:	atomic_dec(&cm_id_priv->refcount);
1328	cm_deref_id(listen_cm_id_priv);
1329	cm_cleanup_timewait(cm_id_priv->timewait_info);
1330error2:	kfree(cm_id_priv->timewait_info);
1331	cm_id_priv->timewait_info = NULL;
1332error1:	ib_destroy_cm_id(&cm_id_priv->id);
1333	return ret;
1334}
1335
1336static void cm_format_rep(struct cm_rep_msg *rep_msg,
1337			  struct cm_id_private *cm_id_priv,
1338			  struct ib_cm_rep_param *param)
1339{
1340	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1341	rep_msg->local_comm_id = cm_id_priv->id.local_id;
1342	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1343	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1344	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1345	rep_msg->resp_resources = param->responder_resources;
1346	rep_msg->initiator_depth = param->initiator_depth;
1347	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1348	cm_rep_set_failover(rep_msg, param->failover_accepted);
1349	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1350	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1351	cm_rep_set_srq(rep_msg, param->srq);
1352	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1353
1354	if (param->private_data && param->private_data_len)
1355		memcpy(rep_msg->private_data, param->private_data,
1356		       param->private_data_len);
1357}
1358
1359int ib_send_cm_rep(struct ib_cm_id *cm_id,
1360		   struct ib_cm_rep_param *param)
1361{
1362	struct cm_id_private *cm_id_priv;
1363	struct ib_mad_send_buf *msg;
1364	struct cm_rep_msg *rep_msg;
1365	unsigned long flags;
1366	int ret;
1367
1368	if (param->private_data &&
1369	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1370		return -EINVAL;
1371
1372	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1373	spin_lock_irqsave(&cm_id_priv->lock, flags);
1374	if (cm_id->state != IB_CM_REQ_RCVD &&
1375	    cm_id->state != IB_CM_MRA_REQ_SENT) {
1376		ret = -EINVAL;
1377		goto out;
1378	}
1379
1380	ret = cm_alloc_msg(cm_id_priv, &msg);
1381	if (ret)
1382		goto out;
1383
1384	rep_msg = (struct cm_rep_msg *) msg->mad;
1385	cm_format_rep(rep_msg, cm_id_priv, param);
1386	msg->timeout_ms = cm_id_priv->timeout_ms;
1387	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1388
1389	ret = ib_post_send_mad(msg, NULL);
1390	if (ret) {
1391		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1392		cm_free_msg(msg);
1393		return ret;
1394	}
1395
1396	cm_id->state = IB_CM_REP_SENT;
1397	cm_id_priv->msg = msg;
1398	cm_id_priv->initiator_depth = param->initiator_depth;
1399	cm_id_priv->responder_resources = param->responder_resources;
1400	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1401	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1402
1403out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1404	return ret;
1405}
1406EXPORT_SYMBOL(ib_send_cm_rep);
1407
1408static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1409			  struct cm_id_private *cm_id_priv,
1410			  const void *private_data,
1411			  u8 private_data_len)
1412{
1413	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1414	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1415	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1416
1417	if (private_data && private_data_len)
1418		memcpy(rtu_msg->private_data, private_data, private_data_len);
1419}
1420
1421int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1422		   const void *private_data,
1423		   u8 private_data_len)
1424{
1425	struct cm_id_private *cm_id_priv;
1426	struct ib_mad_send_buf *msg;
1427	unsigned long flags;
1428	void *data;
1429	int ret;
1430
1431	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1432		return -EINVAL;
1433
1434	data = cm_copy_private_data(private_data, private_data_len);
1435	if (IS_ERR(data))
1436		return PTR_ERR(data);
1437
1438	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1439	spin_lock_irqsave(&cm_id_priv->lock, flags);
1440	if (cm_id->state != IB_CM_REP_RCVD &&
1441	    cm_id->state != IB_CM_MRA_REP_SENT) {
1442		ret = -EINVAL;
1443		goto error;
1444	}
1445
1446	ret = cm_alloc_msg(cm_id_priv, &msg);
1447	if (ret)
1448		goto error;
1449
1450	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1451		      private_data, private_data_len);
1452
1453	ret = ib_post_send_mad(msg, NULL);
1454	if (ret) {
1455		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1456		cm_free_msg(msg);
1457		kfree(data);
1458		return ret;
1459	}
1460
1461	cm_id->state = IB_CM_ESTABLISHED;
1462	cm_set_private_data(cm_id_priv, data, private_data_len);
1463	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1464	return 0;
1465
1466error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1467	kfree(data);
1468	return ret;
1469}
1470EXPORT_SYMBOL(ib_send_cm_rtu);
1471
1472static void cm_format_rep_event(struct cm_work *work)
1473{
1474	struct cm_rep_msg *rep_msg;
1475	struct ib_cm_rep_event_param *param;
1476
1477	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1478	param = &work->cm_event.param.rep_rcvd;
1479	param->remote_ca_guid = rep_msg->local_ca_guid;
1480	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1481	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1482	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1483	param->responder_resources = rep_msg->initiator_depth;
1484	param->initiator_depth = rep_msg->resp_resources;
1485	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1486	param->failover_accepted = cm_rep_get_failover(rep_msg);
1487	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1488	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1489	param->srq = cm_rep_get_srq(rep_msg);
1490	work->cm_event.private_data = &rep_msg->private_data;
1491}
1492
1493static void cm_dup_rep_handler(struct cm_work *work)
1494{
1495	struct cm_id_private *cm_id_priv;
1496	struct cm_rep_msg *rep_msg;
1497	struct ib_mad_send_buf *msg = NULL;
1498	unsigned long flags;
1499	int ret;
1500
1501	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1502	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1503				   rep_msg->local_comm_id);
1504	if (!cm_id_priv)
1505		return;
1506
1507	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1508	if (ret)
1509		goto deref;
1510
1511	spin_lock_irqsave(&cm_id_priv->lock, flags);
1512	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1513		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1514			      cm_id_priv->private_data,
1515			      cm_id_priv->private_data_len);
1516	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1517		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1518			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1519			      cm_id_priv->private_data,
1520			      cm_id_priv->private_data_len);
1521	else
1522		goto unlock;
1523	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1524
1525	ret = ib_post_send_mad(msg, NULL);
1526	if (ret)
1527		goto free;
1528	goto deref;
1529
1530unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1531free:	cm_free_msg(msg);
1532deref:	cm_deref_id(cm_id_priv);
1533}
1534
1535static int cm_rep_handler(struct cm_work *work)
1536{
1537	struct cm_id_private *cm_id_priv;
1538	struct cm_rep_msg *rep_msg;
1539	unsigned long flags;
1540	int ret;
1541
1542	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1543	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1544	if (!cm_id_priv) {
1545		cm_dup_rep_handler(work);
1546		return -EINVAL;
1547	}
1548
1549	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1550	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1551	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1552
1553	spin_lock_irqsave(&cm.lock, flags);
1554	/* Check for duplicate REP. */
1555	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1556		spin_unlock_irqrestore(&cm.lock, flags);
1557		ret = -EINVAL;
1558		goto error;
1559	}
1560	/* Check for a stale connection. */
1561	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1562		spin_unlock_irqrestore(&cm.lock, flags);
1563		cm_issue_rej(work->port, work->mad_recv_wc,
1564			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1565			     NULL, 0);
1566		ret = -EINVAL;
1567		goto error;
1568	}
1569	spin_unlock_irqrestore(&cm.lock, flags);
1570
1571	cm_format_rep_event(work);
1572
1573	spin_lock_irqsave(&cm_id_priv->lock, flags);
1574	switch (cm_id_priv->id.state) {
1575	case IB_CM_REQ_SENT:
1576	case IB_CM_MRA_REQ_RCVD:
1577		break;
1578	default:
1579		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1580		ret = -EINVAL;
1581		goto error;
1582	}
1583	cm_id_priv->id.state = IB_CM_REP_RCVD;
1584	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1585	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1586	cm_id_priv->initiator_depth = rep_msg->resp_resources;
1587	cm_id_priv->responder_resources = rep_msg->initiator_depth;
1588	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1589	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1590
1591	/* todo: handle peer_to_peer */
1592
1593	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1594	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1595	if (!ret)
1596		list_add_tail(&work->list, &cm_id_priv->work_list);
1597	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1598
1599	if (ret)
1600		cm_process_work(cm_id_priv, work);
1601	else
1602		cm_deref_id(cm_id_priv);
1603	return 0;
1604
1605error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
1606	cm_deref_id(cm_id_priv);
1607	return ret;
1608}
1609
1610static int cm_establish_handler(struct cm_work *work)
1611{
1612	struct cm_id_private *cm_id_priv;
1613	unsigned long flags;
1614	int ret;
1615
1616	/* See comment in ib_cm_establish about lookup. */
1617	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1618	if (!cm_id_priv)
1619		return -EINVAL;
1620
1621	spin_lock_irqsave(&cm_id_priv->lock, flags);
1622	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1623		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1624		goto out;
1625	}
1626
1627	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1628	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1629	if (!ret)
1630		list_add_tail(&work->list, &cm_id_priv->work_list);
1631	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1632
1633	if (ret)
1634		cm_process_work(cm_id_priv, work);
1635	else
1636		cm_deref_id(cm_id_priv);
1637	return 0;
1638out:
1639	cm_deref_id(cm_id_priv);
1640	return -EINVAL;
1641}
1642
1643static int cm_rtu_handler(struct cm_work *work)
1644{
1645	struct cm_id_private *cm_id_priv;
1646	struct cm_rtu_msg *rtu_msg;
1647	unsigned long flags;
1648	int ret;
1649
1650	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1651	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1652				   rtu_msg->local_comm_id);
1653	if (!cm_id_priv)
1654		return -EINVAL;
1655
1656	work->cm_event.private_data = &rtu_msg->private_data;
1657
1658	spin_lock_irqsave(&cm_id_priv->lock, flags);
1659	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1660	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1661		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1662		goto out;
1663	}
1664	cm_id_priv->id.state = IB_CM_ESTABLISHED;
1665
1666	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1667	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1668	if (!ret)
1669		list_add_tail(&work->list, &cm_id_priv->work_list);
1670	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1671
1672	if (ret)
1673		cm_process_work(cm_id_priv, work);
1674	else
1675		cm_deref_id(cm_id_priv);
1676	return 0;
1677out:
1678	cm_deref_id(cm_id_priv);
1679	return -EINVAL;
1680}
1681
1682static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1683			  struct cm_id_private *cm_id_priv,
1684			  const void *private_data,
1685			  u8 private_data_len)
1686{
1687	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1688			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1689	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1690	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1691	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1692
1693	if (private_data && private_data_len)
1694		memcpy(dreq_msg->private_data, private_data, private_data_len);
1695}
1696
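/*
 * Send a DREQ to disconnect an established connection.  If the DREQ
 * cannot be allocated or posted, the cm_id is moved straight into
 * timewait instead of waiting for a DREP.
 */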
1697int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1698		    const void *private_data,
1699		    u8 private_data_len)
1700{
1701	struct cm_id_private *cm_id_priv;
1702	struct ib_mad_send_buf *msg;
1703	unsigned long flags;
1704	int ret;
1705
1706	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1707		return -EINVAL;
1708
1709	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1710	spin_lock_irqsave(&cm_id_priv->lock, flags);
1711	if (cm_id->state != IB_CM_ESTABLISHED) {
1712		ret = -EINVAL;
1713		goto out;
1714	}
1715
1716	ret = cm_alloc_msg(cm_id_priv, &msg);
1717	if (ret) {
1718		cm_enter_timewait(cm_id_priv);
1719		goto out;
1720	}
1721
1722	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1723		       private_data, private_data_len);
1724	msg->timeout_ms = cm_id_priv->timeout_ms;
1725	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1726
1727	ret = ib_post_send_mad(msg, NULL);
1728	if (ret) {
1729		cm_enter_timewait(cm_id_priv);
1730		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1731		cm_free_msg(msg);
1732		return ret;
1733	}
1734
1735	cm_id->state = IB_CM_DREQ_SENT;
1736	cm_id_priv->msg = msg;
1737out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1738	return ret;
1739}
1740EXPORT_SYMBOL(ib_send_cm_dreq);
1741
1742static void cm_format_drep(struct cm_drep_msg *drep_msg,
1743			  struct cm_id_private *cm_id_priv,
1744			  const void *private_data,
1745			  u8 private_data_len)
1746{
1747	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1748	drep_msg->local_comm_id = cm_id_priv->id.local_id;
1749	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1750
1751	if (private_data && private_data_len)
1752		memcpy(drep_msg->private_data, private_data, private_data_len);
1753}
1754
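/*
 * Send a DREP in reply to a received DREQ.  The connection enters
 * timewait regardless of whether the DREP can be sent, and the private
 * data is saved so the DREP can be regenerated if the DREQ is retried.
 */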
1755int ib_send_cm_drep(struct ib_cm_id *cm_id,
1756		    const void *private_data,
1757		    u8 private_data_len)
1758{
1759	struct cm_id_private *cm_id_priv;
1760	struct ib_mad_send_buf *msg;
1761	unsigned long flags;
1762	void *data;
1763	int ret;
1764
1765	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1766		return -EINVAL;
1767
1768	data = cm_copy_private_data(private_data, private_data_len);
1769	if (IS_ERR(data))
1770		return PTR_ERR(data);
1771
1772	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1773	spin_lock_irqsave(&cm_id_priv->lock, flags);
1774	if (cm_id->state != IB_CM_DREQ_RCVD) {
1775		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1776		kfree(data);
1777		return -EINVAL;
1778	}
1779
1780	cm_set_private_data(cm_id_priv, data, private_data_len);
1781	cm_enter_timewait(cm_id_priv);
1782
1783	ret = cm_alloc_msg(cm_id_priv, &msg);
1784	if (ret)
1785		goto out;
1786
1787	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1788		       private_data, private_data_len);
1789
1790	ret = ib_post_send_mad(msg, NULL);
1791	if (ret) {
1792		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1793		cm_free_msg(msg);
1794		return ret;
1795	}
1796
1797out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1798	return ret;
1799}
1800EXPORT_SYMBOL(ib_send_cm_drep);
1801
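/*
 * Handle a received DREQ.  A DREQ that does not match the local QPN is
 * dropped.  An active connection moves to DREQ_RCVD and the event is
 * reported; a connection already in timewait simply has its DREP
 * resent, since the original reply may have been lost.
 */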
1802static int cm_dreq_handler(struct cm_work *work)
1803{
1804	struct cm_id_private *cm_id_priv;
1805	struct cm_dreq_msg *dreq_msg;
1806	struct ib_mad_send_buf *msg = NULL;
1807	unsigned long flags;
1808	int ret;
1809
1810	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1811	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1812				   dreq_msg->local_comm_id);
1813	if (!cm_id_priv)
1814		return -EINVAL;
1815
1816	work->cm_event.private_data = &dreq_msg->private_data;
1817
1818	spin_lock_irqsave(&cm_id_priv->lock, flags);
1819	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1820		goto unlock;
1821
1822	switch (cm_id_priv->id.state) {
1823	case IB_CM_REP_SENT:
1824	case IB_CM_DREQ_SENT:
1825		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1826		break;
1827	case IB_CM_ESTABLISHED:
1828	case IB_CM_MRA_REP_RCVD:
1829		break;
1830	case IB_CM_TIMEWAIT:
1831		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1832			goto unlock;
1833
1834		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1835			       cm_id_priv->private_data,
1836			       cm_id_priv->private_data_len);
1837		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1838
1839		if (ib_post_send_mad(msg, NULL))
1840			cm_free_msg(msg);
1841		goto deref;
1842	default:
1843		goto unlock;
1844	}
1845	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1846	cm_id_priv->tid = dreq_msg->hdr.tid;
1847	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1848	if (!ret)
1849		list_add_tail(&work->list, &cm_id_priv->work_list);
1850	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1851
1852	if (ret)
1853		cm_process_work(cm_id_priv, work);
1854	else
1855		cm_deref_id(cm_id_priv);
1856	return 0;
1857
1858unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1859deref:	cm_deref_id(cm_id_priv);
1860	return -EINVAL;
1861}
1862
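/*
 * Handle a received DREP.  Valid only in the DREQ_SENT or DREQ_RCVD
 * states; any outstanding DREQ is cancelled and the connection enters
 * timewait before the event is reported.
 */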
1863static int cm_drep_handler(struct cm_work *work)
1864{
1865	struct cm_id_private *cm_id_priv;
1866	struct cm_drep_msg *drep_msg;
1867	unsigned long flags;
1868	int ret;
1869
1870	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1871	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1872				   drep_msg->local_comm_id);
1873	if (!cm_id_priv)
1874		return -EINVAL;
1875
1876	work->cm_event.private_data = &drep_msg->private_data;
1877
1878	spin_lock_irqsave(&cm_id_priv->lock, flags);
1879	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1880	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1881		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1882		goto out;
1883	}
1884	cm_enter_timewait(cm_id_priv);
1885
1886	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1887	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1888	if (!ret)
1889		list_add_tail(&work->list, &cm_id_priv->work_list);
1890	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1891
1892	if (ret)
1893		cm_process_work(cm_id_priv, work);
1894	else
1895		cm_deref_id(cm_id_priv);
1896	return 0;
1897out:
1898	cm_deref_id(cm_id_priv);
1899	return -EINVAL;
1900}
1901
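/*
 * Send a REJ.  If this side has not yet sent a REP, the cm_id is reset
 * to idle; if a REP has already been sent, it enters timewait instead.
 */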
1902int ib_send_cm_rej(struct ib_cm_id *cm_id,
1903		   enum ib_cm_rej_reason reason,
1904		   void *ari,
1905		   u8 ari_length,
1906		   const void *private_data,
1907		   u8 private_data_len)
1908{
1909	struct cm_id_private *cm_id_priv;
1910	struct ib_mad_send_buf *msg;
1911	unsigned long flags;
1912	int ret;
1913
1914	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1915	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1916		return -EINVAL;
1917
1918	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1919
1920	spin_lock_irqsave(&cm_id_priv->lock, flags);
1921	switch (cm_id->state) {
1922	case IB_CM_REQ_SENT:
1923	case IB_CM_MRA_REQ_RCVD:
1924	case IB_CM_REQ_RCVD:
1925	case IB_CM_MRA_REQ_SENT:
1926	case IB_CM_REP_RCVD:
1927	case IB_CM_MRA_REP_SENT:
1928		ret = cm_alloc_msg(cm_id_priv, &msg);
1929		if (!ret)
1930			cm_format_rej((struct cm_rej_msg *) msg->mad,
1931				      cm_id_priv, reason, ari, ari_length,
1932				      private_data, private_data_len);
1933
1934		cm_reset_to_idle(cm_id_priv);
1935		break;
1936	case IB_CM_REP_SENT:
1937	case IB_CM_MRA_REP_RCVD:
1938		ret = cm_alloc_msg(cm_id_priv, &msg);
1939		if (!ret)
1940			cm_format_rej((struct cm_rej_msg *) msg->mad,
1941				      cm_id_priv, reason, ari, ari_length,
1942				      private_data, private_data_len);
1943
1944		cm_enter_timewait(cm_id_priv);
1945		break;
1946	default:
1947		ret = -EINVAL;
1948		goto out;
1949	}
1950
1951	if (ret)
1952		goto out;
1953
1954	ret = ib_post_send_mad(msg, NULL);
1955	if (ret)
1956		cm_free_msg(msg);
1957
1958out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1959	return ret;
1960}
1961EXPORT_SYMBOL(ib_send_cm_rej);
1962
1963static void cm_format_rej_event(struct cm_work *work)
1964{
1965	struct cm_rej_msg *rej_msg;
1966	struct ib_cm_rej_event_param *param;
1967
1968	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1969	param = &work->cm_event.param.rej_rcvd;
1970	param->ari = rej_msg->ari;
1971	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1972	param->reason = __be16_to_cpu(rej_msg->reason);
1973	work->cm_event.private_data = &rej_msg->private_data;
1974}
1975
1976static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
1977{
1978	struct cm_timewait_info *timewait_info;
1979	struct cm_id_private *cm_id_priv;
1980	unsigned long flags;
1981	__be32 remote_id;
1982
1983	remote_id = rej_msg->local_comm_id;
1984
1985	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
1986		spin_lock_irqsave(&cm.lock, flags);
1987		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
1988						  remote_id);
1989		if (!timewait_info) {
1990			spin_unlock_irqrestore(&cm.lock, flags);
1991			return NULL;
1992		}
1993		cm_id_priv = idr_find(&cm.local_id_table,
1994				      (__force int) timewait_info->work.local_id);
1995		if (cm_id_priv) {
1996			if (cm_id_priv->id.remote_id == remote_id)
1997				atomic_inc(&cm_id_priv->refcount);
1998			else
1999				cm_id_priv = NULL;
2000		}
2001		spin_unlock_irqrestore(&cm.lock, flags);
2002	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2003		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2004	else
2005		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2006
2007	return cm_id_priv;
2008}
2009
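/*
 * Handle a received REJ.  Any outstanding REQ, REP, or DREQ is
 * cancelled, and the cm_id either resets to idle or enters timewait
 * depending on how far the connection had progressed and on the
 * reject reason.
 */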
2010static int cm_rej_handler(struct cm_work *work)
2011{
2012	struct cm_id_private *cm_id_priv;
2013	struct cm_rej_msg *rej_msg;
2014	unsigned long flags;
2015	int ret;
2016
2017	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2018	cm_id_priv = cm_acquire_rejected_id(rej_msg);
2019	if (!cm_id_priv)
2020		return -EINVAL;
2021
2022	cm_format_rej_event(work);
2023
2024	spin_lock_irqsave(&cm_id_priv->lock, flags);
2025	switch (cm_id_priv->id.state) {
2026	case IB_CM_REQ_SENT:
2027	case IB_CM_MRA_REQ_RCVD:
2028	case IB_CM_REP_SENT:
2029	case IB_CM_MRA_REP_RCVD:
2030		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2031		/* fall through */
2032	case IB_CM_REQ_RCVD:
2033	case IB_CM_MRA_REQ_SENT:
2034		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2035			cm_enter_timewait(cm_id_priv);
2036		else
2037			cm_reset_to_idle(cm_id_priv);
2038		break;
2039	case IB_CM_DREQ_SENT:
2040		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2041		/* fall through */
2042	case IB_CM_REP_RCVD:
2043	case IB_CM_MRA_REP_SENT:
2044	case IB_CM_ESTABLISHED:
2045		cm_enter_timewait(cm_id_priv);
2046		break;
2047	default:
2048		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2049		ret = -EINVAL;
2050		goto out;
2051	}
2052
2053	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2054	if (!ret)
2055		list_add_tail(&work->list, &cm_id_priv->work_list);
2056	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2057
2058	if (ret)
2059		cm_process_work(cm_id_priv, work);
2060	else
2061		cm_deref_id(cm_id_priv);
2062	return 0;
2063out:
2064	cm_deref_id(cm_id_priv);
2065	return -EINVAL;
2066}
2067
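/*
 * Send an MRA to extend the remote CM's timeout for an outstanding
 * REQ, REP, or LAP.  Which message is being acknowledged is inferred
 * from the current state, and the service timeout and private data
 * are saved so the MRA can be resent if the message is duplicated.
 */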
2068int ib_send_cm_mra(struct ib_cm_id *cm_id,
2069		   u8 service_timeout,
2070		   const void *private_data,
2071		   u8 private_data_len)
2072{
2073	struct cm_id_private *cm_id_priv;
2074	struct ib_mad_send_buf *msg;
2075	void *data;
2076	unsigned long flags;
2077	int ret;
2078
2079	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2080		return -EINVAL;
2081
2082	data = cm_copy_private_data(private_data, private_data_len);
2083	if (IS_ERR(data))
2084		return PTR_ERR(data);
2085
2086	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2087
2088	spin_lock_irqsave(&cm_id_priv->lock, flags);
2089	switch (cm_id_priv->id.state) {
2090	case IB_CM_REQ_RCVD:
2091		ret = cm_alloc_msg(cm_id_priv, &msg);
2092		if (ret)
2093			goto error1;
2094
2095		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2096			      CM_MSG_RESPONSE_REQ, service_timeout,
2097			      private_data, private_data_len);
2098		ret = ib_post_send_mad(msg, NULL);
2099		if (ret)
2100			goto error2;
2101		cm_id->state = IB_CM_MRA_REQ_SENT;
2102		break;
2103	case IB_CM_REP_RCVD:
2104		ret = cm_alloc_msg(cm_id_priv, &msg);
2105		if (ret)
2106			goto error1;
2107
2108		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2109			      CM_MSG_RESPONSE_REP, service_timeout,
2110			      private_data, private_data_len);
2111		ret = ib_post_send_mad(msg, NULL);
2112		if (ret)
2113			goto error2;
2114		cm_id->state = IB_CM_MRA_REP_SENT;
2115		break;
2116	case IB_CM_ESTABLISHED:
2117		ret = cm_alloc_msg(cm_id_priv, &msg);
2118		if (ret)
2119			goto error1;
2120
2121		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2122			      CM_MSG_RESPONSE_OTHER, service_timeout,
2123			      private_data, private_data_len);
2124		ret = ib_post_send_mad(msg, NULL);
2125		if (ret)
2126			goto error2;
2127		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2128		break;
2129	default:
2130		ret = -EINVAL;
2131		goto error1;
2132	}
2133	cm_id_priv->service_timeout = service_timeout;
2134	cm_set_private_data(cm_id_priv, data, private_data_len);
2135	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2136	return 0;
2137
2138error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2139	kfree(data);
2140	return ret;
2141
2142error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2143	kfree(data);
2144	cm_free_msg(msg);
2145	return ret;
2146}
2147EXPORT_SYMBOL(ib_send_cm_mra);
2148
2149static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2150{
2151	switch (cm_mra_get_msg_mraed(mra_msg)) {
2152	case CM_MSG_RESPONSE_REQ:
2153		return cm_acquire_id(mra_msg->remote_comm_id, 0);
2154	case CM_MSG_RESPONSE_REP:
2155	case CM_MSG_RESPONSE_OTHER:
2156		return cm_acquire_id(mra_msg->remote_comm_id,
2157				     mra_msg->local_comm_id);
2158	default:
2159		return NULL;
2160	}
2161}
2162
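/*
 * Handle a received MRA.  The retransmission timeout of the
 * outstanding REQ, REP, or LAP is extended by the service timeout
 * carried in the MRA plus the packet lifetime of the path.
 */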
2163static int cm_mra_handler(struct cm_work *work)
2164{
2165	struct cm_id_private *cm_id_priv;
2166	struct cm_mra_msg *mra_msg;
2167	unsigned long flags;
2168	int timeout, ret;
2169
2170	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2171	cm_id_priv = cm_acquire_mraed_id(mra_msg);
2172	if (!cm_id_priv)
2173		return -EINVAL;
2174
2175	work->cm_event.private_data = &mra_msg->private_data;
2176	work->cm_event.param.mra_rcvd.service_timeout =
2177					cm_mra_get_service_timeout(mra_msg);
2178	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2179		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2180
2181	spin_lock_irqsave(&cm_id_priv->lock, flags);
2182	switch (cm_id_priv->id.state) {
2183	case IB_CM_REQ_SENT:
2184		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2185		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2186				  cm_id_priv->msg, timeout))
2187			goto out;
2188		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2189		break;
2190	case IB_CM_REP_SENT:
2191		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2192		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2193				  cm_id_priv->msg, timeout))
2194			goto out;
2195		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2196		break;
2197	case IB_CM_ESTABLISHED:
2198		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2199		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2200		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2201				  cm_id_priv->msg, timeout))
2202			goto out;
2203		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2204		break;
2205	default:
2206		goto out;
2207	}
2208
2209	cm_id_priv->msg->context[1] = (void *) (unsigned long)
2210				      cm_id_priv->id.state;
2211	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2212	if (!ret)
2213		list_add_tail(&work->list, &cm_id_priv->work_list);
2214	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2215
2216	if (ret)
2217		cm_process_work(cm_id_priv, work);
2218	else
2219		cm_deref_id(cm_id_priv);
2220	return 0;
2221out:
2222	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2223	cm_deref_id(cm_id_priv);
2224	return -EINVAL;
2225}
2226
2227static void cm_format_lap(struct cm_lap_msg *lap_msg,
2228			  struct cm_id_private *cm_id_priv,
2229			  struct ib_sa_path_rec *alternate_path,
2230			  const void *private_data,
2231			  u8 private_data_len)
2232{
2233	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2234			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2235	lap_msg->local_comm_id = cm_id_priv->id.local_id;
2236	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2237	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2238	/* todo: need remote CM response timeout */
2239	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2240	lap_msg->alt_local_lid = alternate_path->slid;
2241	lap_msg->alt_remote_lid = alternate_path->dlid;
2242	lap_msg->alt_local_gid = alternate_path->sgid;
2243	lap_msg->alt_remote_gid = alternate_path->dgid;
2244	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2245	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2246	lap_msg->alt_hop_limit = alternate_path->hop_limit;
2247	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2248	cm_lap_set_sl(lap_msg, alternate_path->sl);
2249	cm_lap_set_subnet_local(lap_msg, 1); /* only local-subnet alternate paths supported */
2250	cm_lap_set_local_ack_timeout(lap_msg,
2251		min(31, alternate_path->packet_life_time + 1));
2252
2253	if (private_data && private_data_len)
2254		memcpy(lap_msg->private_data, private_data, private_data_len);
2255}
2256
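/*
 * Send a LAP to propose an alternate path.  The connection must be
 * established with no other alternate path change in progress.
 */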
2257int ib_send_cm_lap(struct ib_cm_id *cm_id,
2258		   struct ib_sa_path_rec *alternate_path,
2259		   const void *private_data,
2260		   u8 private_data_len)
2261{
2262	struct cm_id_private *cm_id_priv;
2263	struct ib_mad_send_buf *msg;
2264	unsigned long flags;
2265	int ret;
2266
2267	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2268		return -EINVAL;
2269
2270	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2271	spin_lock_irqsave(&cm_id_priv->lock, flags);
2272	if (cm_id->state != IB_CM_ESTABLISHED ||
2273	    cm_id->lap_state != IB_CM_LAP_IDLE) {
2274		ret = -EINVAL;
2275		goto out;
2276	}
2277
2278	ret = cm_alloc_msg(cm_id_priv, &msg);
2279	if (ret)
2280		goto out;
2281
2282	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2283		      alternate_path, private_data, private_data_len);
2284	msg->timeout_ms = cm_id_priv->timeout_ms;
2285	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2286
2287	ret = ib_post_send_mad(msg, NULL);
2288	if (ret) {
2289		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2290		cm_free_msg(msg);
2291		return ret;
2292	}
2293
2294	cm_id->lap_state = IB_CM_LAP_SENT;
2295	cm_id_priv->msg = msg;
2296
2297out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2298	return ret;
2299}
2300EXPORT_SYMBOL(ib_send_cm_lap);
2301
2302static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2303				    struct cm_lap_msg *lap_msg)
2304{
2305	memset(path, 0, sizeof *path);
2306	path->dgid = lap_msg->alt_local_gid;
2307	path->sgid = lap_msg->alt_remote_gid;
2308	path->dlid = lap_msg->alt_local_lid;
2309	path->slid = lap_msg->alt_remote_lid;
2310	path->flow_label = cm_lap_get_flow_label(lap_msg);
2311	path->hop_limit = lap_msg->alt_hop_limit;
2312	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2313	path->reversible = 1;
2314	/* pkey is same as in REQ */
2315	path->sl = cm_lap_get_sl(lap_msg);
2316	path->mtu_selector = IB_SA_EQ;
2317	/* mtu is same as in REQ */
2318	path->rate_selector = IB_SA_EQ;
2319	path->rate = cm_lap_get_packet_rate(lap_msg);
2320	path->packet_life_time_selector = IB_SA_EQ;
2321	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2322	path->packet_life_time -= (path->packet_life_time > 0);
2323}
2324
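/*
 * Handle a received LAP.  A duplicate LAP that was already answered
 * with an MRA has the MRA resent; otherwise the proposed alternate
 * path is reported to the consumer.
 */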
2325static int cm_lap_handler(struct cm_work *work)
2326{
2327	struct cm_id_private *cm_id_priv;
2328	struct cm_lap_msg *lap_msg;
2329	struct ib_cm_lap_event_param *param;
2330	struct ib_mad_send_buf *msg = NULL;
2331	unsigned long flags;
2332	int ret;
2333
2334	/* todo: verify LAP request and send reject APR if invalid. */
2335	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2336	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2337				   lap_msg->local_comm_id);
2338	if (!cm_id_priv)
2339		return -EINVAL;
2340
2341	param = &work->cm_event.param.lap_rcvd;
2342	param->alternate_path = &work->path[0];
2343	cm_format_path_from_lap(param->alternate_path, lap_msg);
2344	work->cm_event.private_data = &lap_msg->private_data;
2345
2346	spin_lock_irqsave(&cm_id_priv->lock, flags);
2347	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2348		goto unlock;
2349
2350	switch (cm_id_priv->id.lap_state) {
2351	case IB_CM_LAP_IDLE:
2352		break;
2353	case IB_CM_MRA_LAP_SENT:
2354		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2355			goto unlock;
2356
2357		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2358			      CM_MSG_RESPONSE_OTHER,
2359			      cm_id_priv->service_timeout,
2360			      cm_id_priv->private_data,
2361			      cm_id_priv->private_data_len);
2362		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2363
2364		if (ib_post_send_mad(msg, NULL))
2365			cm_free_msg(msg);
2366		goto deref;
2367	default:
2368		goto unlock;
2369	}
2370
2371	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2372	cm_id_priv->tid = lap_msg->hdr.tid;
2373	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2374	if (!ret)
2375		list_add_tail(&work->list, &cm_id_priv->work_list);
2376	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2377
2378	if (ret)
2379		cm_process_work(cm_id_priv, work);
2380	else
2381		cm_deref_id(cm_id_priv);
2382	return 0;
2383
2384unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2385deref:	cm_deref_id(cm_id_priv);
2386	return -EINVAL;
2387}
2388
2389static void cm_format_apr(struct cm_apr_msg *apr_msg,
2390			  struct cm_id_private *cm_id_priv,
2391			  enum ib_cm_apr_status status,
2392			  void *info,
2393			  u8 info_length,
2394			  const void *private_data,
2395			  u8 private_data_len)
2396{
2397	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2398	apr_msg->local_comm_id = cm_id_priv->id.local_id;
2399	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2400	apr_msg->ap_status = (u8) status;
2401
2402	if (info && info_length) {
2403		apr_msg->info_length = info_length;
2404		memcpy(apr_msg->info, info, info_length);
2405	}
2406
2407	if (private_data && private_data_len)
2408		memcpy(apr_msg->private_data, private_data, private_data_len);
2409}
2410
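/*
 * Send an APR in reply to a received LAP, accepting or rejecting the
 * proposed alternate path, and return the LAP state to idle.
 */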
2411int ib_send_cm_apr(struct ib_cm_id *cm_id,
2412		   enum ib_cm_apr_status status,
2413		   void *info,
2414		   u8 info_length,
2415		   const void *private_data,
2416		   u8 private_data_len)
2417{
2418	struct cm_id_private *cm_id_priv;
2419	struct ib_mad_send_buf *msg;
2420	unsigned long flags;
2421	int ret;
2422
2423	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2424	    (info && info_length > IB_CM_APR_INFO_LENGTH))
2425		return -EINVAL;
2426
2427	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2428	spin_lock_irqsave(&cm_id_priv->lock, flags);
2429	if (cm_id->state != IB_CM_ESTABLISHED ||
2430	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
2431	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2432		ret = -EINVAL;
2433		goto out;
2434	}
2435
2436	ret = cm_alloc_msg(cm_id_priv, &msg);
2437	if (ret)
2438		goto out;
2439
2440	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2441		      info, info_length, private_data, private_data_len);
2442	ret = ib_post_send_mad(msg, NULL);
2443	if (ret) {
2444		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2445		cm_free_msg(msg);
2446		return ret;
2447	}
2448
2449	cm_id->lap_state = IB_CM_LAP_IDLE;
2450out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2451	return ret;
2452}
2453EXPORT_SYMBOL(ib_send_cm_apr);
2454
2455static int cm_apr_handler(struct cm_work *work)
2456{
2457	struct cm_id_private *cm_id_priv;
2458	struct cm_apr_msg *apr_msg;
2459	unsigned long flags;
2460	int ret;
2461
2462	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2463	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2464				   apr_msg->local_comm_id);
2465	if (!cm_id_priv)
2466		return -EINVAL; /* Unmatched reply. */
2467
2468	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2469	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2470	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2471	work->cm_event.private_data = &apr_msg->private_data;
2472
2473	spin_lock_irqsave(&cm_id_priv->lock, flags);
2474	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2475	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2476	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2477		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2478		goto out;
2479	}
2480	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2481	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2482	cm_id_priv->msg = NULL;
2483
2484	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2485	if (!ret)
2486		list_add_tail(&work->list, &cm_id_priv->work_list);
2487	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2488
2489	if (ret)
2490		cm_process_work(cm_id_priv, work);
2491	else
2492		cm_deref_id(cm_id_priv);
2493	return 0;
2494out:
2495	cm_deref_id(cm_id_priv);
2496	return -EINVAL;
2497}
2498
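/*
 * The timewait period for a connection has expired.  Remove the
 * timewait entry and, if the cm_id is still in TIMEWAIT for this
 * remote QP, return it to IDLE and report the event.
 */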
2499static int cm_timewait_handler(struct cm_work *work)
2500{
2501	struct cm_timewait_info *timewait_info;
2502	struct cm_id_private *cm_id_priv;
2503	unsigned long flags;
2504	int ret;
2505
2506	timewait_info = (struct cm_timewait_info *)work;
2507	cm_cleanup_timewait(timewait_info);
2508
2509	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2510				   timewait_info->work.remote_id);
2511	if (!cm_id_priv)
2512		return -EINVAL;
2513
2514	spin_lock_irqsave(&cm_id_priv->lock, flags);
2515	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2516	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2517		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2518		goto out;
2519	}
2520	cm_id_priv->id.state = IB_CM_IDLE;
2521	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2522	if (!ret)
2523		list_add_tail(&work->list, &cm_id_priv->work_list);
2524	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2525
2526	if (ret)
2527		cm_process_work(cm_id_priv, work);
2528	else
2529		cm_deref_id(cm_id_priv);
2530	return 0;
2531out:
2532	cm_deref_id(cm_id_priv);
2533	return -EINVAL;
2534}
2535
2536static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2537			       struct cm_id_private *cm_id_priv,
2538			       struct ib_cm_sidr_req_param *param)
2539{
2540	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2541			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2542	sidr_req_msg->request_id = cm_id_priv->id.local_id;
2543	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
2544	sidr_req_msg->service_id = param->service_id;
2545
2546	if (param->private_data && param->private_data_len)
2547		memcpy(sidr_req_msg->private_data, param->private_data,
2548		       param->private_data_len);
2549}
2550
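/*
 * Send a SIDR REQ to resolve a service ID into a QPN and Q_Key.  A
 * path to the target must be supplied, and the cm_id must be idle.
 */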
2551int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2552			struct ib_cm_sidr_req_param *param)
2553{
2554	struct cm_id_private *cm_id_priv;
2555	struct ib_mad_send_buf *msg;
2556	unsigned long flags;
2557	int ret;
2558
2559	if (!param->path || (param->private_data &&
2560	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2561		return -EINVAL;
2562
2563	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2564	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2565	if (ret)
2566		goto out;
2567
2568	cm_id->service_id = param->service_id;
2569	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2570	cm_id_priv->timeout_ms = param->timeout_ms;
2571	cm_id_priv->max_cm_retries = param->max_cm_retries;
2572	ret = cm_alloc_msg(cm_id_priv, &msg);
2573	if (ret)
2574		goto out;
2575
2576	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2577			   param);
2578	msg->timeout_ms = cm_id_priv->timeout_ms;
2579	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2580
2581	spin_lock_irqsave(&cm_id_priv->lock, flags);
2582	if (cm_id->state == IB_CM_IDLE)
2583		ret = ib_post_send_mad(msg, NULL);
2584	else
2585		ret = -EINVAL;
2586
2587	if (ret) {
2588		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2589		cm_free_msg(msg);
2590		goto out;
2591	}
2592	cm_id->state = IB_CM_SIDR_REQ_SENT;
2593	cm_id_priv->msg = msg;
2594	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2595out:
2596	return ret;
2597}
2598EXPORT_SYMBOL(ib_send_cm_sidr_req);
2599
2600static void cm_format_sidr_req_event(struct cm_work *work,
2601				     struct ib_cm_id *listen_id)
2602{
2603	struct cm_sidr_req_msg *sidr_req_msg;
2604	struct ib_cm_sidr_req_event_param *param;
2605
2606	sidr_req_msg = (struct cm_sidr_req_msg *)
2607				work->mad_recv_wc->recv_buf.mad;
2608	param = &work->cm_event.param.sidr_req_rcvd;
2609	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2610	param->listen_id = listen_id;
2611	param->port = work->port->port_num;
2612	work->cm_event.private_data = &sidr_req_msg->private_data;
2613}
2614
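/*
 * Handle a received SIDR REQ.  A new cm_id is created for the request,
 * duplicates are dropped, and if a matching listener is found the
 * request is passed to that listener's callback.
 */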
2615static int cm_sidr_req_handler(struct cm_work *work)
2616{
2617	struct ib_cm_id *cm_id;
2618	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2619	struct cm_sidr_req_msg *sidr_req_msg;
2620	struct ib_wc *wc;
2621	unsigned long flags;
2622
2623	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2624	if (IS_ERR(cm_id))
2625		return PTR_ERR(cm_id);
2626	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2627
2628	/* Record SGID/SLID and request ID for lookup. */
2629	sidr_req_msg = (struct cm_sidr_req_msg *)
2630				work->mad_recv_wc->recv_buf.mad;
2631	wc = work->mad_recv_wc->wc;
2632	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2633	cm_id_priv->av.dgid.global.interface_id = 0;
2634	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2635				&cm_id_priv->av);
2636	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2637	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2638	cm_id_priv->tid = sidr_req_msg->hdr.tid;
2639	atomic_inc(&cm_id_priv->work_count);
2640
2641	spin_lock_irqsave(&cm.lock, flags);
2642	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2643	if (cur_cm_id_priv) {
2644		spin_unlock_irqrestore(&cm.lock, flags);
2645		goto out; /* Duplicate message. */
2646	}
2647	cur_cm_id_priv = cm_find_listen(cm_id->device,
2648					sidr_req_msg->service_id);
2649	if (!cur_cm_id_priv) {
2650		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2651		spin_unlock_irqrestore(&cm.lock, flags);
2652		/* todo: reply with no match */
2653		goto out; /* No match. */
2654	}
2655	atomic_inc(&cur_cm_id_priv->refcount);
2656	spin_unlock_irqrestore(&cm.lock, flags);
2657
2658	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2659	cm_id_priv->id.context = cur_cm_id_priv->id.context;
2660	cm_id_priv->id.service_id = sidr_req_msg->service_id;
2661	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2662
2663	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2664	cm_process_work(cm_id_priv, work);
2665	cm_deref_id(cur_cm_id_priv);
2666	return 0;
2667out:
2668	ib_destroy_cm_id(&cm_id_priv->id);
2669	return -EINVAL;
2670}
2671
2672static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2673			       struct cm_id_private *cm_id_priv,
2674			       struct ib_cm_sidr_rep_param *param)
2675{
2676	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2677			  cm_id_priv->tid);
2678	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2679	sidr_rep_msg->status = param->status;
2680	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2681	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2682	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2683
2684	if (param->info && param->info_length)
2685		memcpy(sidr_rep_msg->info, param->info, param->info_length);
2686
2687	if (param->private_data && param->private_data_len)
2688		memcpy(sidr_rep_msg->private_data, param->private_data,
2689		       param->private_data_len);
2690}
2691
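/*
 * Send a SIDR REP answering a SIDR REQ, then return the cm_id to idle
 * and remove it from the remote SIDR table.
 */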
2692int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2693			struct ib_cm_sidr_rep_param *param)
2694{
2695	struct cm_id_private *cm_id_priv;
2696	struct ib_mad_send_buf *msg;
2697	unsigned long flags;
2698	int ret;
2699
2700	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2701	    (param->private_data &&
2702	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2703		return -EINVAL;
2704
2705	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2706	spin_lock_irqsave(&cm_id_priv->lock, flags);
2707	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2708		ret = -EINVAL;
2709		goto error;
2710	}
2711
2712	ret = cm_alloc_msg(cm_id_priv, &msg);
2713	if (ret)
2714		goto error;
2715
2716	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2717			   param);
2718	ret = ib_post_send_mad(msg, NULL);
2719	if (ret) {
2720		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2721		cm_free_msg(msg);
2722		return ret;
2723	}
2724	cm_id->state = IB_CM_IDLE;
2725	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2726
2727	spin_lock_irqsave(&cm.lock, flags);
2728	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2729	spin_unlock_irqrestore(&cm.lock, flags);
2730	return 0;
2731
2732error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2733	return ret;
2734}
2735EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2736
2737static void cm_format_sidr_rep_event(struct cm_work *work)
2738{
2739	struct cm_sidr_rep_msg *sidr_rep_msg;
2740	struct ib_cm_sidr_rep_event_param *param;
2741
2742	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2743				work->mad_recv_wc->recv_buf.mad;
2744	param = &work->cm_event.param.sidr_rep_rcvd;
2745	param->status = sidr_rep_msg->status;
2746	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2747	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2748	param->info = &sidr_rep_msg->info;
2749	param->info_len = sidr_rep_msg->info_length;
2750	work->cm_event.private_data = &sidr_rep_msg->private_data;
2751}
2752
2753static int cm_sidr_rep_handler(struct cm_work *work)
2754{
2755	struct cm_sidr_rep_msg *sidr_rep_msg;
2756	struct cm_id_private *cm_id_priv;
2757	unsigned long flags;
2758
2759	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2760				work->mad_recv_wc->recv_buf.mad;
2761	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2762	if (!cm_id_priv)
2763		return -EINVAL; /* Unmatched reply. */
2764
2765	spin_lock_irqsave(&cm_id_priv->lock, flags);
2766	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2767		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2768		goto out;
2769	}
2770	cm_id_priv->id.state = IB_CM_IDLE;
2771	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2772	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2773
2774	cm_format_sidr_rep_event(work);
2775	cm_process_work(cm_id_priv, work);
2776	return 0;
2777out:
2778	cm_deref_id(cm_id_priv);
2779	return -EINVAL;
2780}
2781
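/*
 * A CM message that expected a response failed to send or timed out
 * waiting for a reply.  Report the matching *_ERROR event and back the
 * cm_id out of the state that was waiting on the response.
 */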
2782static void cm_process_send_error(struct ib_mad_send_buf *msg,
2783				  enum ib_wc_status wc_status)
2784{
2785	struct cm_id_private *cm_id_priv;
2786	struct ib_cm_event cm_event;
2787	enum ib_cm_state state;
2788	unsigned long flags;
2789	int ret;
2790
2791	memset(&cm_event, 0, sizeof cm_event);
2792	cm_id_priv = msg->context[0];
2793
2794	/* Discard old sends or ones without a response. */
2795	spin_lock_irqsave(&cm_id_priv->lock, flags);
2796	state = (enum ib_cm_state) (unsigned long) msg->context[1];
2797	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2798		goto discard;
2799
2800	switch (state) {
2801	case IB_CM_REQ_SENT:
2802	case IB_CM_MRA_REQ_RCVD:
2803		cm_reset_to_idle(cm_id_priv);
2804		cm_event.event = IB_CM_REQ_ERROR;
2805		break;
2806	case IB_CM_REP_SENT:
2807	case IB_CM_MRA_REP_RCVD:
2808		cm_reset_to_idle(cm_id_priv);
2809		cm_event.event = IB_CM_REP_ERROR;
2810		break;
2811	case IB_CM_DREQ_SENT:
2812		cm_enter_timewait(cm_id_priv);
2813		cm_event.event = IB_CM_DREQ_ERROR;
2814		break;
2815	case IB_CM_SIDR_REQ_SENT:
2816		cm_id_priv->id.state = IB_CM_IDLE;
2817		cm_event.event = IB_CM_SIDR_REQ_ERROR;
2818		break;
2819	default:
2820		goto discard;
2821	}
2822	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2823	cm_event.param.send_status = wc_status;
2824
2825	/* No other events can occur on the cm_id at this point. */
2826	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2827	cm_free_msg(msg);
2828	if (ret)
2829		ib_destroy_cm_id(&cm_id_priv->id);
2830	return;
2831discard:
2832	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2833	cm_free_msg(msg);
2834}
2835
2836static void cm_send_handler(struct ib_mad_agent *mad_agent,
2837			    struct ib_mad_send_wc *mad_send_wc)
2838{
2839	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
2840
2841	switch (mad_send_wc->status) {
2842	case IB_WC_SUCCESS:
2843	case IB_WC_WR_FLUSH_ERR:
2844		cm_free_msg(msg);
2845		break;
2846	default:
2847		if (msg->context[0] && msg->context[1])
2848			cm_process_send_error(msg, mad_send_wc->status);
2849		else
2850			cm_free_msg(msg);
2851		break;
2852	}
2853}
2854
2855static void cm_work_handler(void *data)
2856{
2857	struct cm_work *work = data;
2858	int ret;
2859
2860	switch (work->cm_event.event) {
2861	case IB_CM_REQ_RECEIVED:
2862		ret = cm_req_handler(work);
2863		break;
2864	case IB_CM_MRA_RECEIVED:
2865		ret = cm_mra_handler(work);
2866		break;
2867	case IB_CM_REJ_RECEIVED:
2868		ret = cm_rej_handler(work);
2869		break;
2870	case IB_CM_REP_RECEIVED:
2871		ret = cm_rep_handler(work);
2872		break;
2873	case IB_CM_RTU_RECEIVED:
2874		ret = cm_rtu_handler(work);
2875		break;
2876	case IB_CM_USER_ESTABLISHED:
2877		ret = cm_establish_handler(work);
2878		break;
2879	case IB_CM_DREQ_RECEIVED:
2880		ret = cm_dreq_handler(work);
2881		break;
2882	case IB_CM_DREP_RECEIVED:
2883		ret = cm_drep_handler(work);
2884		break;
2885	case IB_CM_SIDR_REQ_RECEIVED:
2886		ret = cm_sidr_req_handler(work);
2887		break;
2888	case IB_CM_SIDR_REP_RECEIVED:
2889		ret = cm_sidr_rep_handler(work);
2890		break;
2891	case IB_CM_LAP_RECEIVED:
2892		ret = cm_lap_handler(work);
2893		break;
2894	case IB_CM_APR_RECEIVED:
2895		ret = cm_apr_handler(work);
2896		break;
2897	case IB_CM_TIMEWAIT_EXIT:
2898		ret = cm_timewait_handler(work);
2899		break;
2900	default:
2901		ret = -EINVAL;
2902		break;
2903	}
2904	if (ret)
2905		cm_free_work(work);
2906}
2907
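/*
 * Called by a consumer to force a connection into the ESTABLISHED
 * state, typically when data arrives on the QP before the RTU.  The
 * state change is applied immediately; the event itself is delivered
 * from the workqueue.
 */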
2908int ib_cm_establish(struct ib_cm_id *cm_id)
2909{
2910	struct cm_id_private *cm_id_priv;
2911	struct cm_work *work;
2912	unsigned long flags;
2913	int ret = 0;
2914
2915	work = kmalloc(sizeof *work, GFP_ATOMIC);
2916	if (!work)
2917		return -ENOMEM;
2918
2919	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2920	spin_lock_irqsave(&cm_id_priv->lock, flags);
2921	switch (cm_id->state) {
2923	case IB_CM_REP_SENT:
2924	case IB_CM_MRA_REP_RCVD:
2925		cm_id->state = IB_CM_ESTABLISHED;
2926		break;
2927	case IB_CM_ESTABLISHED:
2928		ret = -EISCONN;
2929		break;
2930	default:
2931		ret = -EINVAL;
2932		break;
2933	}
2934	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2935
2936	if (ret) {
2937		kfree(work);
2938		goto out;
2939	}
2940
2941	/*
2942	 * The CM worker thread may try to destroy the cm_id before it
2943	 * can execute this work item.  To prevent potential deadlock,
2944	 * we need to find the cm_id once we're in the context of the
2945	 * worker thread, rather than holding a reference on it.
2946	 */
2947	INIT_WORK(&work->work, cm_work_handler, work);
2948	work->local_id = cm_id->local_id;
2949	work->remote_id = cm_id->remote_id;
2950	work->mad_recv_wc = NULL;
2951	work->cm_event.event = IB_CM_USER_ESTABLISHED;
2952	queue_work(cm.wq, &work->work);
2953out:
2954	return ret;
2955}
2956EXPORT_SYMBOL(ib_cm_establish);
2957
2958static void cm_recv_handler(struct ib_mad_agent *mad_agent,
2959			    struct ib_mad_recv_wc *mad_recv_wc)
2960{
2961	struct cm_work *work;
2962	enum ib_cm_event_type event;
2963	int paths = 0;
2964
2965	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
2966	case CM_REQ_ATTR_ID:
2967		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
2968						    alt_local_lid != 0);
2969		event = IB_CM_REQ_RECEIVED;
2970		break;
2971	case CM_MRA_ATTR_ID:
2972		event = IB_CM_MRA_RECEIVED;
2973		break;
2974	case CM_REJ_ATTR_ID:
2975		event = IB_CM_REJ_RECEIVED;
2976		break;
2977	case CM_REP_ATTR_ID:
2978		event = IB_CM_REP_RECEIVED;
2979		break;
2980	case CM_RTU_ATTR_ID:
2981		event = IB_CM_RTU_RECEIVED;
2982		break;
2983	case CM_DREQ_ATTR_ID:
2984		event = IB_CM_DREQ_RECEIVED;
2985		break;
2986	case CM_DREP_ATTR_ID:
2987		event = IB_CM_DREP_RECEIVED;
2988		break;
2989	case CM_SIDR_REQ_ATTR_ID:
2990		event = IB_CM_SIDR_REQ_RECEIVED;
2991		break;
2992	case CM_SIDR_REP_ATTR_ID:
2993		event = IB_CM_SIDR_REP_RECEIVED;
2994		break;
2995	case CM_LAP_ATTR_ID:
2996		paths = 1;
2997		event = IB_CM_LAP_RECEIVED;
2998		break;
2999	case CM_APR_ATTR_ID:
3000		event = IB_CM_APR_RECEIVED;
3001		break;
3002	default:
3003		ib_free_recv_mad(mad_recv_wc);
3004		return;
3005	}
3006
3007	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3008		       GFP_KERNEL);
3009	if (!work) {
3010		ib_free_recv_mad(mad_recv_wc);
3011		return;
3012	}
3013
3014	INIT_WORK(&work->work, cm_work_handler, work);
3015	work->cm_event.event = event;
3016	work->mad_recv_wc = mad_recv_wc;
3017	work->port = (struct cm_port *)mad_agent->context;
3018	queue_work(cm.wq, &work->work);
3019}
3020
3021static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3022				struct ib_qp_attr *qp_attr,
3023				int *qp_attr_mask)
3024{
3025	unsigned long flags;
3026	int ret;
3027
3028	spin_lock_irqsave(&cm_id_priv->lock, flags);
3029	switch (cm_id_priv->id.state) {
3030	case IB_CM_REQ_SENT:
3031	case IB_CM_MRA_REQ_RCVD:
3032	case IB_CM_REQ_RCVD:
3033	case IB_CM_MRA_REQ_SENT:
3034	case IB_CM_REP_RCVD:
3035	case IB_CM_MRA_REP_SENT:
3036	case IB_CM_REP_SENT:
3037	case IB_CM_MRA_REP_RCVD:
3038	case IB_CM_ESTABLISHED:
3039		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3040				IB_QP_PKEY_INDEX | IB_QP_PORT;
3041		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
3042					   IB_ACCESS_REMOTE_WRITE;
3043		if (cm_id_priv->responder_resources)
3044			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
3045		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3046		qp_attr->port_num = cm_id_priv->av.port->port_num;
3047		ret = 0;
3048		break;
3049	default:
3050		ret = -EINVAL;
3051		break;
3052	}
3053	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3054	return ret;
3055}
3056
3057static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3058			       struct ib_qp_attr *qp_attr,
3059			       int *qp_attr_mask)
3060{
3061	unsigned long flags;
3062	int ret;
3063
3064	spin_lock_irqsave(&cm_id_priv->lock, flags);
3065	switch (cm_id_priv->id.state) {
3066	case IB_CM_REQ_RCVD:
3067	case IB_CM_MRA_REQ_SENT:
3068	case IB_CM_REP_RCVD:
3069	case IB_CM_MRA_REP_SENT:
3070	case IB_CM_REP_SENT:
3071	case IB_CM_MRA_REP_RCVD:
3072	case IB_CM_ESTABLISHED:
3073		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3074				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3075		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3076		qp_attr->path_mtu = cm_id_priv->path_mtu;
3077		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3078		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3079		if (cm_id_priv->qp_type == IB_QPT_RC) {
3080			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3081					 IB_QP_MIN_RNR_TIMER;
3082			qp_attr->max_dest_rd_atomic =
3083					cm_id_priv->responder_resources;
3084			qp_attr->min_rnr_timer = 0;
3085		}
3086		if (cm_id_priv->alt_av.ah_attr.dlid) {
3087			*qp_attr_mask |= IB_QP_ALT_PATH;
3088			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3089		}
3090		ret = 0;
3091		break;
3092	default:
3093		ret = -EINVAL;
3094		break;
3095	}
3096	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3097	return ret;
3098}
3099
3100static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3101			       struct ib_qp_attr *qp_attr,
3102			       int *qp_attr_mask)
3103{
3104	unsigned long flags;
3105	int ret;
3106
3107	spin_lock_irqsave(&cm_id_priv->lock, flags);
3108	switch (cm_id_priv->id.state) {
3109	case IB_CM_REP_RCVD:
3110	case IB_CM_MRA_REP_SENT:
3111	case IB_CM_REP_SENT:
3112	case IB_CM_MRA_REP_RCVD:
3113	case IB_CM_ESTABLISHED:
3114		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3115		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3116		if (cm_id_priv->qp_type == IB_QPT_RC) {
3117			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3118					 IB_QP_RNR_RETRY |
3119					 IB_QP_MAX_QP_RD_ATOMIC;
3120			qp_attr->timeout = cm_id_priv->local_ack_timeout;
3121			qp_attr->retry_cnt = cm_id_priv->retry_count;
3122			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3123			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3124		}
3125		if (cm_id_priv->alt_av.ah_attr.dlid) {
3126			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3127			qp_attr->path_mig_state = IB_MIG_REARM;
3128		}
3129		ret = 0;
3130		break;
3131	default:
3132		ret = -EINVAL;
3133		break;
3134	}
3135	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3136	return ret;
3137}
3138
3139int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3140		       struct ib_qp_attr *qp_attr,
3141		       int *qp_attr_mask)
3142{
3143	struct cm_id_private *cm_id_priv;
3144	int ret;
3145
3146	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3147	switch (qp_attr->qp_state) {
3148	case IB_QPS_INIT:
3149		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3150		break;
3151	case IB_QPS_RTR:
3152		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3153		break;
3154	case IB_QPS_RTS:
3155		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3156		break;
3157	default:
3158		ret = -EINVAL;
3159		break;
3160	}
3161	return ret;
3162}
3163EXPORT_SYMBOL(ib_cm_init_qp_attr);
3164
3165static __be64 cm_get_ca_guid(struct ib_device *device)
3166{
3167	struct ib_device_attr *device_attr;
3168	__be64 guid;
3169	int ret;
3170
3171	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
3172	if (!device_attr)
3173		return 0;
3174
3175	ret = ib_query_device(device, device_attr);
3176	guid = ret ? 0 : device_attr->node_guid;
3177	kfree(device_attr);
3178	return guid;
3179}
3180
3181static void cm_add_one(struct ib_device *device)
3182{
3183	struct cm_device *cm_dev;
3184	struct cm_port *port;
3185	struct ib_mad_reg_req reg_req = {
3186		.mgmt_class = IB_MGMT_CLASS_CM,
3187		.mgmt_class_version = IB_CM_CLASS_VERSION
3188	};
3189	struct ib_port_modify port_modify = {
3190		.set_port_cap_mask = IB_PORT_CM_SUP
3191	};
3192	unsigned long flags;
3193	int ret;
3194	u8 i;
3195
3196	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3197			 device->phys_port_cnt, GFP_KERNEL);
3198	if (!cm_dev)
3199		return;
3200
3201	cm_dev->device = device;
3202	cm_dev->ca_guid = cm_get_ca_guid(device);
3203	if (!cm_dev->ca_guid)
3204		goto error1;
3205
3206	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3207	for (i = 1; i <= device->phys_port_cnt; i++) {
3208		port = &cm_dev->port[i-1];
3209		port->cm_dev = cm_dev;
3210		port->port_num = i;
3211		port->mad_agent = ib_register_mad_agent(device, i,
3212							IB_QPT_GSI,
3213							&reg_req,
3214							0,
3215							cm_send_handler,
3216							cm_recv_handler,
3217							port);
3218		if (IS_ERR(port->mad_agent))
3219			goto error2;
3220
3221		ret = ib_modify_port(device, i, 0, &port_modify);
3222		if (ret)
3223			goto error3;
3224	}
3225	ib_set_client_data(device, &cm_client, cm_dev);
3226
3227	write_lock_irqsave(&cm.device_lock, flags);
3228	list_add_tail(&cm_dev->list, &cm.device_list);
3229	write_unlock_irqrestore(&cm.device_lock, flags);
3230	return;
3231
3232error3:
3233	ib_unregister_mad_agent(port->mad_agent);
3234error2:
3235	port_modify.set_port_cap_mask = 0;
3236	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3237	while (--i) {
3238		port = &cm_dev->port[i-1];
3239		ib_modify_port(device, port->port_num, 0, &port_modify);
3240		ib_unregister_mad_agent(port->mad_agent);
3241	}
3242error1:
3243	kfree(cm_dev);
3244}
3245
3246static void cm_remove_one(struct ib_device *device)
3247{
3248	struct cm_device *cm_dev;
3249	struct cm_port *port;
3250	struct ib_port_modify port_modify = {
3251		.clr_port_cap_mask = IB_PORT_CM_SUP
3252	};
3253	unsigned long flags;
3254	int i;
3255
3256	cm_dev = ib_get_client_data(device, &cm_client);
3257	if (!cm_dev)
3258		return;
3259
3260	write_lock_irqsave(&cm.device_lock, flags);
3261	list_del(&cm_dev->list);
3262	write_unlock_irqrestore(&cm.device_lock, flags);
3263
3264	for (i = 1; i <= device->phys_port_cnt; i++) {
3265		port = &cm_dev->port[i-1];
3266		ib_modify_port(device, port->port_num, 0, &port_modify);
3267		ib_unregister_mad_agent(port->mad_agent);
3268	}
3269	kfree(cm_dev);
3270}
3271
3272static int __init ib_cm_init(void)
3273{
3274	int ret;
3275
3276	memset(&cm, 0, sizeof cm);
3277	INIT_LIST_HEAD(&cm.device_list);
3278	rwlock_init(&cm.device_lock);
3279	spin_lock_init(&cm.lock);
3280	cm.listen_service_table = RB_ROOT;
3281	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3282	cm.remote_id_table = RB_ROOT;
3283	cm.remote_qp_table = RB_ROOT;
3284	cm.remote_sidr_table = RB_ROOT;
3285	idr_init(&cm.local_id_table);
3286	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3287
3288	cm.wq = create_workqueue("ib_cm");
3289	if (!cm.wq)
3290		return -ENOMEM;
3291
3292	ret = ib_register_client(&cm_client);
3293	if (ret)
3294		goto error;
3295
3296	return 0;
3297error:
3298	destroy_workqueue(cm.wq);
3299	return ret;
3300}
3301
3302static void __exit ib_cm_cleanup(void)
3303{
3304	flush_workqueue(cm.wq);
3305	destroy_workqueue(cm.wq);
3306	ib_unregister_client(&cm_client);
3307	idr_destroy(&cm.local_id_table);
3308}
3309
3310module_init(ib_cm_init);
3311module_exit(ib_cm_cleanup);
3312