cm.c revision e971b8cd19d39366b9fdc9eadafec988d785264d
1/*
2 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
36 */
37
38#include <linux/completion.h>
39#include <linux/dma-mapping.h>
40#include <linux/err.h>
41#include <linux/idr.h>
42#include <linux/interrupt.h>
43#include <linux/pci.h>
44#include <linux/random.h>
45#include <linux/rbtree.h>
46#include <linux/spinlock.h>
47#include <linux/workqueue.h>
48
49#include <rdma/ib_cache.h>
50#include <rdma/ib_cm.h>
51#include "cm_msgs.h"
52
53MODULE_AUTHOR("Sean Hefty");
54MODULE_DESCRIPTION("InfiniBand CM");
55MODULE_LICENSE("Dual BSD/GPL");
56
57static void cm_add_one(struct ib_device *device);
58static void cm_remove_one(struct ib_device *device);
59
60static struct ib_client cm_client = {
61	.name   = "cm",
62	.add    = cm_add_one,
63	.remove = cm_remove_one
64};
65
66static struct ib_cm {
67	spinlock_t lock;
68	struct list_head device_list;
69	rwlock_t device_lock;
70	struct rb_root listen_service_table;
71	u64 listen_service_id;
72	/* struct rb_root peer_service_table; todo: fix peer to peer */
73	struct rb_root remote_qp_table;
74	struct rb_root remote_id_table;
75	struct rb_root remote_sidr_table;
76	struct idr local_id_table;
77	__be32 random_id_operand;
78	struct list_head timewait_list;
79	struct workqueue_struct *wq;
80} cm;
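/*
 * Module-wide CM state.  cm.lock protects the listen/remote/SIDR red-black
 * trees, the local_id idr and listen_service_id; device_lock protects
 * device_list.  local_id_table maps (local_id ^ random_id_operand) to the
 * owning cm_id_private, and cm.wq is the workqueue used for deferred CM
 * work such as the timewait delay below.
 */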
81
82struct cm_port {
83	struct cm_device *cm_dev;
84	struct ib_mad_agent *mad_agent;
85	u8 port_num;
86};
87
88struct cm_device {
89	struct list_head list;
90	struct ib_device *device;
91	struct cm_port port[0];
92};
93
94struct cm_av {
95	struct cm_port *port;
96	union ib_gid dgid;
97	struct ib_ah_attr ah_attr;
98	u16 pkey_index;
99	u8 packet_life_time;
100};
101
102struct cm_work {
103	struct delayed_work work;
104	struct list_head list;
105	struct cm_port *port;
106	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
107	__be32 local_id;			/* Established / timewait */
108	__be32 remote_id;
109	struct ib_cm_event cm_event;
110	struct ib_sa_path_rec path[0];
111};
112
113struct cm_timewait_info {
114	struct cm_work work;			/* Must be first. */
115	struct list_head list;
116	struct rb_node remote_qp_node;
117	struct rb_node remote_id_node;
118	__be64 remote_ca_guid;
119	__be32 remote_qpn;
120	u8 inserted_remote_qp;
121	u8 inserted_remote_id;
122};
123
124struct cm_id_private {
125	struct ib_cm_id	id;
126
127	struct rb_node service_node;
128	struct rb_node sidr_id_node;
129	spinlock_t lock;	/* Do not acquire inside cm.lock */
130	struct completion comp;
131	atomic_t refcount;
132
133	struct ib_mad_send_buf *msg;
134	struct cm_timewait_info *timewait_info;
135	/* todo: use alternate port on send failure */
136	struct cm_av av;
137	struct cm_av alt_av;
138	struct ib_cm_compare_data *compare_data;
139
140	void *private_data;
141	__be64 tid;
142	__be32 local_qpn;
143	__be32 remote_qpn;
144	enum ib_qp_type qp_type;
145	__be32 sq_psn;
146	__be32 rq_psn;
147	int timeout_ms;
148	enum ib_mtu path_mtu;
149	__be16 pkey;
150	u8 private_data_len;
151	u8 max_cm_retries;
152	u8 peer_to_peer;
153	u8 responder_resources;
154	u8 initiator_depth;
155	u8 retry_count;
156	u8 rnr_retry_count;
157	u8 service_timeout;
158
159	struct list_head work_list;
160	atomic_t work_count;
161};
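/*
 * Lifetime of a cm_id_private: refcount is paired with the completion.
 * cm_deref_id() completes 'comp' on the final put, and cm_destroy_id()
 * drops its own reference and then waits on 'comp', so outstanding MAD
 * sends (which hold a reference via cm_alloc_msg()) and queued work items
 * keep the structure alive until they finish.
 */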
162
163static void cm_work_handler(struct work_struct *work);
164
165static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
166{
167	if (atomic_dec_and_test(&cm_id_priv->refcount))
168		complete(&cm_id_priv->comp);
169}
170
171static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
172			struct ib_mad_send_buf **msg)
173{
174	struct ib_mad_agent *mad_agent;
175	struct ib_mad_send_buf *m;
176	struct ib_ah *ah;
177
178	mad_agent = cm_id_priv->av.port->mad_agent;
179	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
180	if (IS_ERR(ah))
181		return PTR_ERR(ah);
182
183	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
184			       cm_id_priv->av.pkey_index,
185			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
186			       GFP_ATOMIC);
187	if (IS_ERR(m)) {
188		ib_destroy_ah(ah);
189		return PTR_ERR(m);
190	}
191
192	/* Timeout set by caller if response is expected. */
193	m->ah = ah;
194	m->retries = cm_id_priv->max_cm_retries;
195
196	atomic_inc(&cm_id_priv->refcount);
197	m->context[0] = cm_id_priv;
198	*msg = m;
199	return 0;
200}
201
202static int cm_alloc_response_msg(struct cm_port *port,
203				 struct ib_mad_recv_wc *mad_recv_wc,
204				 struct ib_mad_send_buf **msg)
205{
206	struct ib_mad_send_buf *m;
207	struct ib_ah *ah;
208
209	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
210				  mad_recv_wc->recv_buf.grh, port->port_num);
211	if (IS_ERR(ah))
212		return PTR_ERR(ah);
213
214	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
215			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
216			       GFP_ATOMIC);
217	if (IS_ERR(m)) {
218		ib_destroy_ah(ah);
219		return PTR_ERR(m);
220	}
221	m->ah = ah;
222	*msg = m;
223	return 0;
224}
225
226static void cm_free_msg(struct ib_mad_send_buf *msg)
227{
228	ib_destroy_ah(msg->ah);
229	if (msg->context[0])
230		cm_deref_id(msg->context[0]);
231	ib_free_send_mad(msg);
232}
233
234static void * cm_copy_private_data(const void *private_data,
235				   u8 private_data_len)
236{
237	void *data;
238
239	if (!private_data || !private_data_len)
240		return NULL;
241
242	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
243	if (!data)
244		return ERR_PTR(-ENOMEM);
245
246	return data;
247}
248
249static void cm_set_private_data(struct cm_id_private *cm_id_priv,
250				 void *private_data, u8 private_data_len)
251{
252	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
253		kfree(cm_id_priv->private_data);
254
255	cm_id_priv->private_data = private_data;
256	cm_id_priv->private_data_len = private_data_len;
257}
258
259static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
260				    struct ib_grh *grh, struct cm_av *av)
261{
262	av->port = port;
263	av->pkey_index = wc->pkey_index;
264	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
265			   grh, &av->ah_attr);
266}
267
268static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
269{
270	struct cm_device *cm_dev;
271	struct cm_port *port = NULL;
272	unsigned long flags;
273	int ret;
274	u8 p;
275
276	read_lock_irqsave(&cm.device_lock, flags);
277	list_for_each_entry(cm_dev, &cm.device_list, list) {
278		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
279					&p, NULL)) {
280			port = &cm_dev->port[p-1];
281			break;
282		}
283	}
284	read_unlock_irqrestore(&cm.device_lock, flags);
285
286	if (!port)
287		return -EINVAL;
288
289	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
290				  be16_to_cpu(path->pkey), &av->pkey_index);
291	if (ret)
292		return ret;
293
294	av->port = port;
295	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
296			     &av->ah_attr);
297	av->packet_life_time = path->packet_life_time;
298	return 0;
299}
300
301static int cm_alloc_id(struct cm_id_private *cm_id_priv)
302{
303	unsigned long flags;
304	int ret, id;
305	static int next_id;
306
307	do {
308		spin_lock_irqsave(&cm.lock, flags);
309		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
310					next_id++, &id);
311		spin_unlock_irqrestore(&cm.lock, flags);
312	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
313
314	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
315	return ret;
316}
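/*
 * The idr hands out a small integer index; the id that actually goes on
 * the wire is that index XORed with cm.random_id_operand, so local
 * communication IDs are not trivially guessable.  For example, idr index 5
 * with an operand of 0x1a2b3c4d (an arbitrary illustration) yields
 * local_id 0x1a2b3c48.  cm_free_id() and cm_get_id() undo the XOR before
 * touching the idr.
 */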
317
318static void cm_free_id(__be32 local_id)
319{
320	unsigned long flags;
321
322	spin_lock_irqsave(&cm.lock, flags);
323	idr_remove(&cm.local_id_table,
324		   (__force int) (local_id ^ cm.random_id_operand));
325	spin_unlock_irqrestore(&cm.lock, flags);
326}
327
328static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
329{
330	struct cm_id_private *cm_id_priv;
331
332	cm_id_priv = idr_find(&cm.local_id_table,
333			      (__force int) (local_id ^ cm.random_id_operand));
334	if (cm_id_priv) {
335		if (cm_id_priv->id.remote_id == remote_id)
336			atomic_inc(&cm_id_priv->refcount);
337		else
338			cm_id_priv = NULL;
339	}
340
341	return cm_id_priv;
342}
343
344static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
345{
346	struct cm_id_private *cm_id_priv;
347	unsigned long flags;
348
349	spin_lock_irqsave(&cm.lock, flags);
350	cm_id_priv = cm_get_id(local_id, remote_id);
351	spin_unlock_irqrestore(&cm.lock, flags);
352
353	return cm_id_priv;
354}
355
356static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
357{
358	int i;
359
360	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
361		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
362					     ((unsigned long *) mask)[i];
363}
364
365static int cm_compare_data(struct ib_cm_compare_data *src_data,
366			   struct ib_cm_compare_data *dst_data)
367{
368	u8 src[IB_CM_COMPARE_SIZE];
369	u8 dst[IB_CM_COMPARE_SIZE];
370
371	if (!src_data || !dst_data)
372		return 0;
373
374	cm_mask_copy(src, src_data->data, dst_data->mask);
375	cm_mask_copy(dst, dst_data->data, src_data->mask);
376	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
377}
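/*
 * Masked comparison used for private-data matching: each buffer is copied
 * through the other side's mask before the memcmp, so a cleared mask byte
 * wildcards that byte.  ib_cm_listen() stores the listener's data already
 * masked, and cm_compare_private_data() applies the listener's mask to the
 * incoming request's private data before comparing.  Note that
 * cm_mask_copy() works in unsigned-long-sized chunks, which assumes
 * IB_CM_COMPARE_SIZE is a multiple of sizeof(unsigned long).
 */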
378
379static int cm_compare_private_data(u8 *private_data,
380				   struct ib_cm_compare_data *dst_data)
381{
382	u8 src[IB_CM_COMPARE_SIZE];
383
384	if (!dst_data)
385		return 0;
386
387	cm_mask_copy(src, private_data, dst_data->mask);
388	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
389}
390
391static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
392{
393	struct rb_node **link = &cm.listen_service_table.rb_node;
394	struct rb_node *parent = NULL;
395	struct cm_id_private *cur_cm_id_priv;
396	__be64 service_id = cm_id_priv->id.service_id;
397	__be64 service_mask = cm_id_priv->id.service_mask;
398	int data_cmp;
399
400	while (*link) {
401		parent = *link;
402		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
403					  service_node);
404		data_cmp = cm_compare_data(cm_id_priv->compare_data,
405					   cur_cm_id_priv->compare_data);
406		if ((cur_cm_id_priv->id.service_mask & service_id) ==
407		    (service_mask & cur_cm_id_priv->id.service_id) &&
408		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
409		    !data_cmp)
410			return cur_cm_id_priv;
411
412		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
413			link = &(*link)->rb_left;
414		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
415			link = &(*link)->rb_right;
416		else if (service_id < cur_cm_id_priv->id.service_id)
417			link = &(*link)->rb_left;
418		else if (service_id > cur_cm_id_priv->id.service_id)
419			link = &(*link)->rb_right;
420		else if (data_cmp < 0)
421			link = &(*link)->rb_left;
422		else
423			link = &(*link)->rb_right;
424	}
425	rb_link_node(&cm_id_priv->service_node, parent, link);
426	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
427	return NULL;
428}
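/*
 * Listens are kept in a red-black tree ordered by (device, service_id,
 * compare_data).  A non-NULL return means an existing listener already
 * covers this (service id, mask, compare data) combination; ib_cm_listen()
 * turns that into -EBUSY.
 */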
429
430static struct cm_id_private * cm_find_listen(struct ib_device *device,
431					     __be64 service_id,
432					     u8 *private_data)
433{
434	struct rb_node *node = cm.listen_service_table.rb_node;
435	struct cm_id_private *cm_id_priv;
436	int data_cmp;
437
438	while (node) {
439		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
440		data_cmp = cm_compare_private_data(private_data,
441						   cm_id_priv->compare_data);
442		if ((cm_id_priv->id.service_mask & service_id) ==
443		     cm_id_priv->id.service_id &&
444		    (cm_id_priv->id.device == device) && !data_cmp)
445			return cm_id_priv;
446
447		if (device < cm_id_priv->id.device)
448			node = node->rb_left;
449		else if (device > cm_id_priv->id.device)
450			node = node->rb_right;
451		else if (service_id < cm_id_priv->id.service_id)
452			node = node->rb_left;
453		else if (service_id > cm_id_priv->id.service_id)
454			node = node->rb_right;
455		else if (data_cmp < 0)
456			node = node->rb_left;
457		else
458			node = node->rb_right;
459	}
460	return NULL;
461}
462
463static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
464						     *timewait_info)
465{
466	struct rb_node **link = &cm.remote_id_table.rb_node;
467	struct rb_node *parent = NULL;
468	struct cm_timewait_info *cur_timewait_info;
469	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
470	__be32 remote_id = timewait_info->work.remote_id;
471
472	while (*link) {
473		parent = *link;
474		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
475					     remote_id_node);
476		if (remote_id < cur_timewait_info->work.remote_id)
477			link = &(*link)->rb_left;
478		else if (remote_id > cur_timewait_info->work.remote_id)
479			link = &(*link)->rb_right;
480		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
481			link = &(*link)->rb_left;
482		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
483			link = &(*link)->rb_right;
484		else
485			return cur_timewait_info;
486	}
487	timewait_info->inserted_remote_id = 1;
488	rb_link_node(&timewait_info->remote_id_node, parent, link);
489	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
490	return NULL;
491}
492
493static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
494						   __be32 remote_id)
495{
496	struct rb_node *node = cm.remote_id_table.rb_node;
497	struct cm_timewait_info *timewait_info;
498
499	while (node) {
500		timewait_info = rb_entry(node, struct cm_timewait_info,
501					 remote_id_node);
502		if (remote_id < timewait_info->work.remote_id)
503			node = node->rb_left;
504		else if (remote_id > timewait_info->work.remote_id)
505			node = node->rb_right;
506		else if (remote_ca_guid < timewait_info->remote_ca_guid)
507			node = node->rb_left;
508		else if (remote_ca_guid > timewait_info->remote_ca_guid)
509			node = node->rb_right;
510		else
511			return timewait_info;
512	}
513	return NULL;
514}
515
516static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
517						      *timewait_info)
518{
519	struct rb_node **link = &cm.remote_qp_table.rb_node;
520	struct rb_node *parent = NULL;
521	struct cm_timewait_info *cur_timewait_info;
522	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
523	__be32 remote_qpn = timewait_info->remote_qpn;
524
525	while (*link) {
526		parent = *link;
527		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
528					     remote_qp_node);
529		if (remote_qpn < cur_timewait_info->remote_qpn)
530			link = &(*link)->rb_left;
531		else if (remote_qpn > cur_timewait_info->remote_qpn)
532			link = &(*link)->rb_right;
533		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
534			link = &(*link)->rb_left;
535		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
536			link = &(*link)->rb_right;
537		else
538			return cur_timewait_info;
539	}
540	timewait_info->inserted_remote_qp = 1;
541	rb_link_node(&timewait_info->remote_qp_node, parent, link);
542	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
543	return NULL;
544}
545
546static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
547						    *cm_id_priv)
548{
549	struct rb_node **link = &cm.remote_sidr_table.rb_node;
550	struct rb_node *parent = NULL;
551	struct cm_id_private *cur_cm_id_priv;
552	union ib_gid *port_gid = &cm_id_priv->av.dgid;
553	__be32 remote_id = cm_id_priv->id.remote_id;
554
555	while (*link) {
556		parent = *link;
557		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
558					  sidr_id_node);
559		if (remote_id < cur_cm_id_priv->id.remote_id)
560			link = &(*link)->rb_left;
561		else if (remote_id > cur_cm_id_priv->id.remote_id)
562			link = &(*link)->rb_right;
563		else {
564			int cmp;
565			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
566				     sizeof *port_gid);
567			if (cmp < 0)
568				link = &(*link)->rb_left;
569			else if (cmp > 0)
570				link = &(*link)->rb_right;
571			else
572				return cur_cm_id_priv;
573		}
574	}
575	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
576	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
577	return NULL;
578}
579
580static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
581			       enum ib_cm_sidr_status status)
582{
583	struct ib_cm_sidr_rep_param param;
584
585	memset(&param, 0, sizeof param);
586	param.status = status;
587	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
588}
589
590struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
591				 ib_cm_handler cm_handler,
592				 void *context)
593{
594	struct cm_id_private *cm_id_priv;
595	int ret;
596
597	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
598	if (!cm_id_priv)
599		return ERR_PTR(-ENOMEM);
600
601	cm_id_priv->id.state = IB_CM_IDLE;
602	cm_id_priv->id.device = device;
603	cm_id_priv->id.cm_handler = cm_handler;
604	cm_id_priv->id.context = context;
605	cm_id_priv->id.remote_cm_qpn = 1;
606	ret = cm_alloc_id(cm_id_priv);
607	if (ret)
608		goto error;
609
610	spin_lock_init(&cm_id_priv->lock);
611	init_completion(&cm_id_priv->comp);
612	INIT_LIST_HEAD(&cm_id_priv->work_list);
613	atomic_set(&cm_id_priv->work_count, -1);
614	atomic_set(&cm_id_priv->refcount, 1);
615	return &cm_id_priv->id;
616
617error:
618	kfree(cm_id_priv);
619	return ERR_PTR(-ENOMEM);
620}
621EXPORT_SYMBOL(ib_create_cm_id);
622
623static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
624{
625	struct cm_work *work;
626
627	if (list_empty(&cm_id_priv->work_list))
628		return NULL;
629
630	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
631	list_del(&work->list);
632	return work;
633}
634
635static void cm_free_work(struct cm_work *work)
636{
637	if (work->mad_recv_wc)
638		ib_free_recv_mad(work->mad_recv_wc);
639	kfree(work);
640}
641
642static inline int cm_convert_to_ms(int iba_time)
643{
644	/* approximate conversion to ms from 4.096us x 2^iba_time */
645	return 1 << max(iba_time - 8, 0);
646}
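/*
 * IBA encodes timeouts as 4.096us * 2^t; since 4.096us is close to 2^-8 ms,
 * the shift above approximates the value in milliseconds and never returns
 * less than 1 ms.  E.g. t = 14 maps to 1 << 6 = 64 ms versus the exact
 * 67.1 ms.
 */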
647
648static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
649{
650	if (timewait_info->inserted_remote_id) {
651		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
652		timewait_info->inserted_remote_id = 0;
653	}
654
655	if (timewait_info->inserted_remote_qp) {
656		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
657		timewait_info->inserted_remote_qp = 0;
658	}
659}
660
661static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
662{
663	struct cm_timewait_info *timewait_info;
664
665	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
666	if (!timewait_info)
667		return ERR_PTR(-ENOMEM);
668
669	timewait_info->work.local_id = local_id;
670	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
671	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
672	return timewait_info;
673}
674
675static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
676{
677	int wait_time;
678	unsigned long flags;
679
680	spin_lock_irqsave(&cm.lock, flags);
681	cm_cleanup_timewait(cm_id_priv->timewait_info);
682	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
683	spin_unlock_irqrestore(&cm.lock, flags);
684
685	/*
686	 * The cm_id could be destroyed by the user before we exit timewait.
687	 * To protect against this, the work handler looks the cm_id up again
688	 * once the timewait delay expires, before reporting the exit to the user.
689	 */
690	cm_id_priv->id.state = IB_CM_TIMEWAIT;
691	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
692	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
693			   msecs_to_jiffies(wait_time));
694	cm_id_priv->timewait_info = NULL;
695}
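/*
 * Ownership of timewait_info passes to the delayed work queued above:
 * cm_id_priv->timewait_info is cleared so cm_destroy_id() will not free
 * it, and after a delay derived from the path's packet lifetime the work
 * handler reports IB_CM_TIMEWAIT_EXIT (see cm_create_timewait_info()).
 */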
696
697static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
698{
699	unsigned long flags;
700
701	cm_id_priv->id.state = IB_CM_IDLE;
702	if (cm_id_priv->timewait_info) {
703		spin_lock_irqsave(&cm.lock, flags);
704		cm_cleanup_timewait(cm_id_priv->timewait_info);
705		spin_unlock_irqrestore(&cm.lock, flags);
706		kfree(cm_id_priv->timewait_info);
707		cm_id_priv->timewait_info = NULL;
708	}
709}
710
711static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
712{
713	struct cm_id_private *cm_id_priv;
714	struct cm_work *work;
715	unsigned long flags;
716
717	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
718retest:
719	spin_lock_irqsave(&cm_id_priv->lock, flags);
720	switch (cm_id->state) {
721	case IB_CM_LISTEN:
722		cm_id->state = IB_CM_IDLE;
723		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
724		spin_lock_irqsave(&cm.lock, flags);
725		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
726		spin_unlock_irqrestore(&cm.lock, flags);
727		break;
728	case IB_CM_SIDR_REQ_SENT:
729		cm_id->state = IB_CM_IDLE;
730		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
731		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
732		break;
733	case IB_CM_SIDR_REQ_RCVD:
734		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
735		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
736		break;
737	case IB_CM_REQ_SENT:
738		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
739		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
740		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
741			       &cm_id_priv->id.device->node_guid,
742			       sizeof cm_id_priv->id.device->node_guid,
743			       NULL, 0);
744		break;
745	case IB_CM_REQ_RCVD:
746		if (err == -ENOMEM) {
747			/* Do not send a REJ, so the peer can retry the REQ later. */
748			cm_reset_to_idle(cm_id_priv);
749			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
750		} else {
751			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
752			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
753				       NULL, 0, NULL, 0);
754		}
755		break;
756	case IB_CM_MRA_REQ_RCVD:
757	case IB_CM_REP_SENT:
758	case IB_CM_MRA_REP_RCVD:
759		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
760		/* Fall through */
761	case IB_CM_MRA_REQ_SENT:
762	case IB_CM_REP_RCVD:
763	case IB_CM_MRA_REP_SENT:
764		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
765		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
766			       NULL, 0, NULL, 0);
767		break;
768	case IB_CM_ESTABLISHED:
769		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
770		ib_send_cm_dreq(cm_id, NULL, 0);
771		goto retest;
772	case IB_CM_DREQ_SENT:
773		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
774		cm_enter_timewait(cm_id_priv);
775		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
776		break;
777	case IB_CM_DREQ_RCVD:
778		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
779		ib_send_cm_drep(cm_id, NULL, 0);
780		break;
781	default:
782		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
783		break;
784	}
785
786	cm_free_id(cm_id->local_id);
787	cm_deref_id(cm_id_priv);
788	wait_for_completion(&cm_id_priv->comp);
789	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
790		cm_free_work(work);
791	kfree(cm_id_priv->compare_data);
792	kfree(cm_id_priv->private_data);
793	kfree(cm_id_priv);
794}
795
796void ib_destroy_cm_id(struct ib_cm_id *cm_id)
797{
798	cm_destroy_id(cm_id, 0);
799}
800EXPORT_SYMBOL(ib_destroy_cm_id);
801
802int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
803		 struct ib_cm_compare_data *compare_data)
804{
805	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
806	unsigned long flags;
807	int ret = 0;
808
809	service_mask = service_mask ? service_mask :
810		       __constant_cpu_to_be64(~0ULL);
811	service_id &= service_mask;
812	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
813	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
814		return -EINVAL;
815
816	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
817	if (cm_id->state != IB_CM_IDLE)
818		return -EINVAL;
819
820	if (compare_data) {
821		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
822						   GFP_KERNEL);
823		if (!cm_id_priv->compare_data)
824			return -ENOMEM;
825		cm_mask_copy(cm_id_priv->compare_data->data,
826			     compare_data->data, compare_data->mask);
827		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
828		       IB_CM_COMPARE_SIZE);
829	}
830
831	cm_id->state = IB_CM_LISTEN;
832
833	spin_lock_irqsave(&cm.lock, flags);
834	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
835		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
836		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
837	} else {
838		cm_id->service_id = service_id;
839		cm_id->service_mask = service_mask;
840	}
841	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
842	spin_unlock_irqrestore(&cm.lock, flags);
843
844	if (cur_cm_id_priv) {
845		cm_id->state = IB_CM_IDLE;
846		kfree(cm_id_priv->compare_data);
847		cm_id_priv->compare_data = NULL;
848		ret = -EBUSY;
849	}
850	return ret;
851}
852EXPORT_SYMBOL(ib_cm_listen);
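/*
 * A minimal passive-side sketch (handler, context and service id are the
 * caller's own, shown here purely for illustration):
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_handler, my_ctx);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, cpu_to_be64(MY_SERVICE_ID), 0, NULL);
 *
 * A service_mask of 0 is treated as ~0, i.e. an exact match on service_id;
 * passing IB_CM_ASSIGN_SERVICE_ID asks the CM to assign an unused id.
 */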
853
854static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
855			  enum cm_msg_sequence msg_seq)
856{
857	u64 hi_tid, low_tid;
858
859	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
860	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
861			  (msg_seq << 30));
862	return cpu_to_be64(hi_tid | low_tid);
863}
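/*
 * Transaction ID layout: the upper 32 bits are the sending MAD agent's
 * hi_tid; the lower 32 bits are the local comm id with the message
 * sequence code (CM_MSG_SEQUENCE_REQ, CM_MSG_SEQUENCE_DREQ, ...) folded
 * into bits 30-31, keeping concurrent REQ and DREQ exchanges on the same
 * cm_id distinguishable.
 */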
864
865static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
866			      __be16 attr_id, __be64 tid)
867{
868	hdr->base_version  = IB_MGMT_BASE_VERSION;
869	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
870	hdr->class_version = IB_CM_CLASS_VERSION;
871	hdr->method	   = IB_MGMT_METHOD_SEND;
872	hdr->attr_id	   = attr_id;
873	hdr->tid	   = tid;
874}
875
876static void cm_format_req(struct cm_req_msg *req_msg,
877			  struct cm_id_private *cm_id_priv,
878			  struct ib_cm_req_param *param)
879{
880	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
881			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
882
883	req_msg->local_comm_id = cm_id_priv->id.local_id;
884	req_msg->service_id = param->service_id;
885	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
886	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
887	cm_req_set_resp_res(req_msg, param->responder_resources);
888	cm_req_set_init_depth(req_msg, param->initiator_depth);
889	cm_req_set_remote_resp_timeout(req_msg,
890				       param->remote_cm_response_timeout);
891	cm_req_set_qp_type(req_msg, param->qp_type);
892	cm_req_set_flow_ctrl(req_msg, param->flow_control);
893	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
894	cm_req_set_local_resp_timeout(req_msg,
895				      param->local_cm_response_timeout);
896	cm_req_set_retry_count(req_msg, param->retry_count);
897	req_msg->pkey = param->primary_path->pkey;
898	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
899	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
900	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
901	cm_req_set_srq(req_msg, param->srq);
902
903	req_msg->primary_local_lid = param->primary_path->slid;
904	req_msg->primary_remote_lid = param->primary_path->dlid;
905	req_msg->primary_local_gid = param->primary_path->sgid;
906	req_msg->primary_remote_gid = param->primary_path->dgid;
907	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
908	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
909	req_msg->primary_traffic_class = param->primary_path->traffic_class;
910	req_msg->primary_hop_limit = param->primary_path->hop_limit;
911	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
912	cm_req_set_primary_subnet_local(req_msg, 1); /* only subnet-local paths supported */
913	cm_req_set_primary_local_ack_timeout(req_msg,
914		min(31, param->primary_path->packet_life_time + 1));
915
916	if (param->alternate_path) {
917		req_msg->alt_local_lid = param->alternate_path->slid;
918		req_msg->alt_remote_lid = param->alternate_path->dlid;
919		req_msg->alt_local_gid = param->alternate_path->sgid;
920		req_msg->alt_remote_gid = param->alternate_path->dgid;
921		cm_req_set_alt_flow_label(req_msg,
922					  param->alternate_path->flow_label);
923		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
924		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
925		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
926		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
927		cm_req_set_alt_subnet_local(req_msg, 1); /* only subnet-local paths supported */
928		cm_req_set_alt_local_ack_timeout(req_msg,
929			min(31, param->alternate_path->packet_life_time + 1));
930	}
931
932	if (param->private_data && param->private_data_len)
933		memcpy(req_msg->private_data, param->private_data,
934		       param->private_data_len);
935}
936
937static int cm_validate_req_param(struct ib_cm_req_param *param)
938{
939	/* peer-to-peer not supported */
940	if (param->peer_to_peer)
941		return -EINVAL;
942
943	if (!param->primary_path)
944		return -EINVAL;
945
946	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
947		return -EINVAL;
948
949	if (param->private_data &&
950	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
951		return -EINVAL;
952
953	if (param->alternate_path &&
954	    (param->alternate_path->pkey != param->primary_path->pkey ||
955	     param->alternate_path->mtu != param->primary_path->mtu))
956		return -EINVAL;
957
958	return 0;
959}
960
961int ib_send_cm_req(struct ib_cm_id *cm_id,
962		   struct ib_cm_req_param *param)
963{
964	struct cm_id_private *cm_id_priv;
965	struct cm_req_msg *req_msg;
966	unsigned long flags;
967	int ret;
968
969	ret = cm_validate_req_param(param);
970	if (ret)
971		return ret;
972
973	/* Verify that we're not in timewait. */
974	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
975	spin_lock_irqsave(&cm_id_priv->lock, flags);
976	if (cm_id->state != IB_CM_IDLE) {
977		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
978		ret = -EINVAL;
979		goto out;
980	}
981	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
982
983	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
984							    id.local_id);
985	if (IS_ERR(cm_id_priv->timewait_info)) {
986		ret = PTR_ERR(cm_id_priv->timewait_info);
987		goto out;
988	}
989
990	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
991	if (ret)
992		goto error1;
993	if (param->alternate_path) {
994		ret = cm_init_av_by_path(param->alternate_path,
995					 &cm_id_priv->alt_av);
996		if (ret)
997			goto error1;
998	}
999	cm_id->service_id = param->service_id;
1000	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
1001	cm_id_priv->timeout_ms = cm_convert_to_ms(
1002				    param->primary_path->packet_life_time) * 2 +
1003				 cm_convert_to_ms(
1004				    param->remote_cm_response_timeout);
1005	cm_id_priv->max_cm_retries = param->max_cm_retries;
1006	cm_id_priv->initiator_depth = param->initiator_depth;
1007	cm_id_priv->responder_resources = param->responder_resources;
1008	cm_id_priv->retry_count = param->retry_count;
1009	cm_id_priv->path_mtu = param->primary_path->mtu;
1010	cm_id_priv->pkey = param->primary_path->pkey;
1011	cm_id_priv->qp_type = param->qp_type;
1012
1013	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
1014	if (ret)
1015		goto error1;
1016
1017	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
1018	cm_format_req(req_msg, cm_id_priv, param);
1019	cm_id_priv->tid = req_msg->hdr.tid;
1020	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
1021	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
1022
1023	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
1024	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
1025
1026	spin_lock_irqsave(&cm_id_priv->lock, flags);
1027	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
1028	if (ret) {
1029		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1030		goto error2;
1031	}
1032	BUG_ON(cm_id->state != IB_CM_IDLE);
1033	cm_id->state = IB_CM_REQ_SENT;
1034	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1035	return 0;
1036
1037error2:	cm_free_msg(cm_id_priv->msg);
1038error1:	kfree(cm_id_priv->timewait_info);
1039out:	return ret;
1040}
1041EXPORT_SYMBOL(ib_send_cm_req);
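/*
 * A minimal active-side sketch; all values are illustrative and the path
 * record, QP and service id come from the caller:
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path	= &path_rec,
 *		.service_id	= cpu_to_be64(MY_SERVICE_ID),
 *		.qp_num		= my_qp->qp_num,
 *		.qp_type	= IB_QPT_RC,
 *		.starting_psn	= my_psn,
 *		.responder_resources	    = 4,
 *		.initiator_depth	    = 4,
 *		.retry_count		    = 7,
 *		.rnr_retry_count	    = 7,
 *		.max_cm_retries		    = 15,
 *		.remote_cm_response_timeout = 20,
 *		.local_cm_response_timeout  = 20,
 *	};
 *
 *	ret = ib_send_cm_req(id, &req);
 *
 * The REQ may only be sent while the cm_id is IB_CM_IDLE; the peer's
 * response is delivered as an event through the cm_id's handler.
 */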
1042
1043static int cm_issue_rej(struct cm_port *port,
1044			struct ib_mad_recv_wc *mad_recv_wc,
1045			enum ib_cm_rej_reason reason,
1046			enum cm_msg_response msg_rejected,
1047			void *ari, u8 ari_length)
1048{
1049	struct ib_mad_send_buf *msg = NULL;
1050	struct cm_rej_msg *rej_msg, *rcv_msg;
1051	int ret;
1052
1053	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1054	if (ret)
1055		return ret;
1056
1057	/* We just need common CM header information.  Cast to any message. */
1058	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1059	rej_msg = (struct cm_rej_msg *) msg->mad;
1060
1061	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1062	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
1063	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
1064	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
1065	rej_msg->reason = cpu_to_be16(reason);
1066
1067	if (ari && ari_length) {
1068		cm_rej_set_reject_info_len(rej_msg, ari_length);
1069		memcpy(rej_msg->ari, ari, ari_length);
1070	}
1071
1072	ret = ib_post_send_mad(msg, NULL);
1073	if (ret)
1074		cm_free_msg(msg);
1075
1076	return ret;
1077}
1078
1079static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
1080				    __be32 local_qpn, __be32 remote_qpn)
1081{
1082	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1083		((local_ca_guid == remote_ca_guid) &&
1084		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1085}
1086
1087static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1088					    struct ib_sa_path_rec *primary_path,
1089					    struct ib_sa_path_rec *alt_path)
1090{
1091	memset(primary_path, 0, sizeof *primary_path);
1092	primary_path->dgid = req_msg->primary_local_gid;
1093	primary_path->sgid = req_msg->primary_remote_gid;
1094	primary_path->dlid = req_msg->primary_local_lid;
1095	primary_path->slid = req_msg->primary_remote_lid;
1096	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1097	primary_path->hop_limit = req_msg->primary_hop_limit;
1098	primary_path->traffic_class = req_msg->primary_traffic_class;
1099	primary_path->reversible = 1;
1100	primary_path->pkey = req_msg->pkey;
1101	primary_path->sl = cm_req_get_primary_sl(req_msg);
1102	primary_path->mtu_selector = IB_SA_EQ;
1103	primary_path->mtu = cm_req_get_path_mtu(req_msg);
1104	primary_path->rate_selector = IB_SA_EQ;
1105	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1106	primary_path->packet_life_time_selector = IB_SA_EQ;
1107	primary_path->packet_life_time =
1108		cm_req_get_primary_local_ack_timeout(req_msg);
1109	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1110
1111	if (req_msg->alt_local_lid) {
1112		memset(alt_path, 0, sizeof *alt_path);
1113		alt_path->dgid = req_msg->alt_local_gid;
1114		alt_path->sgid = req_msg->alt_remote_gid;
1115		alt_path->dlid = req_msg->alt_local_lid;
1116		alt_path->slid = req_msg->alt_remote_lid;
1117		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1118		alt_path->hop_limit = req_msg->alt_hop_limit;
1119		alt_path->traffic_class = req_msg->alt_traffic_class;
1120		alt_path->reversible = 1;
1121		alt_path->pkey = req_msg->pkey;
1122		alt_path->sl = cm_req_get_alt_sl(req_msg);
1123		alt_path->mtu_selector = IB_SA_EQ;
1124		alt_path->mtu = cm_req_get_path_mtu(req_msg);
1125		alt_path->rate_selector = IB_SA_EQ;
1126		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1127		alt_path->packet_life_time_selector = IB_SA_EQ;
1128		alt_path->packet_life_time =
1129			cm_req_get_alt_local_ack_timeout(req_msg);
1130		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1131	}
1132}
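/*
 * The REQ describes the path from the sender's point of view, so the
 * receiver swaps the local/remote fields to build its own (reverse) path
 * records.  packet_life_time is recovered from the ack timeout field,
 * which the sender set to life_time + 1 (see cm_format_req()), hence the
 * decrement, clamped so it never drops below zero.
 */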
1133
1134static void cm_format_req_event(struct cm_work *work,
1135				struct cm_id_private *cm_id_priv,
1136				struct ib_cm_id *listen_id)
1137{
1138	struct cm_req_msg *req_msg;
1139	struct ib_cm_req_event_param *param;
1140
1141	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1142	param = &work->cm_event.param.req_rcvd;
1143	param->listen_id = listen_id;
1144	param->port = cm_id_priv->av.port->port_num;
1145	param->primary_path = &work->path[0];
1146	if (req_msg->alt_local_lid)
1147		param->alternate_path = &work->path[1];
1148	else
1149		param->alternate_path = NULL;
1150	param->remote_ca_guid = req_msg->local_ca_guid;
1151	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1152	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1153	param->qp_type = cm_req_get_qp_type(req_msg);
1154	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1155	param->responder_resources = cm_req_get_init_depth(req_msg);
1156	param->initiator_depth = cm_req_get_resp_res(req_msg);
1157	param->local_cm_response_timeout =
1158					cm_req_get_remote_resp_timeout(req_msg);
1159	param->flow_control = cm_req_get_flow_ctrl(req_msg);
1160	param->remote_cm_response_timeout =
1161					cm_req_get_local_resp_timeout(req_msg);
1162	param->retry_count = cm_req_get_retry_count(req_msg);
1163	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1164	param->srq = cm_req_get_srq(req_msg);
1165	work->cm_event.private_data = &req_msg->private_data;
1166}
1167
1168static void cm_process_work(struct cm_id_private *cm_id_priv,
1169			    struct cm_work *work)
1170{
1171	unsigned long flags;
1172	int ret;
1173
1174	/* We will typically only have the current event to report. */
1175	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1176	cm_free_work(work);
1177
1178	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1179		spin_lock_irqsave(&cm_id_priv->lock, flags);
1180		work = cm_dequeue_work(cm_id_priv);
1181		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1182		BUG_ON(!work);
1183		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1184						&work->cm_event);
1185		cm_free_work(work);
1186	}
1187	cm_deref_id(cm_id_priv);
1188	if (ret)
1189		cm_destroy_id(&cm_id_priv->id, ret);
1190}
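/*
 * Event delivery is serialized per cm_id: work_count starts at -1, so the
 * first handler to do atomic_inc_and_test() processes its event directly
 * while later events are queued on work_list.  cm_process_work() then
 * drains the queue until the count drops below zero again, guaranteeing
 * the user's callback never runs concurrently for the same cm_id.  A
 * nonzero return from the callback destroys the id.
 */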
1191
1192static void cm_format_mra(struct cm_mra_msg *mra_msg,
1193			  struct cm_id_private *cm_id_priv,
1194			  enum cm_msg_response msg_mraed, u8 service_timeout,
1195			  const void *private_data, u8 private_data_len)
1196{
1197	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1198	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1199	mra_msg->local_comm_id = cm_id_priv->id.local_id;
1200	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1201	cm_mra_set_service_timeout(mra_msg, service_timeout);
1202
1203	if (private_data && private_data_len)
1204		memcpy(mra_msg->private_data, private_data, private_data_len);
1205}
1206
1207static void cm_format_rej(struct cm_rej_msg *rej_msg,
1208			  struct cm_id_private *cm_id_priv,
1209			  enum ib_cm_rej_reason reason,
1210			  void *ari,
1211			  u8 ari_length,
1212			  const void *private_data,
1213			  u8 private_data_len)
1214{
1215	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1216	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1217
1218	switch(cm_id_priv->id.state) {
1219	case IB_CM_REQ_RCVD:
1220		rej_msg->local_comm_id = 0;
1221		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1222		break;
1223	case IB_CM_MRA_REQ_SENT:
1224		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1225		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1226		break;
1227	case IB_CM_REP_RCVD:
1228	case IB_CM_MRA_REP_SENT:
1229		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1230		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1231		break;
1232	default:
1233		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1234		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1235		break;
1236	}
1237
1238	rej_msg->reason = cpu_to_be16(reason);
1239	if (ari && ari_length) {
1240		cm_rej_set_reject_info_len(rej_msg, ari_length);
1241		memcpy(rej_msg->ari, ari, ari_length);
1242	}
1243
1244	if (private_data && private_data_len)
1245		memcpy(rej_msg->private_data, private_data, private_data_len);
1246}
1247
1248static void cm_dup_req_handler(struct cm_work *work,
1249			       struct cm_id_private *cm_id_priv)
1250{
1251	struct ib_mad_send_buf *msg = NULL;
1252	unsigned long flags;
1253	int ret;
1254
1255	/* Quick state check to discard duplicate REQs. */
1256	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1257		return;
1258
1259	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1260	if (ret)
1261		return;
1262
1263	spin_lock_irqsave(&cm_id_priv->lock, flags);
1264	switch (cm_id_priv->id.state) {
1265	case IB_CM_MRA_REQ_SENT:
1266		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1267			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1268			      cm_id_priv->private_data,
1269			      cm_id_priv->private_data_len);
1270		break;
1271	case IB_CM_TIMEWAIT:
1272		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1273			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1274		break;
1275	default:
1276		goto unlock;
1277	}
1278	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1279
1280	ret = ib_post_send_mad(msg, NULL);
1281	if (ret)
1282		goto free;
1283	return;
1284
1285unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1286free:	cm_free_msg(msg);
1287}
1288
1289static struct cm_id_private * cm_match_req(struct cm_work *work,
1290					   struct cm_id_private *cm_id_priv)
1291{
1292	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1293	struct cm_timewait_info *timewait_info;
1294	struct cm_req_msg *req_msg;
1295	unsigned long flags;
1296
1297	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1298
1299	/* Check for duplicate REQ and stale connections. */
1300	spin_lock_irqsave(&cm.lock, flags);
1301	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1302	if (!timewait_info)
1303		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1304
1305	if (timewait_info) {
1306		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1307					   timewait_info->work.remote_id);
1308		cm_cleanup_timewait(cm_id_priv->timewait_info);
1309		spin_unlock_irqrestore(&cm.lock, flags);
1310		if (cur_cm_id_priv) {
1311			cm_dup_req_handler(work, cur_cm_id_priv);
1312			cm_deref_id(cur_cm_id_priv);
1313		} else
1314			cm_issue_rej(work->port, work->mad_recv_wc,
1315				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1316				     NULL, 0);
1317		listen_cm_id_priv = NULL;
1318		goto out;
1319	}
1320
1321	/* Find matching listen request. */
1322	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1323					   req_msg->service_id,
1324					   req_msg->private_data);
1325	if (!listen_cm_id_priv) {
1326		cm_cleanup_timewait(cm_id_priv->timewait_info);
1327		spin_unlock_irqrestore(&cm.lock, flags);
1328		cm_issue_rej(work->port, work->mad_recv_wc,
1329			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1330			     NULL, 0);
1331		goto out;
1332	}
1333	atomic_inc(&listen_cm_id_priv->refcount);
1334	atomic_inc(&cm_id_priv->refcount);
1335	cm_id_priv->id.state = IB_CM_REQ_RCVD;
1336	atomic_inc(&cm_id_priv->work_count);
1337	spin_unlock_irqrestore(&cm.lock, flags);
1338out:
1339	return listen_cm_id_priv;
1340}
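/*
 * Inserting the new timewait_info into the remote-id and remote-qpn trees
 * doubles as duplicate and stale-connection detection: a hit means either
 * a duplicate of a REQ we are already handling (re-answered via
 * cm_dup_req_handler()) or a stale connection, which is answered with an
 * IB_CM_REJ_STALE_CONN reject.
 */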
1341
1342static int cm_req_handler(struct cm_work *work)
1343{
1344	struct ib_cm_id *cm_id;
1345	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1346	struct cm_req_msg *req_msg;
1347	int ret;
1348
1349	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1350
1351	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
1352	if (IS_ERR(cm_id))
1353		return PTR_ERR(cm_id);
1354
1355	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1356	cm_id_priv->id.remote_id = req_msg->local_comm_id;
1357	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1358				work->mad_recv_wc->recv_buf.grh,
1359				&cm_id_priv->av);
1360	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1361							    id.local_id);
1362	if (IS_ERR(cm_id_priv->timewait_info)) {
1363		ret = PTR_ERR(cm_id_priv->timewait_info);
1364		goto destroy;
1365	}
1366	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1367	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1368	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1369
1370	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1371	if (!listen_cm_id_priv) {
1372		ret = -EINVAL;
1373		kfree(cm_id_priv->timewait_info);
1374		goto destroy;
1375	}
1376
1377	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1378	cm_id_priv->id.context = listen_cm_id_priv->id.context;
1379	cm_id_priv->id.service_id = req_msg->service_id;
1380	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
1381
1382	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1383	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1384	if (ret) {
1385		ib_get_cached_gid(work->port->cm_dev->device,
1386				  work->port->port_num, 0, &work->path[0].sgid);
1387		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
1388			       &work->path[0].sgid, sizeof work->path[0].sgid,
1389			       NULL, 0);
1390		goto rejected;
1391	}
1392	if (req_msg->alt_local_lid) {
1393		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1394		if (ret) {
1395			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1396				       &work->path[0].sgid,
1397				       sizeof work->path[0].sgid, NULL, 0);
1398			goto rejected;
1399		}
1400	}
1401	cm_id_priv->tid = req_msg->hdr.tid;
1402	cm_id_priv->timeout_ms = cm_convert_to_ms(
1403					cm_req_get_local_resp_timeout(req_msg));
1404	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1405	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1406	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1407	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1408	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1409	cm_id_priv->pkey = req_msg->pkey;
1410	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1411	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1412	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1413	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1414
1415	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1416	cm_process_work(cm_id_priv, work);
1417	cm_deref_id(listen_cm_id_priv);
1418	return 0;
1419
1420rejected:
1421	atomic_dec(&cm_id_priv->refcount);
1422	cm_deref_id(listen_cm_id_priv);
1423destroy:
1424	ib_destroy_cm_id(cm_id);
1425	return ret;
1426}
1427
1428static void cm_format_rep(struct cm_rep_msg *rep_msg,
1429			  struct cm_id_private *cm_id_priv,
1430			  struct ib_cm_rep_param *param)
1431{
1432	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1433	rep_msg->local_comm_id = cm_id_priv->id.local_id;
1434	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1435	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1436	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1437	rep_msg->resp_resources = param->responder_resources;
1438	rep_msg->initiator_depth = param->initiator_depth;
1439	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1440	cm_rep_set_failover(rep_msg, param->failover_accepted);
1441	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1442	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1443	cm_rep_set_srq(rep_msg, param->srq);
1444	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1445
1446	if (param->private_data && param->private_data_len)
1447		memcpy(rep_msg->private_data, param->private_data,
1448		       param->private_data_len);
1449}
1450
1451int ib_send_cm_rep(struct ib_cm_id *cm_id,
1452		   struct ib_cm_rep_param *param)
1453{
1454	struct cm_id_private *cm_id_priv;
1455	struct ib_mad_send_buf *msg;
1456	struct cm_rep_msg *rep_msg;
1457	unsigned long flags;
1458	int ret;
1459
1460	if (param->private_data &&
1461	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1462		return -EINVAL;
1463
1464	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1465	spin_lock_irqsave(&cm_id_priv->lock, flags);
1466	if (cm_id->state != IB_CM_REQ_RCVD &&
1467	    cm_id->state != IB_CM_MRA_REQ_SENT) {
1468		ret = -EINVAL;
1469		goto out;
1470	}
1471
1472	ret = cm_alloc_msg(cm_id_priv, &msg);
1473	if (ret)
1474		goto out;
1475
1476	rep_msg = (struct cm_rep_msg *) msg->mad;
1477	cm_format_rep(rep_msg, cm_id_priv, param);
1478	msg->timeout_ms = cm_id_priv->timeout_ms;
1479	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1480
1481	ret = ib_post_send_mad(msg, NULL);
1482	if (ret) {
1483		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1484		cm_free_msg(msg);
1485		return ret;
1486	}
1487
1488	cm_id->state = IB_CM_REP_SENT;
1489	cm_id_priv->msg = msg;
1490	cm_id_priv->initiator_depth = param->initiator_depth;
1491	cm_id_priv->responder_resources = param->responder_resources;
1492	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1493	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1494
1495out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1496	return ret;
1497}
1498EXPORT_SYMBOL(ib_send_cm_rep);
1499
1500static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1501			  struct cm_id_private *cm_id_priv,
1502			  const void *private_data,
1503			  u8 private_data_len)
1504{
1505	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1506	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1507	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1508
1509	if (private_data && private_data_len)
1510		memcpy(rtu_msg->private_data, private_data, private_data_len);
1511}
1512
1513int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1514		   const void *private_data,
1515		   u8 private_data_len)
1516{
1517	struct cm_id_private *cm_id_priv;
1518	struct ib_mad_send_buf *msg;
1519	unsigned long flags;
1520	void *data;
1521	int ret;
1522
1523	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1524		return -EINVAL;
1525
1526	data = cm_copy_private_data(private_data, private_data_len);
1527	if (IS_ERR(data))
1528		return PTR_ERR(data);
1529
1530	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1531	spin_lock_irqsave(&cm_id_priv->lock, flags);
1532	if (cm_id->state != IB_CM_REP_RCVD &&
1533	    cm_id->state != IB_CM_MRA_REP_SENT) {
1534		ret = -EINVAL;
1535		goto error;
1536	}
1537
1538	ret = cm_alloc_msg(cm_id_priv, &msg);
1539	if (ret)
1540		goto error;
1541
1542	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1543		      private_data, private_data_len);
1544
1545	ret = ib_post_send_mad(msg, NULL);
1546	if (ret) {
1547		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1548		cm_free_msg(msg);
1549		kfree(data);
1550		return ret;
1551	}
1552
1553	cm_id->state = IB_CM_ESTABLISHED;
1554	cm_set_private_data(cm_id_priv, data, private_data_len);
1555	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1556	return 0;
1557
1558error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1559	kfree(data);
1560	return ret;
1561}
1562EXPORT_SYMBOL(ib_send_cm_rtu);
1563
1564static void cm_format_rep_event(struct cm_work *work)
1565{
1566	struct cm_rep_msg *rep_msg;
1567	struct ib_cm_rep_event_param *param;
1568
1569	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1570	param = &work->cm_event.param.rep_rcvd;
1571	param->remote_ca_guid = rep_msg->local_ca_guid;
1572	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1573	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1574	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1575	param->responder_resources = rep_msg->initiator_depth;
1576	param->initiator_depth = rep_msg->resp_resources;
1577	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1578	param->failover_accepted = cm_rep_get_failover(rep_msg);
1579	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1580	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1581	param->srq = cm_rep_get_srq(rep_msg);
1582	work->cm_event.private_data = &rep_msg->private_data;
1583}
1584
1585static void cm_dup_rep_handler(struct cm_work *work)
1586{
1587	struct cm_id_private *cm_id_priv;
1588	struct cm_rep_msg *rep_msg;
1589	struct ib_mad_send_buf *msg = NULL;
1590	unsigned long flags;
1591	int ret;
1592
1593	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1594	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1595				   rep_msg->local_comm_id);
1596	if (!cm_id_priv)
1597		return;
1598
1599	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1600	if (ret)
1601		goto deref;
1602
1603	spin_lock_irqsave(&cm_id_priv->lock, flags);
1604	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1605		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1606			      cm_id_priv->private_data,
1607			      cm_id_priv->private_data_len);
1608	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1609		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1610			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1611			      cm_id_priv->private_data,
1612			      cm_id_priv->private_data_len);
1613	else
1614		goto unlock;
1615	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1616
1617	ret = ib_post_send_mad(msg, NULL);
1618	if (ret)
1619		goto free;
1620	goto deref;
1621
1622unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1623free:	cm_free_msg(msg);
1624deref:	cm_deref_id(cm_id_priv);
1625}
1626
1627static int cm_rep_handler(struct cm_work *work)
1628{
1629	struct cm_id_private *cm_id_priv;
1630	struct cm_rep_msg *rep_msg;
1631	unsigned long flags;
1632	int ret;
1633
1634	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1635	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1636	if (!cm_id_priv) {
1637		cm_dup_rep_handler(work);
1638		return -EINVAL;
1639	}
1640
1641	cm_format_rep_event(work);
1642
1643	spin_lock_irqsave(&cm_id_priv->lock, flags);
1644	switch (cm_id_priv->id.state) {
1645	case IB_CM_REQ_SENT:
1646	case IB_CM_MRA_REQ_RCVD:
1647		break;
1648	default:
1649		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1650		ret = -EINVAL;
1651		goto error;
1652	}
1653
1654	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1655	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1656	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1657
1658	spin_lock(&cm.lock);
1659	/* Check for duplicate REP. */
1660	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1661		spin_unlock(&cm.lock);
1662		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1663		ret = -EINVAL;
1664		goto error;
1665	}
1666	/* Check for a stale connection. */
1667	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1668		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
1669			 &cm.remote_id_table);
1670		cm_id_priv->timewait_info->inserted_remote_id = 0;
1671		spin_unlock(&cm.lock);
1672		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1673		cm_issue_rej(work->port, work->mad_recv_wc,
1674			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1675			     NULL, 0);
1676		ret = -EINVAL;
1677		goto error;
1678	}
1679	spin_unlock(&cm.lock);
1680
1681	cm_id_priv->id.state = IB_CM_REP_RCVD;
1682	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1683	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1684	cm_id_priv->initiator_depth = rep_msg->resp_resources;
1685	cm_id_priv->responder_resources = rep_msg->initiator_depth;
1686	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1687	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1688
1689	/* todo: handle peer_to_peer */
1690
1691	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1692	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1693	if (!ret)
1694		list_add_tail(&work->list, &cm_id_priv->work_list);
1695	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1696
1697	if (ret)
1698		cm_process_work(cm_id_priv, work);
1699	else
1700		cm_deref_id(cm_id_priv);
1701	return 0;
1702
1703error:
1704	cm_deref_id(cm_id_priv);
1705	return ret;
1706}
1707
1708static int cm_establish_handler(struct cm_work *work)
1709{
1710	struct cm_id_private *cm_id_priv;
1711	unsigned long flags;
1712	int ret;
1713
1714	/* See comment in cm_establish about lookup. */
1715	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1716	if (!cm_id_priv)
1717		return -EINVAL;
1718
1719	spin_lock_irqsave(&cm_id_priv->lock, flags);
1720	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1721		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1722		goto out;
1723	}
1724
1725	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1726	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1727	if (!ret)
1728		list_add_tail(&work->list, &cm_id_priv->work_list);
1729	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1730
1731	if (ret)
1732		cm_process_work(cm_id_priv, work);
1733	else
1734		cm_deref_id(cm_id_priv);
1735	return 0;
1736out:
1737	cm_deref_id(cm_id_priv);
1738	return -EINVAL;
1739}
1740
1741static int cm_rtu_handler(struct cm_work *work)
1742{
1743	struct cm_id_private *cm_id_priv;
1744	struct cm_rtu_msg *rtu_msg;
1745	unsigned long flags;
1746	int ret;
1747
1748	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1749	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1750				   rtu_msg->local_comm_id);
1751	if (!cm_id_priv)
1752		return -EINVAL;
1753
1754	work->cm_event.private_data = &rtu_msg->private_data;
1755
1756	spin_lock_irqsave(&cm_id_priv->lock, flags);
1757	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1758	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1759		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1760		goto out;
1761	}
1762	cm_id_priv->id.state = IB_CM_ESTABLISHED;
1763
1764	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1765	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1766	if (!ret)
1767		list_add_tail(&work->list, &cm_id_priv->work_list);
1768	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1769
1770	if (ret)
1771		cm_process_work(cm_id_priv, work);
1772	else
1773		cm_deref_id(cm_id_priv);
1774	return 0;
1775out:
1776	cm_deref_id(cm_id_priv);
1777	return -EINVAL;
1778}
1779
1780static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1781			  struct cm_id_private *cm_id_priv,
1782			  const void *private_data,
1783			  u8 private_data_len)
1784{
1785	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1786			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1787	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1788	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1789	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1790
1791	if (private_data && private_data_len)
1792		memcpy(dreq_msg->private_data, private_data, private_data_len);
1793}
1794
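/*
 * ib_send_cm_dreq - Send a disconnect request (DREQ) for an established
 * connection.  If the DREQ cannot be allocated or posted, the cm_id is
 * moved directly into timewait.
 */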
1795int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1796		    const void *private_data,
1797		    u8 private_data_len)
1798{
1799	struct cm_id_private *cm_id_priv;
1800	struct ib_mad_send_buf *msg;
1801	unsigned long flags;
1802	int ret;
1803
1804	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1805		return -EINVAL;
1806
1807	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1808	spin_lock_irqsave(&cm_id_priv->lock, flags);
1809	if (cm_id->state != IB_CM_ESTABLISHED) {
1810		ret = -EINVAL;
1811		goto out;
1812	}
1813
1814	ret = cm_alloc_msg(cm_id_priv, &msg);
1815	if (ret) {
1816		cm_enter_timewait(cm_id_priv);
1817		goto out;
1818	}
1819
1820	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1821		       private_data, private_data_len);
1822	msg->timeout_ms = cm_id_priv->timeout_ms;
1823	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1824
1825	ret = ib_post_send_mad(msg, NULL);
1826	if (ret) {
1827		cm_enter_timewait(cm_id_priv);
1828		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1829		cm_free_msg(msg);
1830		return ret;
1831	}
1832
1833	cm_id->state = IB_CM_DREQ_SENT;
1834	cm_id_priv->msg = msg;
1835out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1836	return ret;
1837}
1838EXPORT_SYMBOL(ib_send_cm_dreq);
1839
1840static void cm_format_drep(struct cm_drep_msg *drep_msg,
1841			  struct cm_id_private *cm_id_priv,
1842			  const void *private_data,
1843			  u8 private_data_len)
1844{
1845	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1846	drep_msg->local_comm_id = cm_id_priv->id.local_id;
1847	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1848
1849	if (private_data && private_data_len)
1850		memcpy(drep_msg->private_data, private_data, private_data_len);
1851}
1852
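/*
 * ib_send_cm_drep - Reply to a received DREQ.  The cm_id enters timewait
 * regardless of whether the DREP can actually be sent.
 */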
1853int ib_send_cm_drep(struct ib_cm_id *cm_id,
1854		    const void *private_data,
1855		    u8 private_data_len)
1856{
1857	struct cm_id_private *cm_id_priv;
1858	struct ib_mad_send_buf *msg;
1859	unsigned long flags;
1860	void *data;
1861	int ret;
1862
1863	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1864		return -EINVAL;
1865
1866	data = cm_copy_private_data(private_data, private_data_len);
1867	if (IS_ERR(data))
1868		return PTR_ERR(data);
1869
1870	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1871	spin_lock_irqsave(&cm_id_priv->lock, flags);
1872	if (cm_id->state != IB_CM_DREQ_RCVD) {
1873		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1874		kfree(data);
1875		return -EINVAL;
1876	}
1877
1878	cm_set_private_data(cm_id_priv, data, private_data_len);
1879	cm_enter_timewait(cm_id_priv);
1880
1881	ret = cm_alloc_msg(cm_id_priv, &msg);
1882	if (ret)
1883		goto out;
1884
1885	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1886		       private_data, private_data_len);
1887
1888	ret = ib_post_send_mad(msg, NULL);
1889	if (ret) {
1890		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1891		cm_free_msg(msg);
1892		return ret;
1893	}
1894
1895out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1896	return ret;
1897}
1898EXPORT_SYMBOL(ib_send_cm_drep);
1899
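/*
 * Send a DREP in response to a DREQ that does not match any local
 * connection, allowing the remote CM to complete its disconnect.
 */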
1900static int cm_issue_drep(struct cm_port *port,
1901			 struct ib_mad_recv_wc *mad_recv_wc)
1902{
1903	struct ib_mad_send_buf *msg = NULL;
1904	struct cm_dreq_msg *dreq_msg;
1905	struct cm_drep_msg *drep_msg;
1906	int ret;
1907
1908	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1909	if (ret)
1910		return ret;
1911
1912	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
1913	drep_msg = (struct cm_drep_msg *) msg->mad;
1914
1915	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
1916	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
1917	drep_msg->local_comm_id = dreq_msg->remote_comm_id;
1918
1919	ret = ib_post_send_mad(msg, NULL);
1920	if (ret)
1921		cm_free_msg(msg);
1922
1923	return ret;
1924}
1925
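/*
 * Process an incoming DREQ.  Unmatched DREQs are answered directly with a
 * DREP; a DREQ received in timewait has its DREP resent using the stored
 * private data.  Otherwise the cm_id moves to IB_CM_DREQ_RCVD and an event
 * is queued for the consumer.
 */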
1926static int cm_dreq_handler(struct cm_work *work)
1927{
1928	struct cm_id_private *cm_id_priv;
1929	struct cm_dreq_msg *dreq_msg;
1930	struct ib_mad_send_buf *msg = NULL;
1931	unsigned long flags;
1932	int ret;
1933
1934	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1935	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1936				   dreq_msg->local_comm_id);
1937	if (!cm_id_priv) {
1938		cm_issue_drep(work->port, work->mad_recv_wc);
1939		return -EINVAL;
1940	}
1941
1942	work->cm_event.private_data = &dreq_msg->private_data;
1943
1944	spin_lock_irqsave(&cm_id_priv->lock, flags);
1945	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1946		goto unlock;
1947
1948	switch (cm_id_priv->id.state) {
1949	case IB_CM_REP_SENT:
1950	case IB_CM_DREQ_SENT:
1951		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1952		break;
1953	case IB_CM_ESTABLISHED:
1954	case IB_CM_MRA_REP_RCVD:
1955		break;
1956	case IB_CM_TIMEWAIT:
1957		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1958			goto unlock;
1959
1960		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1961			       cm_id_priv->private_data,
1962			       cm_id_priv->private_data_len);
1963		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1964
1965		if (ib_post_send_mad(msg, NULL))
1966			cm_free_msg(msg);
1967		goto deref;
1968	default:
1969		goto unlock;
1970	}
1971	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1972	cm_id_priv->tid = dreq_msg->hdr.tid;
1973	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1974	if (!ret)
1975		list_add_tail(&work->list, &cm_id_priv->work_list);
1976	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1977
1978	if (ret)
1979		cm_process_work(cm_id_priv, work);
1980	else
1981		cm_deref_id(cm_id_priv);
1982	return 0;
1983
1984unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1985deref:	cm_deref_id(cm_id_priv);
1986	return -EINVAL;
1987}
1988
1989static int cm_drep_handler(struct cm_work *work)
1990{
1991	struct cm_id_private *cm_id_priv;
1992	struct cm_drep_msg *drep_msg;
1993	unsigned long flags;
1994	int ret;
1995
1996	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1997	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1998				   drep_msg->local_comm_id);
1999	if (!cm_id_priv)
2000		return -EINVAL;
2001
2002	work->cm_event.private_data = &drep_msg->private_data;
2003
2004	spin_lock_irqsave(&cm_id_priv->lock, flags);
2005	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2006	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2007		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2008		goto out;
2009	}
2010	cm_enter_timewait(cm_id_priv);
2011
2012	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2013	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2014	if (!ret)
2015		list_add_tail(&work->list, &cm_id_priv->work_list);
2016	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2017
2018	if (ret)
2019		cm_process_work(cm_id_priv, work);
2020	else
2021		cm_deref_id(cm_id_priv);
2022	return 0;
2023out:
2024	cm_deref_id(cm_id_priv);
2025	return -EINVAL;
2026}
2027
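/*
 * ib_send_cm_rej - Reject a connection request or reply.  Rejecting a
 * received REQ or REP resets the cm_id to idle; rejecting after our own
 * REP has been sent moves the cm_id into timewait.
 */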
2028int ib_send_cm_rej(struct ib_cm_id *cm_id,
2029		   enum ib_cm_rej_reason reason,
2030		   void *ari,
2031		   u8 ari_length,
2032		   const void *private_data,
2033		   u8 private_data_len)
2034{
2035	struct cm_id_private *cm_id_priv;
2036	struct ib_mad_send_buf *msg;
2037	unsigned long flags;
2038	int ret;
2039
2040	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2041	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2042		return -EINVAL;
2043
2044	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2045
2046	spin_lock_irqsave(&cm_id_priv->lock, flags);
2047	switch (cm_id->state) {
2048	case IB_CM_REQ_SENT:
2049	case IB_CM_MRA_REQ_RCVD:
2050	case IB_CM_REQ_RCVD:
2051	case IB_CM_MRA_REQ_SENT:
2052	case IB_CM_REP_RCVD:
2053	case IB_CM_MRA_REP_SENT:
2054		ret = cm_alloc_msg(cm_id_priv, &msg);
2055		if (!ret)
2056			cm_format_rej((struct cm_rej_msg *) msg->mad,
2057				      cm_id_priv, reason, ari, ari_length,
2058				      private_data, private_data_len);
2059
2060		cm_reset_to_idle(cm_id_priv);
2061		break;
2062	case IB_CM_REP_SENT:
2063	case IB_CM_MRA_REP_RCVD:
2064		ret = cm_alloc_msg(cm_id_priv, &msg);
2065		if (!ret)
2066			cm_format_rej((struct cm_rej_msg *) msg->mad,
2067				      cm_id_priv, reason, ari, ari_length,
2068				      private_data, private_data_len);
2069
2070		cm_enter_timewait(cm_id_priv);
2071		break;
2072	default:
2073		ret = -EINVAL;
2074		goto out;
2075	}
2076
2077	if (ret)
2078		goto out;
2079
2080	ret = ib_post_send_mad(msg, NULL);
2081	if (ret)
2082		cm_free_msg(msg);
2083
2084out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2085	return ret;
2086}
2087EXPORT_SYMBOL(ib_send_cm_rej);
2088
2089static void cm_format_rej_event(struct cm_work *work)
2090{
2091	struct cm_rej_msg *rej_msg;
2092	struct ib_cm_rej_event_param *param;
2093
2094	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2095	param = &work->cm_event.param.rej_rcvd;
2096	param->ari = rej_msg->ari;
2097	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2098	param->reason = __be16_to_cpu(rej_msg->reason);
2099	work->cm_event.private_data = &rej_msg->private_data;
2100}
2101
2102static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2103{
2104	struct cm_timewait_info *timewait_info;
2105	struct cm_id_private *cm_id_priv;
2106	unsigned long flags;
2107	__be32 remote_id;
2108
2109	remote_id = rej_msg->local_comm_id;
2110
2111	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2112		spin_lock_irqsave(&cm.lock, flags);
2113		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2114						  remote_id);
2115		if (!timewait_info) {
2116			spin_unlock_irqrestore(&cm.lock, flags);
2117			return NULL;
2118		}
2119		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2120				      (timewait_info->work.local_id ^
2121				       cm.random_id_operand));
2122		if (cm_id_priv) {
2123			if (cm_id_priv->id.remote_id == remote_id)
2124				atomic_inc(&cm_id_priv->refcount);
2125			else
2126				cm_id_priv = NULL;
2127		}
2128		spin_unlock_irqrestore(&cm.lock, flags);
2129	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2130		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2131	else
2132		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2133
2134	return cm_id_priv;
2135}
2136
2137static int cm_rej_handler(struct cm_work *work)
2138{
2139	struct cm_id_private *cm_id_priv;
2140	struct cm_rej_msg *rej_msg;
2141	unsigned long flags;
2142	int ret;
2143
2144	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2145	cm_id_priv = cm_acquire_rejected_id(rej_msg);
2146	if (!cm_id_priv)
2147		return -EINVAL;
2148
2149	cm_format_rej_event(work);
2150
2151	spin_lock_irqsave(&cm_id_priv->lock, flags);
2152	switch (cm_id_priv->id.state) {
2153	case IB_CM_REQ_SENT:
2154	case IB_CM_MRA_REQ_RCVD:
2155	case IB_CM_REP_SENT:
2156	case IB_CM_MRA_REP_RCVD:
2157		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2158		/* fall through */
2159	case IB_CM_REQ_RCVD:
2160	case IB_CM_MRA_REQ_SENT:
2161		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2162			cm_enter_timewait(cm_id_priv);
2163		else
2164			cm_reset_to_idle(cm_id_priv);
2165		break;
2166	case IB_CM_DREQ_SENT:
2167		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2168		/* fall through */
2169	case IB_CM_REP_RCVD:
2170	case IB_CM_MRA_REP_SENT:
2171	case IB_CM_ESTABLISHED:
2172		cm_enter_timewait(cm_id_priv);
2173		break;
2174	default:
2175		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2176		ret = -EINVAL;
2177		goto out;
2178	}
2179
2180	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2181	if (!ret)
2182		list_add_tail(&work->list, &cm_id_priv->work_list);
2183	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2184
2185	if (ret)
2186		cm_process_work(cm_id_priv, work);
2187	else
2188		cm_deref_id(cm_id_priv);
2189	return 0;
2190out:
2191	cm_deref_id(cm_id_priv);
2192	return -EINVAL;
2193}
2194
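/*
 * ib_send_cm_mra - Send a message receipt acknowledgement (MRA) for a
 * received REQ, REP or LAP, asking the sender to wait the given
 * service_timeout before retrying.
 */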
2195int ib_send_cm_mra(struct ib_cm_id *cm_id,
2196		   u8 service_timeout,
2197		   const void *private_data,
2198		   u8 private_data_len)
2199{
2200	struct cm_id_private *cm_id_priv;
2201	struct ib_mad_send_buf *msg;
2202	void *data;
2203	unsigned long flags;
2204	int ret;
2205
2206	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2207		return -EINVAL;
2208
2209	data = cm_copy_private_data(private_data, private_data_len);
2210	if (IS_ERR(data))
2211		return PTR_ERR(data);
2212
2213	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2214
2215	spin_lock_irqsave(&cm_id_priv->lock, flags);
2216	switch (cm_id_priv->id.state) {
2217	case IB_CM_REQ_RCVD:
2218		ret = cm_alloc_msg(cm_id_priv, &msg);
2219		if (ret)
2220			goto error1;
2221
2222		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2223			      CM_MSG_RESPONSE_REQ, service_timeout,
2224			      private_data, private_data_len);
2225		ret = ib_post_send_mad(msg, NULL);
2226		if (ret)
2227			goto error2;
2228		cm_id->state = IB_CM_MRA_REQ_SENT;
2229		break;
2230	case IB_CM_REP_RCVD:
2231		ret = cm_alloc_msg(cm_id_priv, &msg);
2232		if (ret)
2233			goto error1;
2234
2235		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2236			      CM_MSG_RESPONSE_REP, service_timeout,
2237			      private_data, private_data_len);
2238		ret = ib_post_send_mad(msg, NULL);
2239		if (ret)
2240			goto error2;
2241		cm_id->state = IB_CM_MRA_REP_SENT;
2242		break;
2243	case IB_CM_ESTABLISHED:
2244		ret = cm_alloc_msg(cm_id_priv, &msg);
2245		if (ret)
2246			goto error1;
2247
2248		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2249			      CM_MSG_RESPONSE_OTHER, service_timeout,
2250			      private_data, private_data_len);
2251		ret = ib_post_send_mad(msg, NULL);
2252		if (ret)
2253			goto error2;
2254		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2255		break;
2256	default:
2257		ret = -EINVAL;
2258		goto error1;
2259	}
2260	cm_id_priv->service_timeout = service_timeout;
2261	cm_set_private_data(cm_id_priv, data, private_data_len);
2262	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2263	return 0;
2264
2265error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2266	kfree(data);
2267	return ret;
2268
2269error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2270	kfree(data);
2271	cm_free_msg(msg);
2272	return ret;
2273}
2274EXPORT_SYMBOL(ib_send_cm_mra);
2275
2276static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2277{
2278	switch (cm_mra_get_msg_mraed(mra_msg)) {
2279	case CM_MSG_RESPONSE_REQ:
2280		return cm_acquire_id(mra_msg->remote_comm_id, 0);
2281	case CM_MSG_RESPONSE_REP:
2282	case CM_MSG_RESPONSE_OTHER:
2283		return cm_acquire_id(mra_msg->remote_comm_id,
2284				     mra_msg->local_comm_id);
2285	default:
2286		return NULL;
2287	}
2288}
2289
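/*
 * Process an incoming MRA: extend the timeout of the outstanding REQ, REP
 * or LAP MAD by the service timeout advertised by the peer, then report
 * the MRA to the consumer.
 */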
2290static int cm_mra_handler(struct cm_work *work)
2291{
2292	struct cm_id_private *cm_id_priv;
2293	struct cm_mra_msg *mra_msg;
2294	unsigned long flags;
2295	int timeout, ret;
2296
2297	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2298	cm_id_priv = cm_acquire_mraed_id(mra_msg);
2299	if (!cm_id_priv)
2300		return -EINVAL;
2301
2302	work->cm_event.private_data = &mra_msg->private_data;
2303	work->cm_event.param.mra_rcvd.service_timeout =
2304					cm_mra_get_service_timeout(mra_msg);
2305	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2306		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2307
2308	spin_lock_irqsave(&cm_id_priv->lock, flags);
2309	switch (cm_id_priv->id.state) {
2310	case IB_CM_REQ_SENT:
2311		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2312		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2313				  cm_id_priv->msg, timeout))
2314			goto out;
2315		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2316		break;
2317	case IB_CM_REP_SENT:
2318		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2319		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2320				  cm_id_priv->msg, timeout))
2321			goto out;
2322		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2323		break;
2324	case IB_CM_ESTABLISHED:
2325		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2326		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2327		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2328				  cm_id_priv->msg, timeout))
2329			goto out;
2330		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2331		break;
2332	default:
2333		goto out;
2334	}
2335
2336	cm_id_priv->msg->context[1] = (void *) (unsigned long)
2337				      cm_id_priv->id.state;
2338	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2339	if (!ret)
2340		list_add_tail(&work->list, &cm_id_priv->work_list);
2341	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2342
2343	if (ret)
2344		cm_process_work(cm_id_priv, work);
2345	else
2346		cm_deref_id(cm_id_priv);
2347	return 0;
2348out:
2349	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2350	cm_deref_id(cm_id_priv);
2351	return -EINVAL;
2352}
2353
2354static void cm_format_lap(struct cm_lap_msg *lap_msg,
2355			  struct cm_id_private *cm_id_priv,
2356			  struct ib_sa_path_rec *alternate_path,
2357			  const void *private_data,
2358			  u8 private_data_len)
2359{
2360	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2361			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2362	lap_msg->local_comm_id = cm_id_priv->id.local_id;
2363	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2364	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2365	/* todo: need remote CM response timeout */
2366	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2367	lap_msg->alt_local_lid = alternate_path->slid;
2368	lap_msg->alt_remote_lid = alternate_path->dlid;
2369	lap_msg->alt_local_gid = alternate_path->sgid;
2370	lap_msg->alt_remote_gid = alternate_path->dgid;
2371	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2372	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2373	lap_msg->alt_hop_limit = alternate_path->hop_limit;
2374	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2375	cm_lap_set_sl(lap_msg, alternate_path->sl);
2376	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2377	cm_lap_set_local_ack_timeout(lap_msg,
2378		min(31, alternate_path->packet_life_time + 1));
2379
2380	if (private_data && private_data_len)
2381		memcpy(lap_msg->private_data, private_data, private_data_len);
2382}
2383
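/*
 * ib_send_cm_lap - Send a load alternate path (LAP) request for an
 * established connection.  The alternate path is resolved into an address
 * vector before the LAP is posted.
 */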
2384int ib_send_cm_lap(struct ib_cm_id *cm_id,
2385		   struct ib_sa_path_rec *alternate_path,
2386		   const void *private_data,
2387		   u8 private_data_len)
2388{
2389	struct cm_id_private *cm_id_priv;
2390	struct ib_mad_send_buf *msg;
2391	unsigned long flags;
2392	int ret;
2393
2394	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2395		return -EINVAL;
2396
2397	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2398	spin_lock_irqsave(&cm_id_priv->lock, flags);
2399	if (cm_id->state != IB_CM_ESTABLISHED ||
2400	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2401	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
2402		ret = -EINVAL;
2403		goto out;
2404	}
2405
2406	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
2407	if (ret)
2408		goto out;
2409
2410	ret = cm_alloc_msg(cm_id_priv, &msg);
2411	if (ret)
2412		goto out;
2413
2414	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2415		      alternate_path, private_data, private_data_len);
2416	msg->timeout_ms = cm_id_priv->timeout_ms;
2417	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2418
2419	ret = ib_post_send_mad(msg, NULL);
2420	if (ret) {
2421		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2422		cm_free_msg(msg);
2423		return ret;
2424	}
2425
2426	cm_id->lap_state = IB_CM_LAP_SENT;
2427	cm_id_priv->msg = msg;
2428
2429out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2430	return ret;
2431}
2432EXPORT_SYMBOL(ib_send_cm_lap);
2433
2434static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2435				    struct ib_sa_path_rec *path,
2436				    struct cm_lap_msg *lap_msg)
2437{
2438	memset(path, 0, sizeof *path);
2439	path->dgid = lap_msg->alt_local_gid;
2440	path->sgid = lap_msg->alt_remote_gid;
2441	path->dlid = lap_msg->alt_local_lid;
2442	path->slid = lap_msg->alt_remote_lid;
2443	path->flow_label = cm_lap_get_flow_label(lap_msg);
2444	path->hop_limit = lap_msg->alt_hop_limit;
2445	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2446	path->reversible = 1;
2447	path->pkey = cm_id_priv->pkey;
2448	path->sl = cm_lap_get_sl(lap_msg);
2449	path->mtu_selector = IB_SA_EQ;
2450	path->mtu = cm_id_priv->path_mtu;
2451	path->rate_selector = IB_SA_EQ;
2452	path->rate = cm_lap_get_packet_rate(lap_msg);
2453	path->packet_life_time_selector = IB_SA_EQ;
2454	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2455	path->packet_life_time -= (path->packet_life_time > 0);
2456}
2457
2458static int cm_lap_handler(struct cm_work *work)
2459{
2460	struct cm_id_private *cm_id_priv;
2461	struct cm_lap_msg *lap_msg;
2462	struct ib_cm_lap_event_param *param;
2463	struct ib_mad_send_buf *msg = NULL;
2464	unsigned long flags;
2465	int ret;
2466
2467	/* todo: verify LAP request and send reject APR if invalid. */
2468	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2469	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2470				   lap_msg->local_comm_id);
2471	if (!cm_id_priv)
2472		return -EINVAL;
2473
2474	param = &work->cm_event.param.lap_rcvd;
2475	param->alternate_path = &work->path[0];
2476	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2477	work->cm_event.private_data = &lap_msg->private_data;
2478
2479	spin_lock_irqsave(&cm_id_priv->lock, flags);
2480	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2481		goto unlock;
2482
2483	switch (cm_id_priv->id.lap_state) {
2484	case IB_CM_LAP_UNINIT:
2485	case IB_CM_LAP_IDLE:
2486		break;
2487	case IB_CM_MRA_LAP_SENT:
2488		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2489			goto unlock;
2490
2491		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2492			      CM_MSG_RESPONSE_OTHER,
2493			      cm_id_priv->service_timeout,
2494			      cm_id_priv->private_data,
2495			      cm_id_priv->private_data_len);
2496		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2497
2498		if (ib_post_send_mad(msg, NULL))
2499			cm_free_msg(msg);
2500		goto deref;
2501	default:
2502		goto unlock;
2503	}
2504
2505	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2506	cm_id_priv->tid = lap_msg->hdr.tid;
2507	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2508				work->mad_recv_wc->recv_buf.grh,
2509				&cm_id_priv->av);
2510	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
2511	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2512	if (!ret)
2513		list_add_tail(&work->list, &cm_id_priv->work_list);
2514	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2515
2516	if (ret)
2517		cm_process_work(cm_id_priv, work);
2518	else
2519		cm_deref_id(cm_id_priv);
2520	return 0;
2521
2522unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2523deref:	cm_deref_id(cm_id_priv);
2524	return -EINVAL;
2525}
2526
2527static void cm_format_apr(struct cm_apr_msg *apr_msg,
2528			  struct cm_id_private *cm_id_priv,
2529			  enum ib_cm_apr_status status,
2530			  void *info,
2531			  u8 info_length,
2532			  const void *private_data,
2533			  u8 private_data_len)
2534{
2535	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2536	apr_msg->local_comm_id = cm_id_priv->id.local_id;
2537	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2538	apr_msg->ap_status = (u8) status;
2539
2540	if (info && info_length) {
2541		apr_msg->info_length = info_length;
2542		memcpy(apr_msg->info, info, info_length);
2543	}
2544
2545	if (private_data && private_data_len)
2546		memcpy(apr_msg->private_data, private_data, private_data_len);
2547}
2548
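/*
 * ib_send_cm_apr - Send an alternate path response (APR) accepting or
 * rejecting a received LAP, optionally carrying additional rejection info.
 */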
2549int ib_send_cm_apr(struct ib_cm_id *cm_id,
2550		   enum ib_cm_apr_status status,
2551		   void *info,
2552		   u8 info_length,
2553		   const void *private_data,
2554		   u8 private_data_len)
2555{
2556	struct cm_id_private *cm_id_priv;
2557	struct ib_mad_send_buf *msg;
2558	unsigned long flags;
2559	int ret;
2560
2561	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2562	    (info && info_length > IB_CM_APR_INFO_LENGTH))
2563		return -EINVAL;
2564
2565	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2566	spin_lock_irqsave(&cm_id_priv->lock, flags);
2567	if (cm_id->state != IB_CM_ESTABLISHED ||
2568	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
2569	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2570		ret = -EINVAL;
2571		goto out;
2572	}
2573
2574	ret = cm_alloc_msg(cm_id_priv, &msg);
2575	if (ret)
2576		goto out;
2577
2578	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2579		      info, info_length, private_data, private_data_len);
2580	ret = ib_post_send_mad(msg, NULL);
2581	if (ret) {
2582		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2583		cm_free_msg(msg);
2584		return ret;
2585	}
2586
2587	cm_id->lap_state = IB_CM_LAP_IDLE;
2588out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2589	return ret;
2590}
2591EXPORT_SYMBOL(ib_send_cm_apr);
2592
2593static int cm_apr_handler(struct cm_work *work)
2594{
2595	struct cm_id_private *cm_id_priv;
2596	struct cm_apr_msg *apr_msg;
2597	unsigned long flags;
2598	int ret;
2599
2600	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2601	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2602				   apr_msg->local_comm_id);
2603	if (!cm_id_priv)
2604		return -EINVAL; /* Unmatched reply. */
2605
2606	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2607	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2608	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2609	work->cm_event.private_data = &apr_msg->private_data;
2610
2611	spin_lock_irqsave(&cm_id_priv->lock, flags);
2612	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2613	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2614	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2615		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2616		goto out;
2617	}
2618	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2619	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2620	cm_id_priv->msg = NULL;
2621
2622	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2623	if (!ret)
2624		list_add_tail(&work->list, &cm_id_priv->work_list);
2625	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2626
2627	if (ret)
2628		cm_process_work(cm_id_priv, work);
2629	else
2630		cm_deref_id(cm_id_priv);
2631	return 0;
2632out:
2633	cm_deref_id(cm_id_priv);
2634	return -EINVAL;
2635}
2636
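/*
 * Timewait expiration: remove the timewait entry and, if the cm_id is
 * still in timewait for the same remote QPN, move it back to idle and
 * deliver an IB_CM_TIMEWAIT_EXIT event.
 */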
2637static int cm_timewait_handler(struct cm_work *work)
2638{
2639	struct cm_timewait_info *timewait_info;
2640	struct cm_id_private *cm_id_priv;
2641	int ret;
2642
2643	timewait_info = (struct cm_timewait_info *)work;
2644	spin_lock_irq(&cm.lock);
2645	list_del(&timewait_info->list);
2646	spin_unlock_irq(&cm.lock);
2647
2648	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2649				   timewait_info->work.remote_id);
2650	if (!cm_id_priv)
2651		return -EINVAL;
2652
2653	spin_lock_irq(&cm_id_priv->lock);
2654	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2655	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2656		spin_unlock_irq(&cm_id_priv->lock);
2657		goto out;
2658	}
2659	cm_id_priv->id.state = IB_CM_IDLE;
2660	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2661	if (!ret)
2662		list_add_tail(&work->list, &cm_id_priv->work_list);
2663	spin_unlock_irq(&cm_id_priv->lock);
2664
2665	if (ret)
2666		cm_process_work(cm_id_priv, work);
2667	else
2668		cm_deref_id(cm_id_priv);
2669	return 0;
2670out:
2671	cm_deref_id(cm_id_priv);
2672	return -EINVAL;
2673}
2674
2675static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2676			       struct cm_id_private *cm_id_priv,
2677			       struct ib_cm_sidr_req_param *param)
2678{
2679	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2680			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2681	sidr_req_msg->request_id = cm_id_priv->id.local_id;
2682	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
2683	sidr_req_msg->service_id = param->service_id;
2684
2685	if (param->private_data && param->private_data_len)
2686		memcpy(sidr_req_msg->private_data, param->private_data,
2687		       param->private_data_len);
2688}
2689
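/*
 * ib_send_cm_sidr_req - Send a service ID resolution request (SIDR REQ)
 * to resolve the QPN and Q_Key associated with a service ID at the
 * destination given by the path record.
 */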
2690int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2691			struct ib_cm_sidr_req_param *param)
2692{
2693	struct cm_id_private *cm_id_priv;
2694	struct ib_mad_send_buf *msg;
2695	unsigned long flags;
2696	int ret;
2697
2698	if (!param->path || (param->private_data &&
2699	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2700		return -EINVAL;
2701
2702	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2703	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2704	if (ret)
2705		goto out;
2706
2707	cm_id->service_id = param->service_id;
2708	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2709	cm_id_priv->timeout_ms = param->timeout_ms;
2710	cm_id_priv->max_cm_retries = param->max_cm_retries;
2711	ret = cm_alloc_msg(cm_id_priv, &msg);
2712	if (ret)
2713		goto out;
2714
2715	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2716			   param);
2717	msg->timeout_ms = cm_id_priv->timeout_ms;
2718	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2719
2720	spin_lock_irqsave(&cm_id_priv->lock, flags);
2721	if (cm_id->state == IB_CM_IDLE)
2722		ret = ib_post_send_mad(msg, NULL);
2723	else
2724		ret = -EINVAL;
2725
2726	if (ret) {
2727		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2728		cm_free_msg(msg);
2729		goto out;
2730	}
2731	cm_id->state = IB_CM_SIDR_REQ_SENT;
2732	cm_id_priv->msg = msg;
2733	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2734out:
2735	return ret;
2736}
2737EXPORT_SYMBOL(ib_send_cm_sidr_req);
2738
2739static void cm_format_sidr_req_event(struct cm_work *work,
2740				     struct ib_cm_id *listen_id)
2741{
2742	struct cm_sidr_req_msg *sidr_req_msg;
2743	struct ib_cm_sidr_req_event_param *param;
2744
2745	sidr_req_msg = (struct cm_sidr_req_msg *)
2746				work->mad_recv_wc->recv_buf.mad;
2747	param = &work->cm_event.param.sidr_req_rcvd;
2748	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2749	param->listen_id = listen_id;
2750	param->port = work->port->port_num;
2751	work->cm_event.private_data = &sidr_req_msg->private_data;
2752}
2753
2754static int cm_sidr_req_handler(struct cm_work *work)
2755{
2756	struct ib_cm_id *cm_id;
2757	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2758	struct cm_sidr_req_msg *sidr_req_msg;
2759	struct ib_wc *wc;
2760	unsigned long flags;
2761
2762	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2763	if (IS_ERR(cm_id))
2764		return PTR_ERR(cm_id);
2765	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2766
2767	/* Record SGID/SLID and request ID for lookup. */
2768	sidr_req_msg = (struct cm_sidr_req_msg *)
2769				work->mad_recv_wc->recv_buf.mad;
2770	wc = work->mad_recv_wc->wc;
2771	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2772	cm_id_priv->av.dgid.global.interface_id = 0;
2773	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2774				work->mad_recv_wc->recv_buf.grh,
2775				&cm_id_priv->av);
2776	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2777	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2778	cm_id_priv->tid = sidr_req_msg->hdr.tid;
2779	atomic_inc(&cm_id_priv->work_count);
2780
2781	spin_lock_irqsave(&cm.lock, flags);
2782	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2783	if (cur_cm_id_priv) {
2784		spin_unlock_irqrestore(&cm.lock, flags);
2785		goto out; /* Duplicate message. */
2786	}
2787	cur_cm_id_priv = cm_find_listen(cm_id->device,
2788					sidr_req_msg->service_id,
2789					sidr_req_msg->private_data);
2790	if (!cur_cm_id_priv) {
2791		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2792		spin_unlock_irqrestore(&cm.lock, flags);
2793		/* todo: reply with no match */
2794		goto out; /* No match. */
2795	}
2796	atomic_inc(&cur_cm_id_priv->refcount);
2797	spin_unlock_irqrestore(&cm.lock, flags);
2798
2799	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2800	cm_id_priv->id.context = cur_cm_id_priv->id.context;
2801	cm_id_priv->id.service_id = sidr_req_msg->service_id;
2802	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2803
2804	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2805	cm_process_work(cm_id_priv, work);
2806	cm_deref_id(cur_cm_id_priv);
2807	return 0;
2808out:
2809	ib_destroy_cm_id(&cm_id_priv->id);
2810	return -EINVAL;
2811}
2812
2813static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2814			       struct cm_id_private *cm_id_priv,
2815			       struct ib_cm_sidr_rep_param *param)
2816{
2817	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2818			  cm_id_priv->tid);
2819	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2820	sidr_rep_msg->status = param->status;
2821	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2822	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2823	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2824
2825	if (param->info && param->info_length)
2826		memcpy(sidr_rep_msg->info, param->info, param->info_length);
2827
2828	if (param->private_data && param->private_data_len)
2829		memcpy(sidr_rep_msg->private_data, param->private_data,
2830		       param->private_data_len);
2831}
2832
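/*
 * ib_send_cm_sidr_rep - Reply to a SIDR REQ with the status, QPN and
 * Q_Key for the requested service.  On success the cm_id returns to idle
 * and is removed from the remote SIDR table.
 */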
2833int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2834			struct ib_cm_sidr_rep_param *param)
2835{
2836	struct cm_id_private *cm_id_priv;
2837	struct ib_mad_send_buf *msg;
2838	unsigned long flags;
2839	int ret;
2840
2841	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2842	    (param->private_data &&
2843	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2844		return -EINVAL;
2845
2846	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2847	spin_lock_irqsave(&cm_id_priv->lock, flags);
2848	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2849		ret = -EINVAL;
2850		goto error;
2851	}
2852
2853	ret = cm_alloc_msg(cm_id_priv, &msg);
2854	if (ret)
2855		goto error;
2856
2857	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2858			   param);
2859	ret = ib_post_send_mad(msg, NULL);
2860	if (ret) {
2861		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2862		cm_free_msg(msg);
2863		return ret;
2864	}
2865	cm_id->state = IB_CM_IDLE;
2866	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2867
2868	spin_lock_irqsave(&cm.lock, flags);
2869	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2870	spin_unlock_irqrestore(&cm.lock, flags);
2871	return 0;
2872
2873error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2874	return ret;
2875}
2876EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2877
2878static void cm_format_sidr_rep_event(struct cm_work *work)
2879{
2880	struct cm_sidr_rep_msg *sidr_rep_msg;
2881	struct ib_cm_sidr_rep_event_param *param;
2882
2883	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2884				work->mad_recv_wc->recv_buf.mad;
2885	param = &work->cm_event.param.sidr_rep_rcvd;
2886	param->status = sidr_rep_msg->status;
2887	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2888	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2889	param->info = &sidr_rep_msg->info;
2890	param->info_len = sidr_rep_msg->info_length;
2891	work->cm_event.private_data = &sidr_rep_msg->private_data;
2892}
2893
2894static int cm_sidr_rep_handler(struct cm_work *work)
2895{
2896	struct cm_sidr_rep_msg *sidr_rep_msg;
2897	struct cm_id_private *cm_id_priv;
2898	unsigned long flags;
2899
2900	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2901				work->mad_recv_wc->recv_buf.mad;
2902	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2903	if (!cm_id_priv)
2904		return -EINVAL; /* Unmatched reply. */
2905
2906	spin_lock_irqsave(&cm_id_priv->lock, flags);
2907	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2908		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2909		goto out;
2910	}
2911	cm_id_priv->id.state = IB_CM_IDLE;
2912	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2913	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2914
2915	cm_format_sidr_rep_event(work);
2916	cm_process_work(cm_id_priv, work);
2917	return 0;
2918out:
2919	cm_deref_id(cm_id_priv);
2920	return -EINVAL;
2921}
2922
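/*
 * A sent MAD completed in error (e.g. it timed out).  If it is still the
 * active message for the cm_id and the state has not changed, reset the
 * connection state and report the corresponding *_ERROR event to the
 * consumer; otherwise just discard the message.
 */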
2923static void cm_process_send_error(struct ib_mad_send_buf *msg,
2924				  enum ib_wc_status wc_status)
2925{
2926	struct cm_id_private *cm_id_priv;
2927	struct ib_cm_event cm_event;
2928	enum ib_cm_state state;
2929	unsigned long flags;
2930	int ret;
2931
2932	memset(&cm_event, 0, sizeof cm_event);
2933	cm_id_priv = msg->context[0];
2934
2935	/* Discard old sends or ones without a response. */
2936	spin_lock_irqsave(&cm_id_priv->lock, flags);
2937	state = (enum ib_cm_state) (unsigned long) msg->context[1];
2938	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2939		goto discard;
2940
2941	switch (state) {
2942	case IB_CM_REQ_SENT:
2943	case IB_CM_MRA_REQ_RCVD:
2944		cm_reset_to_idle(cm_id_priv);
2945		cm_event.event = IB_CM_REQ_ERROR;
2946		break;
2947	case IB_CM_REP_SENT:
2948	case IB_CM_MRA_REP_RCVD:
2949		cm_reset_to_idle(cm_id_priv);
2950		cm_event.event = IB_CM_REP_ERROR;
2951		break;
2952	case IB_CM_DREQ_SENT:
2953		cm_enter_timewait(cm_id_priv);
2954		cm_event.event = IB_CM_DREQ_ERROR;
2955		break;
2956	case IB_CM_SIDR_REQ_SENT:
2957		cm_id_priv->id.state = IB_CM_IDLE;
2958		cm_event.event = IB_CM_SIDR_REQ_ERROR;
2959		break;
2960	default:
2961		goto discard;
2962	}
2963	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2964	cm_event.param.send_status = wc_status;
2965
2966	/* No other events can occur on the cm_id at this point. */
2967	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2968	cm_free_msg(msg);
2969	if (ret)
2970		ib_destroy_cm_id(&cm_id_priv->id);
2971	return;
2972discard:
2973	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2974	cm_free_msg(msg);
2975}
2976
2977static void cm_send_handler(struct ib_mad_agent *mad_agent,
2978			    struct ib_mad_send_wc *mad_send_wc)
2979{
2980	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
2981
2982	switch (mad_send_wc->status) {
2983	case IB_WC_SUCCESS:
2984	case IB_WC_WR_FLUSH_ERR:
2985		cm_free_msg(msg);
2986		break;
2987	default:
2988		if (msg->context[0] && msg->context[1])
2989			cm_process_send_error(msg, mad_send_wc->status);
2990		else
2991			cm_free_msg(msg);
2992		break;
2993	}
2994}
2995
2996static void cm_work_handler(struct work_struct *_work)
2997{
2998	struct cm_work *work = container_of(_work, struct cm_work, work.work);
2999	int ret;
3000
3001	switch (work->cm_event.event) {
3002	case IB_CM_REQ_RECEIVED:
3003		ret = cm_req_handler(work);
3004		break;
3005	case IB_CM_MRA_RECEIVED:
3006		ret = cm_mra_handler(work);
3007		break;
3008	case IB_CM_REJ_RECEIVED:
3009		ret = cm_rej_handler(work);
3010		break;
3011	case IB_CM_REP_RECEIVED:
3012		ret = cm_rep_handler(work);
3013		break;
3014	case IB_CM_RTU_RECEIVED:
3015		ret = cm_rtu_handler(work);
3016		break;
3017	case IB_CM_USER_ESTABLISHED:
3018		ret = cm_establish_handler(work);
3019		break;
3020	case IB_CM_DREQ_RECEIVED:
3021		ret = cm_dreq_handler(work);
3022		break;
3023	case IB_CM_DREP_RECEIVED:
3024		ret = cm_drep_handler(work);
3025		break;
3026	case IB_CM_SIDR_REQ_RECEIVED:
3027		ret = cm_sidr_req_handler(work);
3028		break;
3029	case IB_CM_SIDR_REP_RECEIVED:
3030		ret = cm_sidr_rep_handler(work);
3031		break;
3032	case IB_CM_LAP_RECEIVED:
3033		ret = cm_lap_handler(work);
3034		break;
3035	case IB_CM_APR_RECEIVED:
3036		ret = cm_apr_handler(work);
3037		break;
3038	case IB_CM_TIMEWAIT_EXIT:
3039		ret = cm_timewait_handler(work);
3040		break;
3041	default:
3042		ret = -EINVAL;
3043		break;
3044	}
3045	if (ret)
3046		cm_free_work(work);
3047}
3048
3049static int cm_establish(struct ib_cm_id *cm_id)
3050{
3051	struct cm_id_private *cm_id_priv;
3052	struct cm_work *work;
3053	unsigned long flags;
3054	int ret = 0;
3055
3056	work = kmalloc(sizeof *work, GFP_ATOMIC);
3057	if (!work)
3058		return -ENOMEM;
3059
3060	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3061	spin_lock_irqsave(&cm_id_priv->lock, flags);
3062	switch (cm_id->state) {
3064	case IB_CM_REP_SENT:
3065	case IB_CM_MRA_REP_RCVD:
3066		cm_id->state = IB_CM_ESTABLISHED;
3067		break;
3068	case IB_CM_ESTABLISHED:
3069		ret = -EISCONN;
3070		break;
3071	default:
3072		ret = -EINVAL;
3073		break;
3074	}
3075	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3076
3077	if (ret) {
3078		kfree(work);
3079		goto out;
3080	}
3081
3082	/*
3083	 * The CM worker thread may try to destroy the cm_id before it
3084	 * can execute this work item.  To prevent potential deadlock,
3085	 * we need to find the cm_id once we're in the context of the
3086	 * worker thread, rather than holding a reference on it.
3087	 */
3088	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3089	work->local_id = cm_id->local_id;
3090	work->remote_id = cm_id->remote_id;
3091	work->mad_recv_wc = NULL;
3092	work->cm_event.event = IB_CM_USER_ESTABLISHED;
3093	queue_delayed_work(cm.wq, &work->work, 0);
3094out:
3095	return ret;
3096}
3097
3098static int cm_migrate(struct ib_cm_id *cm_id)
3099{
3100	struct cm_id_private *cm_id_priv;
3101	unsigned long flags;
3102	int ret = 0;
3103
3104	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3105	spin_lock_irqsave(&cm_id_priv->lock, flags);
3106	if (cm_id->state == IB_CM_ESTABLISHED &&
3107	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3108	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
3109		cm_id->lap_state = IB_CM_LAP_IDLE;
3110		cm_id_priv->av = cm_id_priv->alt_av;
3111	} else
3112		ret = -EINVAL;
3113	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3114
3115	return ret;
3116}
3117
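/*
 * ib_cm_notify - Inform the CM of a QP event: IB_EVENT_COMM_EST marks the
 * connection established (e.g. data arrived before the RTU), and
 * IB_EVENT_PATH_MIG switches the primary path to the loaded alternate.
 */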
3118int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3119{
3120	int ret;
3121
3122	switch (event) {
3123	case IB_EVENT_COMM_EST:
3124		ret = cm_establish(cm_id);
3125		break;
3126	case IB_EVENT_PATH_MIG:
3127		ret = cm_migrate(cm_id);
3128		break;
3129	default:
3130		ret = -EINVAL;
3131	}
3132	return ret;
3133}
3134EXPORT_SYMBOL(ib_cm_notify);
3135
3136static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3137			    struct ib_mad_recv_wc *mad_recv_wc)
3138{
3139	struct cm_work *work;
3140	enum ib_cm_event_type event;
3141	int paths = 0;
3142
3143	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3144	case CM_REQ_ATTR_ID:
3145		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3146						    alt_local_lid != 0);
3147		event = IB_CM_REQ_RECEIVED;
3148		break;
3149	case CM_MRA_ATTR_ID:
3150		event = IB_CM_MRA_RECEIVED;
3151		break;
3152	case CM_REJ_ATTR_ID:
3153		event = IB_CM_REJ_RECEIVED;
3154		break;
3155	case CM_REP_ATTR_ID:
3156		event = IB_CM_REP_RECEIVED;
3157		break;
3158	case CM_RTU_ATTR_ID:
3159		event = IB_CM_RTU_RECEIVED;
3160		break;
3161	case CM_DREQ_ATTR_ID:
3162		event = IB_CM_DREQ_RECEIVED;
3163		break;
3164	case CM_DREP_ATTR_ID:
3165		event = IB_CM_DREP_RECEIVED;
3166		break;
3167	case CM_SIDR_REQ_ATTR_ID:
3168		event = IB_CM_SIDR_REQ_RECEIVED;
3169		break;
3170	case CM_SIDR_REP_ATTR_ID:
3171		event = IB_CM_SIDR_REP_RECEIVED;
3172		break;
3173	case CM_LAP_ATTR_ID:
3174		paths = 1;
3175		event = IB_CM_LAP_RECEIVED;
3176		break;
3177	case CM_APR_ATTR_ID:
3178		event = IB_CM_APR_RECEIVED;
3179		break;
3180	default:
3181		ib_free_recv_mad(mad_recv_wc);
3182		return;
3183	}
3184
3185	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3186		       GFP_KERNEL);
3187	if (!work) {
3188		ib_free_recv_mad(mad_recv_wc);
3189		return;
3190	}
3191
3192	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3193	work->cm_event.event = event;
3194	work->mad_recv_wc = mad_recv_wc;
3195	work->port = (struct cm_port *)mad_agent->context;
3196	queue_delayed_work(cm.wq, &work->work, 0);
3197}
3198
3199static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3200				struct ib_qp_attr *qp_attr,
3201				int *qp_attr_mask)
3202{
3203	unsigned long flags;
3204	int ret;
3205
3206	spin_lock_irqsave(&cm_id_priv->lock, flags);
3207	switch (cm_id_priv->id.state) {
3208	case IB_CM_REQ_SENT:
3209	case IB_CM_MRA_REQ_RCVD:
3210	case IB_CM_REQ_RCVD:
3211	case IB_CM_MRA_REQ_SENT:
3212	case IB_CM_REP_RCVD:
3213	case IB_CM_MRA_REP_SENT:
3214	case IB_CM_REP_SENT:
3215	case IB_CM_MRA_REP_RCVD:
3216	case IB_CM_ESTABLISHED:
3217		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3218				IB_QP_PKEY_INDEX | IB_QP_PORT;
3219		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3220		if (cm_id_priv->responder_resources)
3221			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3222						    IB_ACCESS_REMOTE_ATOMIC;
3223		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3224		qp_attr->port_num = cm_id_priv->av.port->port_num;
3225		ret = 0;
3226		break;
3227	default:
3228		ret = -EINVAL;
3229		break;
3230	}
3231	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3232	return ret;
3233}
3234
3235static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3236			       struct ib_qp_attr *qp_attr,
3237			       int *qp_attr_mask)
3238{
3239	unsigned long flags;
3240	int ret;
3241
3242	spin_lock_irqsave(&cm_id_priv->lock, flags);
3243	switch (cm_id_priv->id.state) {
3244	case IB_CM_REQ_RCVD:
3245	case IB_CM_MRA_REQ_SENT:
3246	case IB_CM_REP_RCVD:
3247	case IB_CM_MRA_REP_SENT:
3248	case IB_CM_REP_SENT:
3249	case IB_CM_MRA_REP_RCVD:
3250	case IB_CM_ESTABLISHED:
3251		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3252				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3253		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3254		qp_attr->path_mtu = cm_id_priv->path_mtu;
3255		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3256		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3257		if (cm_id_priv->qp_type == IB_QPT_RC) {
3258			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3259					 IB_QP_MIN_RNR_TIMER;
3260			qp_attr->max_dest_rd_atomic =
3261					cm_id_priv->responder_resources;
3262			qp_attr->min_rnr_timer = 0;
3263		}
3264		if (cm_id_priv->alt_av.ah_attr.dlid) {
3265			*qp_attr_mask |= IB_QP_ALT_PATH;
3266			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3267			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3268			qp_attr->alt_timeout =
3269					cm_id_priv->alt_av.packet_life_time + 1;
3270			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3271		}
3272		ret = 0;
3273		break;
3274	default:
3275		ret = -EINVAL;
3276		break;
3277	}
3278	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3279	return ret;
3280}
3281
3282static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3283			       struct ib_qp_attr *qp_attr,
3284			       int *qp_attr_mask)
3285{
3286	unsigned long flags;
3287	int ret;
3288
3289	spin_lock_irqsave(&cm_id_priv->lock, flags);
3290	switch (cm_id_priv->id.state) {
3291	/* Allow transition to RTS before sending REP */
3292	case IB_CM_REQ_RCVD:
3293	case IB_CM_MRA_REQ_SENT:
3294
3295	case IB_CM_REP_RCVD:
3296	case IB_CM_MRA_REP_SENT:
3297	case IB_CM_REP_SENT:
3298	case IB_CM_MRA_REP_RCVD:
3299	case IB_CM_ESTABLISHED:
3300		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3301			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3302			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3303			if (cm_id_priv->qp_type == IB_QPT_RC) {
3304				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3305						 IB_QP_RNR_RETRY |
3306						 IB_QP_MAX_QP_RD_ATOMIC;
3307				qp_attr->timeout =
3308					cm_id_priv->av.packet_life_time + 1;
3309				qp_attr->retry_cnt = cm_id_priv->retry_count;
3310				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3311				qp_attr->max_rd_atomic =
3312					cm_id_priv->initiator_depth;
3313			}
3314			if (cm_id_priv->alt_av.ah_attr.dlid) {
3315				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3316				qp_attr->path_mig_state = IB_MIG_REARM;
3317			}
3318		} else {
3319			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3320			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3321			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3322			qp_attr->alt_timeout =
3323				cm_id_priv->alt_av.packet_life_time + 1;
3324			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3325			qp_attr->path_mig_state = IB_MIG_REARM;
3326		}
3327		ret = 0;
3328		break;
3329	default:
3330		ret = -EINVAL;
3331		break;
3332	}
3333	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3334	return ret;
3335}
3336
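/*
 * ib_cm_init_qp_attr - Fill in the QP attributes and attribute mask needed
 * to transition the connection's QP to INIT, RTR or RTS, based on the
 * negotiated connection parameters.
 */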
3337int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3338		       struct ib_qp_attr *qp_attr,
3339		       int *qp_attr_mask)
3340{
3341	struct cm_id_private *cm_id_priv;
3342	int ret;
3343
3344	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3345	switch (qp_attr->qp_state) {
3346	case IB_QPS_INIT:
3347		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3348		break;
3349	case IB_QPS_RTR:
3350		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3351		break;
3352	case IB_QPS_RTS:
3353		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3354		break;
3355	default:
3356		ret = -EINVAL;
3357		break;
3358	}
3359	return ret;
3360}
3361EXPORT_SYMBOL(ib_cm_init_qp_attr);
3362
3363static void cm_add_one(struct ib_device *device)
3364{
3365	struct cm_device *cm_dev;
3366	struct cm_port *port;
3367	struct ib_mad_reg_req reg_req = {
3368		.mgmt_class = IB_MGMT_CLASS_CM,
3369		.mgmt_class_version = IB_CM_CLASS_VERSION
3370	};
3371	struct ib_port_modify port_modify = {
3372		.set_port_cap_mask = IB_PORT_CM_SUP
3373	};
3374	unsigned long flags;
3375	int ret;
3376	u8 i;
3377
3378	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
3379		return;
3380
3381	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3382			 device->phys_port_cnt, GFP_KERNEL);
3383	if (!cm_dev)
3384		return;
3385
3386	cm_dev->device = device;
3387
3388	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3389	for (i = 1; i <= device->phys_port_cnt; i++) {
3390		port = &cm_dev->port[i-1];
3391		port->cm_dev = cm_dev;
3392		port->port_num = i;
3393		port->mad_agent = ib_register_mad_agent(device, i,
3394							IB_QPT_GSI,
3395							&reg_req,
3396							0,
3397							cm_send_handler,
3398							cm_recv_handler,
3399							port);
3400		if (IS_ERR(port->mad_agent))
3401			goto error1;
3402
3403		ret = ib_modify_port(device, i, 0, &port_modify);
3404		if (ret)
3405			goto error2;
3406	}
3407	ib_set_client_data(device, &cm_client, cm_dev);
3408
3409	write_lock_irqsave(&cm.device_lock, flags);
3410	list_add_tail(&cm_dev->list, &cm.device_list);
3411	write_unlock_irqrestore(&cm.device_lock, flags);
3412	return;
3413
3414error2:
3415	ib_unregister_mad_agent(port->mad_agent);
3416error1:
3417	port_modify.set_port_cap_mask = 0;
3418	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3419	while (--i) {
3420		port = &cm_dev->port[i-1];
3421		ib_modify_port(device, port->port_num, 0, &port_modify);
3422		ib_unregister_mad_agent(port->mad_agent);
3423	}
3424	kfree(cm_dev);
3425}
3426
3427static void cm_remove_one(struct ib_device *device)
3428{
3429	struct cm_device *cm_dev;
3430	struct cm_port *port;
3431	struct ib_port_modify port_modify = {
3432		.clr_port_cap_mask = IB_PORT_CM_SUP
3433	};
3434	unsigned long flags;
3435	int i;
3436
3437	cm_dev = ib_get_client_data(device, &cm_client);
3438	if (!cm_dev)
3439		return;
3440
3441	write_lock_irqsave(&cm.device_lock, flags);
3442	list_del(&cm_dev->list);
3443	write_unlock_irqrestore(&cm.device_lock, flags);
3444
3445	for (i = 1; i <= device->phys_port_cnt; i++) {
3446		port = &cm_dev->port[i-1];
3447		ib_modify_port(device, port->port_num, 0, &port_modify);
3448		ib_unregister_mad_agent(port->mad_agent);
3449	}
3450	kfree(cm_dev);
3451}
3452
3453static int __init ib_cm_init(void)
3454{
3455	int ret;
3456
3457	memset(&cm, 0, sizeof cm);
3458	INIT_LIST_HEAD(&cm.device_list);
3459	rwlock_init(&cm.device_lock);
3460	spin_lock_init(&cm.lock);
3461	cm.listen_service_table = RB_ROOT;
3462	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3463	cm.remote_id_table = RB_ROOT;
3464	cm.remote_qp_table = RB_ROOT;
3465	cm.remote_sidr_table = RB_ROOT;
3466	idr_init(&cm.local_id_table);
3467	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
3468	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3469	INIT_LIST_HEAD(&cm.timewait_list);
3470
3471	cm.wq = create_workqueue("ib_cm");
3472	if (!cm.wq)
3473		return -ENOMEM;
3474
3475	ret = ib_register_client(&cm_client);
3476	if (ret)
3477		goto error;
3478
3479	return 0;
3480error:
3481	destroy_workqueue(cm.wq);
3482	return ret;
3483}
3484
3485static void __exit ib_cm_cleanup(void)
3486{
3487	struct cm_timewait_info *timewait_info, *tmp;
3488
3489	spin_lock_irq(&cm.lock);
3490	list_for_each_entry(timewait_info, &cm.timewait_list, list)
3491		cancel_delayed_work(&timewait_info->work.work);
3492	spin_unlock_irq(&cm.lock);
3493
3494	destroy_workqueue(cm.wq);
3495
3496	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
3497		list_del(&timewait_info->list);
3498		kfree(timewait_info);
3499	}
3500
3501	ib_unregister_client(&cm_client);
3502	idr_destroy(&cm.local_id_table);
3503}
3504
3505module_init(ib_cm_init);
3506module_exit(ib_cm_cleanup);
3507
3508