1/*
2 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
36 */
37#include <linux/dma-mapping.h>
38#include <linux/err.h>
39#include <linux/idr.h>
40#include <linux/interrupt.h>
41#include <linux/pci.h>
42#include <linux/rbtree.h>
43#include <linux/spinlock.h>
44#include <linux/workqueue.h>
45
46#include <rdma/ib_cache.h>
47#include <rdma/ib_cm.h>
48#include "cm_msgs.h"
49
50MODULE_AUTHOR("Sean Hefty");
51MODULE_DESCRIPTION("InfiniBand CM");
52MODULE_LICENSE("Dual BSD/GPL");
53
54static void cm_add_one(struct ib_device *device);
55static void cm_remove_one(struct ib_device *device);
56
57static struct ib_client cm_client = {
58	.name   = "cm",
59	.add    = cm_add_one,
60	.remove = cm_remove_one
61};
62
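/*
 * Global CM state: the list of registered devices, rb-trees for listen
 * service IDs and for remote ID/QPN/SIDR lookups, an IDR that assigns
 * local communication IDs, and the workqueue that processes CM events.
 */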
63static struct ib_cm {
64	spinlock_t lock;
65	struct list_head device_list;
66	rwlock_t device_lock;
67	struct rb_root listen_service_table;
68	u64 listen_service_id;
69	/* struct rb_root peer_service_table; todo: fix peer to peer */
70	struct rb_root remote_qp_table;
71	struct rb_root remote_id_table;
72	struct rb_root remote_sidr_table;
73	struct idr local_id_table;
74	struct workqueue_struct *wq;
75} cm;
76
77struct cm_port {
78	struct cm_device *cm_dev;
79	struct ib_mad_agent *mad_agent;
80	u8 port_num;
81};
82
83struct cm_device {
84	struct list_head list;
85	struct ib_device *device;
86	__be64 ca_guid;
87	struct cm_port port[0];
88};
89
90struct cm_av {
91	struct cm_port *port;
92	union ib_gid dgid;
93	struct ib_ah_attr ah_attr;
94	u16 pkey_index;
95	u8 packet_life_time;
96};
97
98struct cm_work {
99	struct work_struct work;
100	struct list_head list;
101	struct cm_port *port;
102	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
103	__be32 local_id;			/* Established / timewait */
104	__be32 remote_id;
105	struct ib_cm_event cm_event;
106	struct ib_sa_path_rec path[0];
107};
108
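/*
 * Per-connection timewait state.  The embedded cm_work is queued as
 * delayed work to deliver IB_CM_TIMEWAIT_EXIT, and the rb_nodes link
 * the entry into the remote ID and remote QPN tables while inserted.
 */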
109struct cm_timewait_info {
110	struct cm_work work;			/* Must be first. */
111	struct rb_node remote_qp_node;
112	struct rb_node remote_id_node;
113	__be64 remote_ca_guid;
114	__be32 remote_qpn;
115	u8 inserted_remote_qp;
116	u8 inserted_remote_id;
117};
118
119struct cm_id_private {
120	struct ib_cm_id	id;
121
122	struct rb_node service_node;
123	struct rb_node sidr_id_node;
124	spinlock_t lock;
125	wait_queue_head_t wait;
126	atomic_t refcount;
127
128	struct ib_mad_send_buf *msg;
129	struct cm_timewait_info *timewait_info;
130	/* todo: use alternate port on send failure */
131	struct cm_av av;
132	struct cm_av alt_av;
133
134	void *private_data;
135	__be64 tid;
136	__be32 local_qpn;
137	__be32 remote_qpn;
138	enum ib_qp_type qp_type;
139	__be32 sq_psn;
140	__be32 rq_psn;
141	int timeout_ms;
142	enum ib_mtu path_mtu;
143	u8 private_data_len;
144	u8 max_cm_retries;
145	u8 peer_to_peer;
146	u8 responder_resources;
147	u8 initiator_depth;
148	u8 local_ack_timeout;
149	u8 retry_count;
150	u8 rnr_retry_count;
151	u8 service_timeout;
152
153	struct list_head work_list;
154	atomic_t work_count;
155};
156
157static void cm_work_handler(void *data);
158
159static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
160{
161	if (atomic_dec_and_test(&cm_id_priv->refcount))
162		wake_up(&cm_id_priv->wait);
163}
164
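/*
 * Allocate a MAD send buffer for an outgoing CM message.  The address
 * handle comes from the cm_id's cached primary path, and a reference on
 * the cm_id is held until the message is released via cm_free_msg().
 */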
165static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
166			struct ib_mad_send_buf **msg)
167{
168	struct ib_mad_agent *mad_agent;
169	struct ib_mad_send_buf *m;
170	struct ib_ah *ah;
171
172	mad_agent = cm_id_priv->av.port->mad_agent;
173	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
174	if (IS_ERR(ah))
175		return PTR_ERR(ah);
176
177	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
178			       cm_id_priv->av.pkey_index,
179			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
180			       GFP_ATOMIC);
181	if (IS_ERR(m)) {
182		ib_destroy_ah(ah);
183		return PTR_ERR(m);
184	}
185
186	/* Timeout set by caller if response is expected. */
187	m->ah = ah;
188	m->retries = cm_id_priv->max_cm_retries;
189
190	atomic_inc(&cm_id_priv->refcount);
191	m->context[0] = cm_id_priv;
192	*msg = m;
193	return 0;
194}
195
196static int cm_alloc_response_msg(struct cm_port *port,
197				 struct ib_mad_recv_wc *mad_recv_wc,
198				 struct ib_mad_send_buf **msg)
199{
200	struct ib_mad_send_buf *m;
201	struct ib_ah *ah;
202
203	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
204				  mad_recv_wc->recv_buf.grh, port->port_num);
205	if (IS_ERR(ah))
206		return PTR_ERR(ah);
207
208	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
209			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
210			       GFP_ATOMIC);
211	if (IS_ERR(m)) {
212		ib_destroy_ah(ah);
213		return PTR_ERR(m);
214	}
215	m->ah = ah;
216	*msg = m;
217	return 0;
218}
219
220static void cm_free_msg(struct ib_mad_send_buf *msg)
221{
222	ib_destroy_ah(msg->ah);
223	if (msg->context[0])
224		cm_deref_id(msg->context[0]);
225	ib_free_send_mad(msg);
226}
227
228static void * cm_copy_private_data(const void *private_data,
229				   u8 private_data_len)
230{
231	void *data;
232
233	if (!private_data || !private_data_len)
234		return NULL;
235
236	data = kmalloc(private_data_len, GFP_KERNEL);
237	if (!data)
238		return ERR_PTR(-ENOMEM);
239
240	memcpy(data, private_data, private_data_len);
241	return data;
242}
243
244static void cm_set_private_data(struct cm_id_private *cm_id_priv,
245				 void *private_data, u8 private_data_len)
246{
247	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
248		kfree(cm_id_priv->private_data);
249
250	cm_id_priv->private_data = private_data;
251	cm_id_priv->private_data_len = private_data_len;
252}
253
254static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
255			   u16 dlid, u8 sl, u16 src_path_bits)
256{
257	memset(ah_attr, 0, sizeof *ah_attr);
258	ah_attr->dlid = dlid;
259	ah_attr->sl = sl;
260	ah_attr->src_path_bits = src_path_bits;
261	ah_attr->port_num = port_num;
262}
263
264static void cm_init_av_for_response(struct cm_port *port,
265				    struct ib_wc *wc, struct cm_av *av)
266{
267	av->port = port;
268	av->pkey_index = wc->pkey_index;
269	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
270		       wc->sl, wc->dlid_path_bits);
271}
272
273static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
274{
275	struct cm_device *cm_dev;
276	struct cm_port *port = NULL;
277	unsigned long flags;
278	int ret;
279	u8 p;
280
281	read_lock_irqsave(&cm.device_lock, flags);
282	list_for_each_entry(cm_dev, &cm.device_list, list) {
283		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
284					&p, NULL)) {
285			port = &cm_dev->port[p-1];
286			break;
287		}
288	}
289	read_unlock_irqrestore(&cm.device_lock, flags);
290
291	if (!port)
292		return -EINVAL;
293
294	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
295				  be16_to_cpu(path->pkey), &av->pkey_index);
296	if (ret)
297		return ret;
298
299	av->port = port;
300	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
301		       be16_to_cpu(path->dlid), path->sl,
302		       be16_to_cpu(path->slid) & 0x7F);
303	av->packet_life_time = path->packet_life_time;
304	return 0;
305}
306
307static int cm_alloc_id(struct cm_id_private *cm_id_priv)
308{
309	unsigned long flags;
310	int ret;
311
312	do {
313		spin_lock_irqsave(&cm.lock, flags);
314		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
315					(__force int *) &cm_id_priv->id.local_id);
316		spin_unlock_irqrestore(&cm.lock, flags);
317	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
318	return ret;
319}
320
321static void cm_free_id(__be32 local_id)
322{
323	unsigned long flags;
324
325	spin_lock_irqsave(&cm.lock, flags);
326	idr_remove(&cm.local_id_table, (__force int) local_id);
327	spin_unlock_irqrestore(&cm.lock, flags);
328}
329
330static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
331{
332	struct cm_id_private *cm_id_priv;
333
334	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
335	if (cm_id_priv) {
336		if (cm_id_priv->id.remote_id == remote_id)
337			atomic_inc(&cm_id_priv->refcount);
338		else
339			cm_id_priv = NULL;
340	}
341
342	return cm_id_priv;
343}
344
345static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
346{
347	struct cm_id_private *cm_id_priv;
348	unsigned long flags;
349
350	spin_lock_irqsave(&cm.lock, flags);
351	cm_id_priv = cm_get_id(local_id, remote_id);
352	spin_unlock_irqrestore(&cm.lock, flags);
353
354	return cm_id_priv;
355}
356
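/*
 * Listens are kept in an rb-tree ordered by device and service ID.
 * Returns an existing cm_id whose masked service ID overlaps the new
 * listen, or NULL once the new node has been inserted.
 */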
357static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
358{
359	struct rb_node **link = &cm.listen_service_table.rb_node;
360	struct rb_node *parent = NULL;
361	struct cm_id_private *cur_cm_id_priv;
362	__be64 service_id = cm_id_priv->id.service_id;
363	__be64 service_mask = cm_id_priv->id.service_mask;
364
365	while (*link) {
366		parent = *link;
367		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
368					  service_node);
369		if ((cur_cm_id_priv->id.service_mask & service_id) ==
370		    (service_mask & cur_cm_id_priv->id.service_id) &&
371		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
372			return cur_cm_id_priv;
373
374		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
375			link = &(*link)->rb_left;
376		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
377			link = &(*link)->rb_right;
378		else if (service_id < cur_cm_id_priv->id.service_id)
379			link = &(*link)->rb_left;
380		else
381			link = &(*link)->rb_right;
382	}
383	rb_link_node(&cm_id_priv->service_node, parent, link);
384	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
385	return NULL;
386}
387
388static struct cm_id_private * cm_find_listen(struct ib_device *device,
389					     __be64 service_id)
390{
391	struct rb_node *node = cm.listen_service_table.rb_node;
392	struct cm_id_private *cm_id_priv;
393
394	while (node) {
395		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
396		if ((cm_id_priv->id.service_mask & service_id) ==
397		     cm_id_priv->id.service_id &&
398		    (cm_id_priv->id.device == device))
399			return cm_id_priv;
400
401		if (device < cm_id_priv->id.device)
402			node = node->rb_left;
403		else if (device > cm_id_priv->id.device)
404			node = node->rb_right;
405		else if (service_id < cm_id_priv->id.service_id)
406			node = node->rb_left;
407		else
408			node = node->rb_right;
409	}
410	return NULL;
411}
412
413static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
414						     *timewait_info)
415{
416	struct rb_node **link = &cm.remote_id_table.rb_node;
417	struct rb_node *parent = NULL;
418	struct cm_timewait_info *cur_timewait_info;
419	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
420	__be32 remote_id = timewait_info->work.remote_id;
421
422	while (*link) {
423		parent = *link;
424		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
425					     remote_id_node);
426		if (remote_id < cur_timewait_info->work.remote_id)
427			link = &(*link)->rb_left;
428		else if (remote_id > cur_timewait_info->work.remote_id)
429			link = &(*link)->rb_right;
430		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
431			link = &(*link)->rb_left;
432		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
433			link = &(*link)->rb_right;
434		else
435			return cur_timewait_info;
436	}
437	timewait_info->inserted_remote_id = 1;
438	rb_link_node(&timewait_info->remote_id_node, parent, link);
439	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
440	return NULL;
441}
442
443static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
444						   __be32 remote_id)
445{
446	struct rb_node *node = cm.remote_id_table.rb_node;
447	struct cm_timewait_info *timewait_info;
448
449	while (node) {
450		timewait_info = rb_entry(node, struct cm_timewait_info,
451					 remote_id_node);
452		if (remote_id < timewait_info->work.remote_id)
453			node = node->rb_left;
454		else if (remote_id > timewait_info->work.remote_id)
455			node = node->rb_right;
456		else if (remote_ca_guid < timewait_info->remote_ca_guid)
457			node = node->rb_left;
458		else if (remote_ca_guid > timewait_info->remote_ca_guid)
459			node = node->rb_right;
460		else
461			return timewait_info;
462	}
463	return NULL;
464}
465
466static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
467						      *timewait_info)
468{
469	struct rb_node **link = &cm.remote_qp_table.rb_node;
470	struct rb_node *parent = NULL;
471	struct cm_timewait_info *cur_timewait_info;
472	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
473	__be32 remote_qpn = timewait_info->remote_qpn;
474
475	while (*link) {
476		parent = *link;
477		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
478					     remote_qp_node);
479		if (remote_qpn < cur_timewait_info->remote_qpn)
480			link = &(*link)->rb_left;
481		else if (remote_qpn > cur_timewait_info->remote_qpn)
482			link = &(*link)->rb_right;
483		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
484			link = &(*link)->rb_left;
485		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
486			link = &(*link)->rb_right;
487		else
488			return cur_timewait_info;
489	}
490	timewait_info->inserted_remote_qp = 1;
491	rb_link_node(&timewait_info->remote_qp_node, parent, link);
492	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
493	return NULL;
494}
495
496static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
497						    *cm_id_priv)
498{
499	struct rb_node **link = &cm.remote_sidr_table.rb_node;
500	struct rb_node *parent = NULL;
501	struct cm_id_private *cur_cm_id_priv;
502	union ib_gid *port_gid = &cm_id_priv->av.dgid;
503	__be32 remote_id = cm_id_priv->id.remote_id;
504
505	while (*link) {
506		parent = *link;
507		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
508					  sidr_id_node);
509		if (remote_id < cur_cm_id_priv->id.remote_id)
510			link = &(*link)->rb_left;
511		else if (remote_id > cur_cm_id_priv->id.remote_id)
512			link = &(*link)->rb_right;
513		else {
514			int cmp;
515			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
516				     sizeof *port_gid);
517			if (cmp < 0)
518				link = &(*link)->rb_left;
519			else if (cmp > 0)
520				link = &(*link)->rb_right;
521			else
522				return cur_cm_id_priv;
523		}
524	}
525	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
526	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
527	return NULL;
528}
529
530static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
531			       enum ib_cm_sidr_status status)
532{
533	struct ib_cm_sidr_rep_param param;
534
535	memset(&param, 0, sizeof param);
536	param.status = status;
537	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
538}
539
540struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
541				 ib_cm_handler cm_handler,
542				 void *context)
543{
544	struct cm_id_private *cm_id_priv;
545	int ret;
546
547	cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
548	if (!cm_id_priv)
549		return ERR_PTR(-ENOMEM);
550
551	memset(cm_id_priv, 0, sizeof *cm_id_priv);
552	cm_id_priv->id.state = IB_CM_IDLE;
553	cm_id_priv->id.device = device;
554	cm_id_priv->id.cm_handler = cm_handler;
555	cm_id_priv->id.context = context;
556	cm_id_priv->id.remote_cm_qpn = 1;
557	ret = cm_alloc_id(cm_id_priv);
558	if (ret)
559		goto error;
560
561	spin_lock_init(&cm_id_priv->lock);
562	init_waitqueue_head(&cm_id_priv->wait);
563	INIT_LIST_HEAD(&cm_id_priv->work_list);
564	atomic_set(&cm_id_priv->work_count, -1);
565	atomic_set(&cm_id_priv->refcount, 1);
566	return &cm_id_priv->id;
567
568error:
569	kfree(cm_id_priv);
570	return ERR_PTR(-ENOMEM);
571}
572EXPORT_SYMBOL(ib_create_cm_id);
573
574static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
575{
576	struct cm_work *work;
577
578	if (list_empty(&cm_id_priv->work_list))
579		return NULL;
580
581	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
582	list_del(&work->list);
583	return work;
584}
585
586static void cm_free_work(struct cm_work *work)
587{
588	if (work->mad_recv_wc)
589		ib_free_recv_mad(work->mad_recv_wc);
590	kfree(work);
591}
592
593static inline int cm_convert_to_ms(int iba_time)
594{
595	/* approximate conversion to ms from 4.096us x 2^iba_time */
596	return 1 << max(iba_time - 8, 0);
597}
598
599static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
600{
601	unsigned long flags;
602
603	if (!timewait_info->inserted_remote_id &&
604	    !timewait_info->inserted_remote_qp)
605		return;
606
607	spin_lock_irqsave(&cm.lock, flags);
608	if (timewait_info->inserted_remote_id) {
609		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
610		timewait_info->inserted_remote_id = 0;
611	}
612
613	if (timewait_info->inserted_remote_qp) {
614		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
615		timewait_info->inserted_remote_qp = 0;
616	}
617	spin_unlock_irqrestore(&cm.lock, flags);
618}
619
620static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
621{
622	struct cm_timewait_info *timewait_info;
623
624	timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
625	if (!timewait_info)
626		return ERR_PTR(-ENOMEM);
627	memset(timewait_info, 0, sizeof *timewait_info);
628
629	timewait_info->work.local_id = local_id;
630	INIT_WORK(&timewait_info->work.work, cm_work_handler,
631		  &timewait_info->work);
632	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
633	return timewait_info;
634}
635
636static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
637{
638	int wait_time;
639
640	/*
641	 * The cm_id could be destroyed by the user before we exit timewait.
642	 * To protect against this, we search for the cm_id after exiting
643	 * timewait before notifying the user that we've exited timewait.
644	 */
645	cm_id_priv->id.state = IB_CM_TIMEWAIT;
646	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
647	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
648			   msecs_to_jiffies(wait_time));
649	cm_id_priv->timewait_info = NULL;
650}
651
652static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
653{
654	cm_id_priv->id.state = IB_CM_IDLE;
655	if (cm_id_priv->timewait_info) {
656		cm_cleanup_timewait(cm_id_priv->timewait_info);
657		kfree(cm_id_priv->timewait_info);
658		cm_id_priv->timewait_info = NULL;
659	}
660}
661
662void ib_destroy_cm_id(struct ib_cm_id *cm_id)
663{
664	struct cm_id_private *cm_id_priv;
665	struct cm_work *work;
666	unsigned long flags;
667
668	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
669retest:
670	spin_lock_irqsave(&cm_id_priv->lock, flags);
671	switch (cm_id->state) {
672	case IB_CM_LISTEN:
673		cm_id->state = IB_CM_IDLE;
674		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
675		spin_lock_irqsave(&cm.lock, flags);
676		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
677		spin_unlock_irqrestore(&cm.lock, flags);
678		break;
679	case IB_CM_SIDR_REQ_SENT:
680		cm_id->state = IB_CM_IDLE;
681		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
682		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
683		break;
684	case IB_CM_SIDR_REQ_RCVD:
685		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
686		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
687		break;
688	case IB_CM_REQ_SENT:
689	case IB_CM_MRA_REQ_RCVD:
690	case IB_CM_REP_SENT:
691	case IB_CM_MRA_REP_RCVD:
692		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
693		/* Fall through */
694	case IB_CM_REQ_RCVD:
695	case IB_CM_MRA_REQ_SENT:
696	case IB_CM_REP_RCVD:
697	case IB_CM_MRA_REP_SENT:
698		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
699		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
700			       &cm_id_priv->av.port->cm_dev->ca_guid,
701			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
702			       NULL, 0);
703		break;
704	case IB_CM_ESTABLISHED:
705		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
706		ib_send_cm_dreq(cm_id, NULL, 0);
707		goto retest;
708	case IB_CM_DREQ_SENT:
709		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
710		cm_enter_timewait(cm_id_priv);
711		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
712		break;
713	case IB_CM_DREQ_RCVD:
714		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
715		ib_send_cm_drep(cm_id, NULL, 0);
716		break;
717	default:
718		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
719		break;
720	}
721
722	cm_free_id(cm_id->local_id);
723	atomic_dec(&cm_id_priv->refcount);
724	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
725	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
726		cm_free_work(work);
727	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
728		kfree(cm_id_priv->private_data);
729	kfree(cm_id_priv);
730}
731EXPORT_SYMBOL(ib_destroy_cm_id);
732
733int ib_cm_listen(struct ib_cm_id *cm_id,
734		 __be64 service_id,
735		 __be64 service_mask)
736{
737	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
738	unsigned long flags;
739	int ret = 0;
740
741	service_mask = service_mask ? service_mask :
742		       __constant_cpu_to_be64(~0ULL);
743	service_id &= service_mask;
744	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
745	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
746		return -EINVAL;
747
748	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
749	BUG_ON(cm_id->state != IB_CM_IDLE);
750
751	cm_id->state = IB_CM_LISTEN;
752
753	spin_lock_irqsave(&cm.lock, flags);
754	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
755		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
756		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
757	} else {
758		cm_id->service_id = service_id;
759		cm_id->service_mask = service_mask;
760	}
761	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
762	spin_unlock_irqrestore(&cm.lock, flags);
763
764	if (cur_cm_id_priv) {
765		cm_id->state = IB_CM_IDLE;
766		ret = -EBUSY;
767	}
768	return ret;
769}
770EXPORT_SYMBOL(ib_cm_listen);
771
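/*
 * Build the 64-bit MAD transaction ID: the agent's hi_tid occupies the
 * upper 32 bits; the local communication ID plus a message-sequence tag
 * (bits 30-31) occupy the lower 32 bits.
 */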
772static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
773			  enum cm_msg_sequence msg_seq)
774{
775	u64 hi_tid, low_tid;
776
777	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
778	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
779			  (msg_seq << 30));
780	return cpu_to_be64(hi_tid | low_tid);
781}
782
783static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
784			      __be16 attr_id, __be64 tid)
785{
786	hdr->base_version  = IB_MGMT_BASE_VERSION;
787	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
788	hdr->class_version = IB_CM_CLASS_VERSION;
789	hdr->method	   = IB_MGMT_METHOD_SEND;
790	hdr->attr_id	   = attr_id;
791	hdr->tid	   = tid;
792}
793
794static void cm_format_req(struct cm_req_msg *req_msg,
795			  struct cm_id_private *cm_id_priv,
796			  struct ib_cm_req_param *param)
797{
798	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
799			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
800
801	req_msg->local_comm_id = cm_id_priv->id.local_id;
802	req_msg->service_id = param->service_id;
803	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
804	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
805	cm_req_set_resp_res(req_msg, param->responder_resources);
806	cm_req_set_init_depth(req_msg, param->initiator_depth);
807	cm_req_set_remote_resp_timeout(req_msg,
808				       param->remote_cm_response_timeout);
809	cm_req_set_qp_type(req_msg, param->qp_type);
810	cm_req_set_flow_ctrl(req_msg, param->flow_control);
811	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
812	cm_req_set_local_resp_timeout(req_msg,
813				      param->local_cm_response_timeout);
814	cm_req_set_retry_count(req_msg, param->retry_count);
815	req_msg->pkey = param->primary_path->pkey;
816	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
817	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
818	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
819	cm_req_set_srq(req_msg, param->srq);
820
821	req_msg->primary_local_lid = param->primary_path->slid;
822	req_msg->primary_remote_lid = param->primary_path->dlid;
823	req_msg->primary_local_gid = param->primary_path->sgid;
824	req_msg->primary_remote_gid = param->primary_path->dgid;
825	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
826	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
827	req_msg->primary_traffic_class = param->primary_path->traffic_class;
828	req_msg->primary_hop_limit = param->primary_path->hop_limit;
829	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
830	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
831	cm_req_set_primary_local_ack_timeout(req_msg,
832		min(31, param->primary_path->packet_life_time + 1));
833
834	if (param->alternate_path) {
835		req_msg->alt_local_lid = param->alternate_path->slid;
836		req_msg->alt_remote_lid = param->alternate_path->dlid;
837		req_msg->alt_local_gid = param->alternate_path->sgid;
838		req_msg->alt_remote_gid = param->alternate_path->dgid;
839		cm_req_set_alt_flow_label(req_msg,
840					  param->alternate_path->flow_label);
841		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
842		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
843		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
844		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
845		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
846		cm_req_set_alt_local_ack_timeout(req_msg,
847			min(31, param->alternate_path->packet_life_time + 1));
848	}
849
850	if (param->private_data && param->private_data_len)
851		memcpy(req_msg->private_data, param->private_data,
852		       param->private_data_len);
853}
854
855static inline int cm_validate_req_param(struct ib_cm_req_param *param)
856{
857	/* peer-to-peer not supported */
858	if (param->peer_to_peer)
859		return -EINVAL;
860
861	if (!param->primary_path)
862		return -EINVAL;
863
864	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
865		return -EINVAL;
866
867	if (param->private_data &&
868	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
869		return -EINVAL;
870
871	if (param->alternate_path &&
872	    (param->alternate_path->pkey != param->primary_path->pkey ||
873	     param->alternate_path->mtu != param->primary_path->mtu))
874		return -EINVAL;
875
876	return 0;
877}
878
879int ib_send_cm_req(struct ib_cm_id *cm_id,
880		   struct ib_cm_req_param *param)
881{
882	struct cm_id_private *cm_id_priv;
883	struct cm_req_msg *req_msg;
884	unsigned long flags;
885	int ret;
886
887	ret = cm_validate_req_param(param);
888	if (ret)
889		return ret;
890
891	/* Verify that we're not in timewait. */
892	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
893	spin_lock_irqsave(&cm_id_priv->lock, flags);
894	if (cm_id->state != IB_CM_IDLE) {
895		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
896		ret = -EINVAL;
897		goto out;
898	}
899	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
900
901	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
902							    id.local_id);
903	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
904		goto out;
	}
905
906	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
907	if (ret)
908		goto error1;
909	if (param->alternate_path) {
910		ret = cm_init_av_by_path(param->alternate_path,
911					 &cm_id_priv->alt_av);
912		if (ret)
913			goto error1;
914	}
915	cm_id->service_id = param->service_id;
916	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
917	cm_id_priv->timeout_ms = cm_convert_to_ms(
918				    param->primary_path->packet_life_time) * 2 +
919				 cm_convert_to_ms(
920				    param->remote_cm_response_timeout);
921	cm_id_priv->max_cm_retries = param->max_cm_retries;
922	cm_id_priv->initiator_depth = param->initiator_depth;
923	cm_id_priv->responder_resources = param->responder_resources;
924	cm_id_priv->retry_count = param->retry_count;
925	cm_id_priv->path_mtu = param->primary_path->mtu;
926	cm_id_priv->qp_type = param->qp_type;
927
928	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
929	if (ret)
930		goto error1;
931
932	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
933	cm_format_req(req_msg, cm_id_priv, param);
934	cm_id_priv->tid = req_msg->hdr.tid;
935	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
936	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
937
938	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
939	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
940	cm_id_priv->local_ack_timeout =
941				cm_req_get_primary_local_ack_timeout(req_msg);
942
943	spin_lock_irqsave(&cm_id_priv->lock, flags);
944	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
945	if (ret) {
946		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
947		goto error2;
948	}
949	BUG_ON(cm_id->state != IB_CM_IDLE);
950	cm_id->state = IB_CM_REQ_SENT;
951	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
952	return 0;
953
954error2:	cm_free_msg(cm_id_priv->msg);
955error1:	kfree(cm_id_priv->timewait_info);
956out:	return ret;
957}
958EXPORT_SYMBOL(ib_send_cm_req);
959
960static int cm_issue_rej(struct cm_port *port,
961			struct ib_mad_recv_wc *mad_recv_wc,
962			enum ib_cm_rej_reason reason,
963			enum cm_msg_response msg_rejected,
964			void *ari, u8 ari_length)
965{
966	struct ib_mad_send_buf *msg = NULL;
967	struct cm_rej_msg *rej_msg, *rcv_msg;
968	int ret;
969
970	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
971	if (ret)
972		return ret;
973
974	/* We just need common CM header information.  Cast to any message. */
975	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
976	rej_msg = (struct cm_rej_msg *) msg->mad;
977
978	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
979	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
980	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
981	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
982	rej_msg->reason = cpu_to_be16(reason);
983
984	if (ari && ari_length) {
985		cm_rej_set_reject_info_len(rej_msg, ari_length);
986		memcpy(rej_msg->ari, ari, ari_length);
987	}
988
989	ret = ib_post_send_mad(msg, NULL);
990	if (ret)
991		cm_free_msg(msg);
992
993	return ret;
994}
995
996static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
997				    __be32 local_qpn, __be32 remote_qpn)
998{
999	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1000		((local_ca_guid == remote_ca_guid) &&
1001		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1002}
1003
1004static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1005					    struct ib_sa_path_rec *primary_path,
1006					    struct ib_sa_path_rec *alt_path)
1007{
1008	memset(primary_path, 0, sizeof *primary_path);
1009	primary_path->dgid = req_msg->primary_local_gid;
1010	primary_path->sgid = req_msg->primary_remote_gid;
1011	primary_path->dlid = req_msg->primary_local_lid;
1012	primary_path->slid = req_msg->primary_remote_lid;
1013	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1014	primary_path->hop_limit = req_msg->primary_hop_limit;
1015	primary_path->traffic_class = req_msg->primary_traffic_class;
1016	primary_path->reversible = 1;
1017	primary_path->pkey = req_msg->pkey;
1018	primary_path->sl = cm_req_get_primary_sl(req_msg);
1019	primary_path->mtu_selector = IB_SA_EQ;
1020	primary_path->mtu = cm_req_get_path_mtu(req_msg);
1021	primary_path->rate_selector = IB_SA_EQ;
1022	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1023	primary_path->packet_life_time_selector = IB_SA_EQ;
1024	primary_path->packet_life_time =
1025		cm_req_get_primary_local_ack_timeout(req_msg);
1026	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1027
1028	if (req_msg->alt_local_lid) {
1029		memset(alt_path, 0, sizeof *alt_path);
1030		alt_path->dgid = req_msg->alt_local_gid;
1031		alt_path->sgid = req_msg->alt_remote_gid;
1032		alt_path->dlid = req_msg->alt_local_lid;
1033		alt_path->slid = req_msg->alt_remote_lid;
1034		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1035		alt_path->hop_limit = req_msg->alt_hop_limit;
1036		alt_path->traffic_class = req_msg->alt_traffic_class;
1037		alt_path->reversible = 1;
1038		alt_path->pkey = req_msg->pkey;
1039		alt_path->sl = cm_req_get_alt_sl(req_msg);
1040		alt_path->mtu_selector = IB_SA_EQ;
1041		alt_path->mtu = cm_req_get_path_mtu(req_msg);
1042		alt_path->rate_selector = IB_SA_EQ;
1043		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1044		alt_path->packet_life_time_selector = IB_SA_EQ;
1045		alt_path->packet_life_time =
1046			cm_req_get_alt_local_ack_timeout(req_msg);
1047		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1048	}
1049}
1050
1051static void cm_format_req_event(struct cm_work *work,
1052				struct cm_id_private *cm_id_priv,
1053				struct ib_cm_id *listen_id)
1054{
1055	struct cm_req_msg *req_msg;
1056	struct ib_cm_req_event_param *param;
1057
1058	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1059	param = &work->cm_event.param.req_rcvd;
1060	param->listen_id = listen_id;
1061	param->port = cm_id_priv->av.port->port_num;
1062	param->primary_path = &work->path[0];
1063	if (req_msg->alt_local_lid)
1064		param->alternate_path = &work->path[1];
1065	else
1066		param->alternate_path = NULL;
1067	param->remote_ca_guid = req_msg->local_ca_guid;
1068	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1069	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1070	param->qp_type = cm_req_get_qp_type(req_msg);
1071	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1072	param->responder_resources = cm_req_get_init_depth(req_msg);
1073	param->initiator_depth = cm_req_get_resp_res(req_msg);
1074	param->local_cm_response_timeout =
1075					cm_req_get_remote_resp_timeout(req_msg);
1076	param->flow_control = cm_req_get_flow_ctrl(req_msg);
1077	param->remote_cm_response_timeout =
1078					cm_req_get_local_resp_timeout(req_msg);
1079	param->retry_count = cm_req_get_retry_count(req_msg);
1080	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1081	param->srq = cm_req_get_srq(req_msg);
1082	work->cm_event.private_data = &req_msg->private_data;
1083}
1084
1085static void cm_process_work(struct cm_id_private *cm_id_priv,
1086			    struct cm_work *work)
1087{
1088	unsigned long flags;
1089	int ret;
1090
1091	/* We will typically only have the current event to report. */
1092	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1093	cm_free_work(work);
1094
1095	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1096		spin_lock_irqsave(&cm_id_priv->lock, flags);
1097		work = cm_dequeue_work(cm_id_priv);
1098		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1099		BUG_ON(!work);
1100		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1101						&work->cm_event);
1102		cm_free_work(work);
1103	}
1104	cm_deref_id(cm_id_priv);
1105	if (ret)
1106		ib_destroy_cm_id(&cm_id_priv->id);
1107}
1108
1109static void cm_format_mra(struct cm_mra_msg *mra_msg,
1110			  struct cm_id_private *cm_id_priv,
1111			  enum cm_msg_response msg_mraed, u8 service_timeout,
1112			  const void *private_data, u8 private_data_len)
1113{
1114	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1115	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1116	mra_msg->local_comm_id = cm_id_priv->id.local_id;
1117	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1118	cm_mra_set_service_timeout(mra_msg, service_timeout);
1119
1120	if (private_data && private_data_len)
1121		memcpy(mra_msg->private_data, private_data, private_data_len);
1122}
1123
1124static void cm_format_rej(struct cm_rej_msg *rej_msg,
1125			  struct cm_id_private *cm_id_priv,
1126			  enum ib_cm_rej_reason reason,
1127			  void *ari,
1128			  u8 ari_length,
1129			  const void *private_data,
1130			  u8 private_data_len)
1131{
1132	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1133	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1134
1135	switch(cm_id_priv->id.state) {
1136	case IB_CM_REQ_RCVD:
1137		rej_msg->local_comm_id = 0;
1138		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1139		break;
1140	case IB_CM_MRA_REQ_SENT:
1141		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1142		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1143		break;
1144	case IB_CM_REP_RCVD:
1145	case IB_CM_MRA_REP_SENT:
1146		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1147		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1148		break;
1149	default:
1150		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1151		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1152		break;
1153	}
1154
1155	rej_msg->reason = cpu_to_be16(reason);
1156	if (ari && ari_length) {
1157		cm_rej_set_reject_info_len(rej_msg, ari_length);
1158		memcpy(rej_msg->ari, ari, ari_length);
1159	}
1160
1161	if (private_data && private_data_len)
1162		memcpy(rej_msg->private_data, private_data, private_data_len);
1163}
1164
1165static void cm_dup_req_handler(struct cm_work *work,
1166			       struct cm_id_private *cm_id_priv)
1167{
1168	struct ib_mad_send_buf *msg = NULL;
1169	unsigned long flags;
1170	int ret;
1171
1172	/* Quick state check to discard duplicate REQs. */
1173	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1174		return;
1175
1176	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1177	if (ret)
1178		return;
1179
1180	spin_lock_irqsave(&cm_id_priv->lock, flags);
1181	switch (cm_id_priv->id.state) {
1182	case IB_CM_MRA_REQ_SENT:
1183		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1184			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1185			      cm_id_priv->private_data,
1186			      cm_id_priv->private_data_len);
1187		break;
1188	case IB_CM_TIMEWAIT:
1189		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1190			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1191		break;
1192	default:
1193		goto unlock;
1194	}
1195	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1196
1197	ret = ib_post_send_mad(msg, NULL);
1198	if (ret)
1199		goto free;
1200	return;
1201
1202unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1203free:	cm_free_msg(msg);
1204}
1205
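/*
 * Match an incoming REQ: the timewait trees catch duplicate REQs and
 * stale connections, then the listen table is searched for the requested
 * service ID.  Returns the referenced listen cm_id, or NULL if the REQ
 * was rejected or handled as a duplicate.
 */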
1206static struct cm_id_private * cm_match_req(struct cm_work *work,
1207					   struct cm_id_private *cm_id_priv)
1208{
1209	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1210	struct cm_timewait_info *timewait_info;
1211	struct cm_req_msg *req_msg;
1212	unsigned long flags;
1213
1214	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1215
1216	/* Check for duplicate REQ and stale connections. */
1217	spin_lock_irqsave(&cm.lock, flags);
1218	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1219	if (!timewait_info)
1220		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1221
1222	if (timewait_info) {
1223		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1224					   timewait_info->work.remote_id);
1225		spin_unlock_irqrestore(&cm.lock, flags);
1226		if (cur_cm_id_priv) {
1227			cm_dup_req_handler(work, cur_cm_id_priv);
1228			cm_deref_id(cur_cm_id_priv);
1229		} else
1230			cm_issue_rej(work->port, work->mad_recv_wc,
1231				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1232				     NULL, 0);
1233		goto error;
1234	}
1235
1236	/* Find matching listen request. */
1237	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1238					   req_msg->service_id);
1239	if (!listen_cm_id_priv) {
1240		spin_unlock_irqrestore(&cm.lock, flags);
1241		cm_issue_rej(work->port, work->mad_recv_wc,
1242			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1243			     NULL, 0);
1244		goto error;
1245	}
1246	atomic_inc(&listen_cm_id_priv->refcount);
1247	atomic_inc(&cm_id_priv->refcount);
1248	cm_id_priv->id.state = IB_CM_REQ_RCVD;
1249	atomic_inc(&cm_id_priv->work_count);
1250	spin_unlock_irqrestore(&cm.lock, flags);
1251	return listen_cm_id_priv;
1252
1253error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
1254	return NULL;
1255}
1256
1257static int cm_req_handler(struct cm_work *work)
1258{
1259	struct ib_cm_id *cm_id;
1260	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1261	struct cm_req_msg *req_msg;
1262	int ret;
1263
1264	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1265
1266	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
1267	if (IS_ERR(cm_id))
1268		return PTR_ERR(cm_id);
1269
1270	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1271	cm_id_priv->id.remote_id = req_msg->local_comm_id;
1272	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1273				&cm_id_priv->av);
1274	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1275							    id.local_id);
1276	if (IS_ERR(cm_id_priv->timewait_info)) {
1277		ret = PTR_ERR(cm_id_priv->timewait_info);
1278		goto error1;
1279	}
1280	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1281	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1282	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1283
1284	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1285	if (!listen_cm_id_priv) {
1286		ret = -EINVAL;
1287		goto error2;
1288	}
1289
1290	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1291	cm_id_priv->id.context = listen_cm_id_priv->id.context;
1292	cm_id_priv->id.service_id = req_msg->service_id;
1293	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
1294
1295	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1296	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1297	if (ret)
1298		goto error3;
1299	if (req_msg->alt_local_lid) {
1300		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1301		if (ret)
1302			goto error3;
1303	}
1304	cm_id_priv->tid = req_msg->hdr.tid;
1305	cm_id_priv->timeout_ms = cm_convert_to_ms(
1306					cm_req_get_local_resp_timeout(req_msg));
1307	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1308	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1309	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1310	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1311	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1312	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1313	cm_id_priv->local_ack_timeout =
1314				cm_req_get_primary_local_ack_timeout(req_msg);
1315	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1316	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1317	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1318
1319	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1320	cm_process_work(cm_id_priv, work);
1321	cm_deref_id(listen_cm_id_priv);
1322	return 0;
1323
1324error3:	atomic_dec(&cm_id_priv->refcount);
1325	cm_deref_id(listen_cm_id_priv);
1326	cm_cleanup_timewait(cm_id_priv->timewait_info);
1327error2:	kfree(cm_id_priv->timewait_info);
1328	cm_id_priv->timewait_info = NULL;
1329error1:	ib_destroy_cm_id(&cm_id_priv->id);
1330	return ret;
1331}
1332
1333static void cm_format_rep(struct cm_rep_msg *rep_msg,
1334			  struct cm_id_private *cm_id_priv,
1335			  struct ib_cm_rep_param *param)
1336{
1337	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1338	rep_msg->local_comm_id = cm_id_priv->id.local_id;
1339	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1340	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1341	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1342	rep_msg->resp_resources = param->responder_resources;
1343	rep_msg->initiator_depth = param->initiator_depth;
1344	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1345	cm_rep_set_failover(rep_msg, param->failover_accepted);
1346	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1347	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1348	cm_rep_set_srq(rep_msg, param->srq);
1349	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1350
1351	if (param->private_data && param->private_data_len)
1352		memcpy(rep_msg->private_data, param->private_data,
1353		       param->private_data_len);
1354}
1355
1356int ib_send_cm_rep(struct ib_cm_id *cm_id,
1357		   struct ib_cm_rep_param *param)
1358{
1359	struct cm_id_private *cm_id_priv;
1360	struct ib_mad_send_buf *msg;
1361	struct cm_rep_msg *rep_msg;
1362	unsigned long flags;
1363	int ret;
1364
1365	if (param->private_data &&
1366	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1367		return -EINVAL;
1368
1369	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1370	spin_lock_irqsave(&cm_id_priv->lock, flags);
1371	if (cm_id->state != IB_CM_REQ_RCVD &&
1372	    cm_id->state != IB_CM_MRA_REQ_SENT) {
1373		ret = -EINVAL;
1374		goto out;
1375	}
1376
1377	ret = cm_alloc_msg(cm_id_priv, &msg);
1378	if (ret)
1379		goto out;
1380
1381	rep_msg = (struct cm_rep_msg *) msg->mad;
1382	cm_format_rep(rep_msg, cm_id_priv, param);
1383	msg->timeout_ms = cm_id_priv->timeout_ms;
1384	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1385
1386	ret = ib_post_send_mad(msg, NULL);
1387	if (ret) {
1388		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1389		cm_free_msg(msg);
1390		return ret;
1391	}
1392
1393	cm_id->state = IB_CM_REP_SENT;
1394	cm_id_priv->msg = msg;
1395	cm_id_priv->initiator_depth = param->initiator_depth;
1396	cm_id_priv->responder_resources = param->responder_resources;
1397	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1398	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1399
1400out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1401	return ret;
1402}
1403EXPORT_SYMBOL(ib_send_cm_rep);
1404
1405static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1406			  struct cm_id_private *cm_id_priv,
1407			  const void *private_data,
1408			  u8 private_data_len)
1409{
1410	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1411	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1412	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1413
1414	if (private_data && private_data_len)
1415		memcpy(rtu_msg->private_data, private_data, private_data_len);
1416}
1417
1418int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1419		   const void *private_data,
1420		   u8 private_data_len)
1421{
1422	struct cm_id_private *cm_id_priv;
1423	struct ib_mad_send_buf *msg;
1424	unsigned long flags;
1425	void *data;
1426	int ret;
1427
1428	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1429		return -EINVAL;
1430
1431	data = cm_copy_private_data(private_data, private_data_len);
1432	if (IS_ERR(data))
1433		return PTR_ERR(data);
1434
1435	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1436	spin_lock_irqsave(&cm_id_priv->lock, flags);
1437	if (cm_id->state != IB_CM_REP_RCVD &&
1438	    cm_id->state != IB_CM_MRA_REP_SENT) {
1439		ret = -EINVAL;
1440		goto error;
1441	}
1442
1443	ret = cm_alloc_msg(cm_id_priv, &msg);
1444	if (ret)
1445		goto error;
1446
1447	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1448		      private_data, private_data_len);
1449
1450	ret = ib_post_send_mad(msg, NULL);
1451	if (ret) {
1452		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1453		cm_free_msg(msg);
1454		kfree(data);
1455		return ret;
1456	}
1457
1458	cm_id->state = IB_CM_ESTABLISHED;
1459	cm_set_private_data(cm_id_priv, data, private_data_len);
1460	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1461	return 0;
1462
1463error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1464	kfree(data);
1465	return ret;
1466}
1467EXPORT_SYMBOL(ib_send_cm_rtu);
1468
1469static void cm_format_rep_event(struct cm_work *work)
1470{
1471	struct cm_rep_msg *rep_msg;
1472	struct ib_cm_rep_event_param *param;
1473
1474	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1475	param = &work->cm_event.param.rep_rcvd;
1476	param->remote_ca_guid = rep_msg->local_ca_guid;
1477	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1478	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1479	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1480	param->responder_resources = rep_msg->initiator_depth;
1481	param->initiator_depth = rep_msg->resp_resources;
1482	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1483	param->failover_accepted = cm_rep_get_failover(rep_msg);
1484	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1485	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1486	param->srq = cm_rep_get_srq(rep_msg);
1487	work->cm_event.private_data = &rep_msg->private_data;
1488}
1489
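/* Answer a duplicate REP by resending the RTU or MRA for our current state. */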
1490static void cm_dup_rep_handler(struct cm_work *work)
1491{
1492	struct cm_id_private *cm_id_priv;
1493	struct cm_rep_msg *rep_msg;
1494	struct ib_mad_send_buf *msg = NULL;
1495	unsigned long flags;
1496	int ret;
1497
1498	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1499	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1500				   rep_msg->local_comm_id);
1501	if (!cm_id_priv)
1502		return;
1503
1504	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1505	if (ret)
1506		goto deref;
1507
1508	spin_lock_irqsave(&cm_id_priv->lock, flags);
1509	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1510		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1511			      cm_id_priv->private_data,
1512			      cm_id_priv->private_data_len);
1513	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1514		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1515			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1516			      cm_id_priv->private_data,
1517			      cm_id_priv->private_data_len);
1518	else
1519		goto unlock;
1520	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1521
1522	ret = ib_post_send_mad(msg, NULL);
1523	if (ret)
1524		goto free;
1525	goto deref;
1526
1527unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1528free:	cm_free_msg(msg);
1529deref:	cm_deref_id(cm_id_priv);
1530}
1531
1532static int cm_rep_handler(struct cm_work *work)
1533{
1534	struct cm_id_private *cm_id_priv;
1535	struct cm_rep_msg *rep_msg;
1536	unsigned long flags;
1537	int ret;
1538
1539	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1540	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1541	if (!cm_id_priv) {
1542		cm_dup_rep_handler(work);
1543		return -EINVAL;
1544	}
1545
1546	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1547	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1548	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1549
1550	spin_lock_irqsave(&cm.lock, flags);
1551	/* Check for duplicate REP. */
1552	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1553		spin_unlock_irqrestore(&cm.lock, flags);
1554		ret = -EINVAL;
1555		goto error;
1556	}
1557	/* Check for a stale connection. */
1558	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1559		spin_unlock_irqrestore(&cm.lock, flags);
1560		cm_issue_rej(work->port, work->mad_recv_wc,
1561			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1562			     NULL, 0);
1563		ret = -EINVAL;
1564		goto error;
1565	}
1566	spin_unlock_irqrestore(&cm.lock, flags);
1567
1568	cm_format_rep_event(work);
1569
1570	spin_lock_irqsave(&cm_id_priv->lock, flags);
1571	switch (cm_id_priv->id.state) {
1572	case IB_CM_REQ_SENT:
1573	case IB_CM_MRA_REQ_RCVD:
1574		break;
1575	default:
1576		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1577		ret = -EINVAL;
1578		goto error;
1579	}
1580	cm_id_priv->id.state = IB_CM_REP_RCVD;
1581	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1582	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1583	cm_id_priv->initiator_depth = rep_msg->resp_resources;
1584	cm_id_priv->responder_resources = rep_msg->initiator_depth;
1585	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1586	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1587
1588	/* todo: handle peer_to_peer */
1589
1590	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1591	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1592	if (!ret)
1593		list_add_tail(&work->list, &cm_id_priv->work_list);
1594	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1595
1596	if (ret)
1597		cm_process_work(cm_id_priv, work);
1598	else
1599		cm_deref_id(cm_id_priv);
1600	return 0;
1601
1602error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
1603	cm_deref_id(cm_id_priv);
1604	return ret;
1605}
1606
1607static int cm_establish_handler(struct cm_work *work)
1608{
1609	struct cm_id_private *cm_id_priv;
1610	unsigned long flags;
1611	int ret;
1612
1613	/* See comment in ib_cm_establish about lookup. */
1614	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1615	if (!cm_id_priv)
1616		return -EINVAL;
1617
1618	spin_lock_irqsave(&cm_id_priv->lock, flags);
1619	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1620		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1621		goto out;
1622	}
1623
1624	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1625	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1626	if (!ret)
1627		list_add_tail(&work->list, &cm_id_priv->work_list);
1628	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1629
1630	if (ret)
1631		cm_process_work(cm_id_priv, work);
1632	else
1633		cm_deref_id(cm_id_priv);
1634	return 0;
1635out:
1636	cm_deref_id(cm_id_priv);
1637	return -EINVAL;
1638}
1639
1640static int cm_rtu_handler(struct cm_work *work)
1641{
1642	struct cm_id_private *cm_id_priv;
1643	struct cm_rtu_msg *rtu_msg;
1644	unsigned long flags;
1645	int ret;
1646
1647	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1648	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1649				   rtu_msg->local_comm_id);
1650	if (!cm_id_priv)
1651		return -EINVAL;
1652
1653	work->cm_event.private_data = &rtu_msg->private_data;
1654
1655	spin_lock_irqsave(&cm_id_priv->lock, flags);
1656	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1657	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1658		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1659		goto out;
1660	}
1661	cm_id_priv->id.state = IB_CM_ESTABLISHED;
1662
1663	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1664	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1665	if (!ret)
1666		list_add_tail(&work->list, &cm_id_priv->work_list);
1667	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1668
1669	if (ret)
1670		cm_process_work(cm_id_priv, work);
1671	else
1672		cm_deref_id(cm_id_priv);
1673	return 0;
1674out:
1675	cm_deref_id(cm_id_priv);
1676	return -EINVAL;
1677}
1678
1679static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1680			  struct cm_id_private *cm_id_priv,
1681			  const void *private_data,
1682			  u8 private_data_len)
1683{
1684	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1685			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1686	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1687	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1688	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1689
1690	if (private_data && private_data_len)
1691		memcpy(dreq_msg->private_data, private_data, private_data_len);
1692}
1693
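/*
 * Send a DREQ to begin disconnecting an established connection.  Only
 * valid while ESTABLISHED; if the DREQ cannot be allocated or posted,
 * the connection is moved directly into timewait.
 */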
1694int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1695		    const void *private_data,
1696		    u8 private_data_len)
1697{
1698	struct cm_id_private *cm_id_priv;
1699	struct ib_mad_send_buf *msg;
1700	unsigned long flags;
1701	int ret;
1702
1703	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1704		return -EINVAL;
1705
1706	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1707	spin_lock_irqsave(&cm_id_priv->lock, flags);
1708	if (cm_id->state != IB_CM_ESTABLISHED) {
1709		ret = -EINVAL;
1710		goto out;
1711	}
1712
1713	ret = cm_alloc_msg(cm_id_priv, &msg);
1714	if (ret) {
1715		cm_enter_timewait(cm_id_priv);
1716		goto out;
1717	}
1718
1719	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1720		       private_data, private_data_len);
1721	msg->timeout_ms = cm_id_priv->timeout_ms;
1722	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1723
1724	ret = ib_post_send_mad(msg, NULL);
1725	if (ret) {
1726		cm_enter_timewait(cm_id_priv);
1727		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1728		cm_free_msg(msg);
1729		return ret;
1730	}
1731
1732	cm_id->state = IB_CM_DREQ_SENT;
1733	cm_id_priv->msg = msg;
1734out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1735	return ret;
1736}
1737EXPORT_SYMBOL(ib_send_cm_dreq);
1738
1739static void cm_format_drep(struct cm_drep_msg *drep_msg,
1740			  struct cm_id_private *cm_id_priv,
1741			  const void *private_data,
1742			  u8 private_data_len)
1743{
1744	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1745	drep_msg->local_comm_id = cm_id_priv->id.local_id;
1746	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1747
1748	if (private_data && private_data_len)
1749		memcpy(drep_msg->private_data, private_data, private_data_len);
1750}
1751
1752int ib_send_cm_drep(struct ib_cm_id *cm_id,
1753		    const void *private_data,
1754		    u8 private_data_len)
1755{
1756	struct cm_id_private *cm_id_priv;
1757	struct ib_mad_send_buf *msg;
1758	unsigned long flags;
1759	void *data;
1760	int ret;
1761
1762	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1763		return -EINVAL;
1764
1765	data = cm_copy_private_data(private_data, private_data_len);
1766	if (IS_ERR(data))
1767		return PTR_ERR(data);
1768
1769	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1770	spin_lock_irqsave(&cm_id_priv->lock, flags);
1771	if (cm_id->state != IB_CM_DREQ_RCVD) {
1772		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1773		kfree(data);
1774		return -EINVAL;
1775	}
1776
1777	cm_set_private_data(cm_id_priv, data, private_data_len);
1778	cm_enter_timewait(cm_id_priv);
1779
1780	ret = cm_alloc_msg(cm_id_priv, &msg);
1781	if (ret)
1782		goto out;
1783
1784	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1785		       private_data, private_data_len);
1786
1787	ret = ib_post_send_mad(msg, NULL);
1788	if (ret) {
1789		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1790		cm_free_msg(msg);
1791		return ret;
1792	}
1793
1794out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1795	return ret;
1796}
1797EXPORT_SYMBOL(ib_send_cm_drep);
1798
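/*
 * Handle a received DREQ.  A DREQ that arrives while already in timewait
 * is answered with a DREP built from the stored private data; otherwise
 * the connection moves to DREQ_RCVD and the event is reported upward.
 */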
1799static int cm_dreq_handler(struct cm_work *work)
1800{
1801	struct cm_id_private *cm_id_priv;
1802	struct cm_dreq_msg *dreq_msg;
1803	struct ib_mad_send_buf *msg = NULL;
1804	unsigned long flags;
1805	int ret;
1806
1807	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1808	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1809				   dreq_msg->local_comm_id);
1810	if (!cm_id_priv)
1811		return -EINVAL;
1812
1813	work->cm_event.private_data = &dreq_msg->private_data;
1814
1815	spin_lock_irqsave(&cm_id_priv->lock, flags);
1816	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1817		goto unlock;
1818
1819	switch (cm_id_priv->id.state) {
1820	case IB_CM_REP_SENT:
1821	case IB_CM_DREQ_SENT:
1822		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1823		break;
1824	case IB_CM_ESTABLISHED:
1825	case IB_CM_MRA_REP_RCVD:
1826		break;
1827	case IB_CM_TIMEWAIT:
1828		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1829			goto unlock;
1830
1831		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1832			       cm_id_priv->private_data,
1833			       cm_id_priv->private_data_len);
1834		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1835
1836		if (ib_post_send_mad(msg, NULL))
1837			cm_free_msg(msg);
1838		goto deref;
1839	default:
1840		goto unlock;
1841	}
1842	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1843	cm_id_priv->tid = dreq_msg->hdr.tid;
1844	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1845	if (!ret)
1846		list_add_tail(&work->list, &cm_id_priv->work_list);
1847	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1848
1849	if (ret)
1850		cm_process_work(cm_id_priv, work);
1851	else
1852		cm_deref_id(cm_id_priv);
1853	return 0;
1854
1855unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1856deref:	cm_deref_id(cm_id_priv);
1857	return -EINVAL;
1858}
1859
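/*
 * Handle a received DREP: valid only in DREQ_SENT or DREQ_RCVD, after
 * which the connection enters timewait.
 */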
1860static int cm_drep_handler(struct cm_work *work)
1861{
1862	struct cm_id_private *cm_id_priv;
1863	struct cm_drep_msg *drep_msg;
1864	unsigned long flags;
1865	int ret;
1866
1867	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1868	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1869				   drep_msg->local_comm_id);
1870	if (!cm_id_priv)
1871		return -EINVAL;
1872
1873	work->cm_event.private_data = &drep_msg->private_data;
1874
1875	spin_lock_irqsave(&cm_id_priv->lock, flags);
1876	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1877	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1878		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1879		goto out;
1880	}
1881	cm_enter_timewait(cm_id_priv);
1882
1883	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1884	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1885	if (!ret)
1886		list_add_tail(&work->list, &cm_id_priv->work_list);
1887	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1888
1889	if (ret)
1890		cm_process_work(cm_id_priv, work);
1891	else
1892		cm_deref_id(cm_id_priv);
1893	return 0;
1894out:
1895	cm_deref_id(cm_id_priv);
1896	return -EINVAL;
1897}
1898
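/*
 * Send a REJ.  If this side has already sent a REP, rejecting moves the
 * cm_id into timewait; otherwise the cm_id is reset to IDLE.
 */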
1899int ib_send_cm_rej(struct ib_cm_id *cm_id,
1900		   enum ib_cm_rej_reason reason,
1901		   void *ari,
1902		   u8 ari_length,
1903		   const void *private_data,
1904		   u8 private_data_len)
1905{
1906	struct cm_id_private *cm_id_priv;
1907	struct ib_mad_send_buf *msg;
1908	unsigned long flags;
1909	int ret;
1910
1911	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1912	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1913		return -EINVAL;
1914
1915	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1916
1917	spin_lock_irqsave(&cm_id_priv->lock, flags);
1918	switch (cm_id->state) {
1919	case IB_CM_REQ_SENT:
1920	case IB_CM_MRA_REQ_RCVD:
1921	case IB_CM_REQ_RCVD:
1922	case IB_CM_MRA_REQ_SENT:
1923	case IB_CM_REP_RCVD:
1924	case IB_CM_MRA_REP_SENT:
1925		ret = cm_alloc_msg(cm_id_priv, &msg);
1926		if (!ret)
1927			cm_format_rej((struct cm_rej_msg *) msg->mad,
1928				      cm_id_priv, reason, ari, ari_length,
1929				      private_data, private_data_len);
1930
1931		cm_reset_to_idle(cm_id_priv);
1932		break;
1933	case IB_CM_REP_SENT:
1934	case IB_CM_MRA_REP_RCVD:
1935		ret = cm_alloc_msg(cm_id_priv, &msg);
1936		if (!ret)
1937			cm_format_rej((struct cm_rej_msg *) msg->mad,
1938				      cm_id_priv, reason, ari, ari_length,
1939				      private_data, private_data_len);
1940
1941		cm_enter_timewait(cm_id_priv);
1942		break;
1943	default:
1944		ret = -EINVAL;
1945		goto out;
1946	}
1947
1948	if (ret)
1949		goto out;
1950
1951	ret = ib_post_send_mad(msg, NULL);
1952	if (ret)
1953		cm_free_msg(msg);
1954
1955out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1956	return ret;
1957}
1958EXPORT_SYMBOL(ib_send_cm_rej);
1959
1960static void cm_format_rej_event(struct cm_work *work)
1961{
1962	struct cm_rej_msg *rej_msg;
1963	struct ib_cm_rej_event_param *param;
1964
1965	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1966	param = &work->cm_event.param.rej_rcvd;
1967	param->ari = rej_msg->ari;
1968	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1969	param->reason = __be16_to_cpu(rej_msg->reason);
1970	work->cm_event.private_data = &rej_msg->private_data;
1971}
1972
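/*
 * Look up the cm_id targeted by a REJ.  A REJ with reason TIMEOUT refers
 * to a connection that never completed on this side, so it is matched
 * through the timewait tables using the CA GUID carried in the ARI;
 * other REJs are matched by communication ID.
 */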
1973static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
1974{
1975	struct cm_timewait_info *timewait_info;
1976	struct cm_id_private *cm_id_priv;
1977	unsigned long flags;
1978	__be32 remote_id;
1979
1980	remote_id = rej_msg->local_comm_id;
1981
1982	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
1983		spin_lock_irqsave(&cm.lock, flags);
1984		timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
1985						  remote_id);
1986		if (!timewait_info) {
1987			spin_unlock_irqrestore(&cm.lock, flags);
1988			return NULL;
1989		}
1990		cm_id_priv = idr_find(&cm.local_id_table,
1991				      (__force int) timewait_info->work.local_id);
1992		if (cm_id_priv) {
1993			if (cm_id_priv->id.remote_id == remote_id)
1994				atomic_inc(&cm_id_priv->refcount);
1995			else
1996				cm_id_priv = NULL;
1997		}
1998		spin_unlock_irqrestore(&cm.lock, flags);
1999	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2000		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2001	else
2002		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2003
2004	return cm_id_priv;
2005}
2006
2007static int cm_rej_handler(struct cm_work *work)
2008{
2009	struct cm_id_private *cm_id_priv;
2010	struct cm_rej_msg *rej_msg;
2011	unsigned long flags;
2012	int ret;
2013
2014	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2015	cm_id_priv = cm_acquire_rejected_id(rej_msg);
2016	if (!cm_id_priv)
2017		return -EINVAL;
2018
2019	cm_format_rej_event(work);
2020
2021	spin_lock_irqsave(&cm_id_priv->lock, flags);
2022	switch (cm_id_priv->id.state) {
2023	case IB_CM_REQ_SENT:
2024	case IB_CM_MRA_REQ_RCVD:
2025	case IB_CM_REP_SENT:
2026	case IB_CM_MRA_REP_RCVD:
2027		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2028		/* fall through */
2029	case IB_CM_REQ_RCVD:
2030	case IB_CM_MRA_REQ_SENT:
2031		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2032			cm_enter_timewait(cm_id_priv);
2033		else
2034			cm_reset_to_idle(cm_id_priv);
2035		break;
2036	case IB_CM_DREQ_SENT:
2037		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2038		/* fall through */
2039	case IB_CM_REP_RCVD:
2040	case IB_CM_MRA_REP_SENT:
2041	case IB_CM_ESTABLISHED:
2042		cm_enter_timewait(cm_id_priv);
2043		break;
2044	default:
2045		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2046		ret = -EINVAL;
2047		goto out;
2048	}
2049
2050	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2051	if (!ret)
2052		list_add_tail(&work->list, &cm_id_priv->work_list);
2053	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2054
2055	if (ret)
2056		cm_process_work(cm_id_priv, work);
2057	else
2058		cm_deref_id(cm_id_priv);
2059	return 0;
2060out:
2061	cm_deref_id(cm_id_priv);
2062	return -EINVAL;
2063}
2064
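/*
 * Send an MRA in response to a received REQ, REP or LAP.  The private
 * data and service timeout are also saved so that a duplicate of the
 * message can be re-MRAed from the receive handlers.
 */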
2065int ib_send_cm_mra(struct ib_cm_id *cm_id,
2066		   u8 service_timeout,
2067		   const void *private_data,
2068		   u8 private_data_len)
2069{
2070	struct cm_id_private *cm_id_priv;
2071	struct ib_mad_send_buf *msg;
2072	void *data;
2073	unsigned long flags;
2074	int ret;
2075
2076	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2077		return -EINVAL;
2078
2079	data = cm_copy_private_data(private_data, private_data_len);
2080	if (IS_ERR(data))
2081		return PTR_ERR(data);
2082
2083	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2084
2085	spin_lock_irqsave(&cm_id_priv->lock, flags);
2086	switch(cm_id_priv->id.state) {
2087	case IB_CM_REQ_RCVD:
2088		ret = cm_alloc_msg(cm_id_priv, &msg);
2089		if (ret)
2090			goto error1;
2091
2092		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2093			      CM_MSG_RESPONSE_REQ, service_timeout,
2094			      private_data, private_data_len);
2095		ret = ib_post_send_mad(msg, NULL);
2096		if (ret)
2097			goto error2;
2098		cm_id->state = IB_CM_MRA_REQ_SENT;
2099		break;
2100	case IB_CM_REP_RCVD:
2101		ret = cm_alloc_msg(cm_id_priv, &msg);
2102		if (ret)
2103			goto error1;
2104
2105		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2106			      CM_MSG_RESPONSE_REP, service_timeout,
2107			      private_data, private_data_len);
2108		ret = ib_post_send_mad(msg, NULL);
2109		if (ret)
2110			goto error2;
2111		cm_id->state = IB_CM_MRA_REP_SENT;
2112		break;
2113	case IB_CM_ESTABLISHED:
2114		ret = cm_alloc_msg(cm_id_priv, &msg);
2115		if (ret)
2116			goto error1;
2117
2118		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2119			      CM_MSG_RESPONSE_OTHER, service_timeout,
2120			      private_data, private_data_len);
2121		ret = ib_post_send_mad(msg, NULL);
2122		if (ret)
2123			goto error2;
2124		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2125		break;
2126	default:
2127		ret = -EINVAL;
2128		goto error1;
2129	}
2130	cm_id_priv->service_timeout = service_timeout;
2131	cm_set_private_data(cm_id_priv, data, private_data_len);
2132	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2133	return 0;
2134
2135error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2136	kfree(data);
2137	return ret;
2138
2139error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2140	kfree(data);
2141	cm_free_msg(msg);
2142	return ret;
2143}
2144EXPORT_SYMBOL(ib_send_cm_mra);
2145
2146static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2147{
2148	switch (cm_mra_get_msg_mraed(mra_msg)) {
2149	case CM_MSG_RESPONSE_REQ:
2150		return cm_acquire_id(mra_msg->remote_comm_id, 0);
2151	case CM_MSG_RESPONSE_REP:
2152	case CM_MSG_RESPONSE_OTHER:
2153		return cm_acquire_id(mra_msg->remote_comm_id,
2154				     mra_msg->local_comm_id);
2155	default:
2156		return NULL;
2157	}
2158}
2159
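/*
 * Handle a received MRA: extend the timeout of the outstanding REQ, REP
 * or LAP MAD by the peer's service timeout plus the path's packet life
 * time, and record the corresponding MRA_*_RCVD state.
 */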
2160static int cm_mra_handler(struct cm_work *work)
2161{
2162	struct cm_id_private *cm_id_priv;
2163	struct cm_mra_msg *mra_msg;
2164	unsigned long flags;
2165	int timeout, ret;
2166
2167	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2168	cm_id_priv = cm_acquire_mraed_id(mra_msg);
2169	if (!cm_id_priv)
2170		return -EINVAL;
2171
2172	work->cm_event.private_data = &mra_msg->private_data;
2173	work->cm_event.param.mra_rcvd.service_timeout =
2174					cm_mra_get_service_timeout(mra_msg);
2175	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2176		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2177
2178	spin_lock_irqsave(&cm_id_priv->lock, flags);
2179	switch (cm_id_priv->id.state) {
2180	case IB_CM_REQ_SENT:
2181		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2182		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2183				  cm_id_priv->msg, timeout))
2184			goto out;
2185		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2186		break;
2187	case IB_CM_REP_SENT:
2188		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2189		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2190				  cm_id_priv->msg, timeout))
2191			goto out;
2192		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2193		break;
2194	case IB_CM_ESTABLISHED:
2195		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2196		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2197		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2198				  cm_id_priv->msg, timeout))
2199			goto out;
2200		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2201		break;
2202	default:
2203		goto out;
2204	}
2205
2206	cm_id_priv->msg->context[1] = (void *) (unsigned long)
2207				      cm_id_priv->id.state;
2208	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2209	if (!ret)
2210		list_add_tail(&work->list, &cm_id_priv->work_list);
2211	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2212
2213	if (ret)
2214		cm_process_work(cm_id_priv, work);
2215	else
2216		cm_deref_id(cm_id_priv);
2217	return 0;
2218out:
2219	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2220	cm_deref_id(cm_id_priv);
2221	return -EINVAL;
2222}
2223
2224static void cm_format_lap(struct cm_lap_msg *lap_msg,
2225			  struct cm_id_private *cm_id_priv,
2226			  struct ib_sa_path_rec *alternate_path,
2227			  const void *private_data,
2228			  u8 private_data_len)
2229{
2230	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2231			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2232	lap_msg->local_comm_id = cm_id_priv->id.local_id;
2233	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2234	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2235	/* todo: need remote CM response timeout */
2236	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2237	lap_msg->alt_local_lid = alternate_path->slid;
2238	lap_msg->alt_remote_lid = alternate_path->dlid;
2239	lap_msg->alt_local_gid = alternate_path->sgid;
2240	lap_msg->alt_remote_gid = alternate_path->dgid;
2241	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2242	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2243	lap_msg->alt_hop_limit = alternate_path->hop_limit;
2244	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2245	cm_lap_set_sl(lap_msg, alternate_path->sl);
2246	cm_lap_set_subnet_local(lap_msg, 1); /* alternate path restricted to the local subnet */
2247	cm_lap_set_local_ack_timeout(lap_msg,
2248		min(31, alternate_path->packet_life_time + 1));
2249
2250	if (private_data && private_data_len)
2251		memcpy(lap_msg->private_data, private_data, private_data_len);
2252}
2253
2254int ib_send_cm_lap(struct ib_cm_id *cm_id,
2255		   struct ib_sa_path_rec *alternate_path,
2256		   const void *private_data,
2257		   u8 private_data_len)
2258{
2259	struct cm_id_private *cm_id_priv;
2260	struct ib_mad_send_buf *msg;
2261	unsigned long flags;
2262	int ret;
2263
2264	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2265		return -EINVAL;
2266
2267	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2268	spin_lock_irqsave(&cm_id_priv->lock, flags);
2269	if (cm_id->state != IB_CM_ESTABLISHED ||
2270	    cm_id->lap_state != IB_CM_LAP_IDLE) {
2271		ret = -EINVAL;
2272		goto out;
2273	}
2274
2275	ret = cm_alloc_msg(cm_id_priv, &msg);
2276	if (ret)
2277		goto out;
2278
2279	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2280		      alternate_path, private_data, private_data_len);
2281	msg->timeout_ms = cm_id_priv->timeout_ms;
2282	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2283
2284	ret = ib_post_send_mad(msg, NULL);
2285	if (ret) {
2286		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2287		cm_free_msg(msg);
2288		return ret;
2289	}
2290
2291	cm_id->lap_state = IB_CM_LAP_SENT;
2292	cm_id_priv->msg = msg;
2293
2294out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2295	return ret;
2296}
2297EXPORT_SYMBOL(ib_send_cm_lap);
2298
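/*
 * Build the alternate path record from a received LAP.  The LAP's
 * "local" fields describe the sender, so they map to the destination
 * (dgid/dlid) here, and the "remote" fields map to the source.
 */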
2299static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2300				    struct cm_lap_msg *lap_msg)
2301{
2302	memset(path, 0, sizeof *path);
2303	path->dgid = lap_msg->alt_local_gid;
2304	path->sgid = lap_msg->alt_remote_gid;
2305	path->dlid = lap_msg->alt_local_lid;
2306	path->slid = lap_msg->alt_remote_lid;
2307	path->flow_label = cm_lap_get_flow_label(lap_msg);
2308	path->hop_limit = lap_msg->alt_hop_limit;
2309	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2310	path->reversible = 1;
2311	/* pkey is same as in REQ */
2312	path->sl = cm_lap_get_sl(lap_msg);
2313	path->mtu_selector = IB_SA_EQ;
2314	/* mtu is same as in REQ */
2315	path->rate_selector = IB_SA_EQ;
2316	path->rate = cm_lap_get_packet_rate(lap_msg);
2317	path->packet_life_time_selector = IB_SA_EQ;
2318	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2319	path->packet_life_time -= (path->packet_life_time > 0);
2320}
2321
2322static int cm_lap_handler(struct cm_work *work)
2323{
2324	struct cm_id_private *cm_id_priv;
2325	struct cm_lap_msg *lap_msg;
2326	struct ib_cm_lap_event_param *param;
2327	struct ib_mad_send_buf *msg = NULL;
2328	unsigned long flags;
2329	int ret;
2330
2331	/* todo: verify LAP request and send reject APR if invalid. */
2332	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2333	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2334				   lap_msg->local_comm_id);
2335	if (!cm_id_priv)
2336		return -EINVAL;
2337
2338	param = &work->cm_event.param.lap_rcvd;
2339	param->alternate_path = &work->path[0];
2340	cm_format_path_from_lap(param->alternate_path, lap_msg);
2341	work->cm_event.private_data = &lap_msg->private_data;
2342
2343	spin_lock_irqsave(&cm_id_priv->lock, flags);
2344	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2345		goto unlock;
2346
2347	switch (cm_id_priv->id.lap_state) {
2348	case IB_CM_LAP_IDLE:
2349		break;
2350	case IB_CM_MRA_LAP_SENT:
2351		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2352			goto unlock;
2353
2354		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2355			      CM_MSG_RESPONSE_OTHER,
2356			      cm_id_priv->service_timeout,
2357			      cm_id_priv->private_data,
2358			      cm_id_priv->private_data_len);
2359		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2360
2361		if (ib_post_send_mad(msg, NULL))
2362			cm_free_msg(msg);
2363		goto deref;
2364	default:
2365		goto unlock;
2366	}
2367
2368	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2369	cm_id_priv->tid = lap_msg->hdr.tid;
2370	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2371	if (!ret)
2372		list_add_tail(&work->list, &cm_id_priv->work_list);
2373	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2374
2375	if (ret)
2376		cm_process_work(cm_id_priv, work);
2377	else
2378		cm_deref_id(cm_id_priv);
2379	return 0;
2380
2381unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2382deref:	cm_deref_id(cm_id_priv);
2383	return -EINVAL;
2384}
2385
2386static void cm_format_apr(struct cm_apr_msg *apr_msg,
2387			  struct cm_id_private *cm_id_priv,
2388			  enum ib_cm_apr_status status,
2389			  void *info,
2390			  u8 info_length,
2391			  const void *private_data,
2392			  u8 private_data_len)
2393{
2394	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2395	apr_msg->local_comm_id = cm_id_priv->id.local_id;
2396	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2397	apr_msg->ap_status = (u8) status;
2398
2399	if (info && info_length) {
2400		apr_msg->info_length = info_length;
2401		memcpy(apr_msg->info, info, info_length);
2402	}
2403
2404	if (private_data && private_data_len)
2405		memcpy(apr_msg->private_data, private_data, private_data_len);
2406}
2407
2408int ib_send_cm_apr(struct ib_cm_id *cm_id,
2409		   enum ib_cm_apr_status status,
2410		   void *info,
2411		   u8 info_length,
2412		   const void *private_data,
2413		   u8 private_data_len)
2414{
2415	struct cm_id_private *cm_id_priv;
2416	struct ib_mad_send_buf *msg;
2417	unsigned long flags;
2418	int ret;
2419
2420	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2421	    (info && info_length > IB_CM_APR_INFO_LENGTH))
2422		return -EINVAL;
2423
2424	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2425	spin_lock_irqsave(&cm_id_priv->lock, flags);
2426	if (cm_id->state != IB_CM_ESTABLISHED ||
2427	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
2428	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2429		ret = -EINVAL;
2430		goto out;
2431	}
2432
2433	ret = cm_alloc_msg(cm_id_priv, &msg);
2434	if (ret)
2435		goto out;
2436
2437	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2438		      info, info_length, private_data, private_data_len);
2439	ret = ib_post_send_mad(msg, NULL);
2440	if (ret) {
2441		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2442		cm_free_msg(msg);
2443		return ret;
2444	}
2445
2446	cm_id->lap_state = IB_CM_LAP_IDLE;
2447out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2448	return ret;
2449}
2450EXPORT_SYMBOL(ib_send_cm_apr);
2451
2452static int cm_apr_handler(struct cm_work *work)
2453{
2454	struct cm_id_private *cm_id_priv;
2455	struct cm_apr_msg *apr_msg;
2456	unsigned long flags;
2457	int ret;
2458
2459	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2460	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2461				   apr_msg->local_comm_id);
2462	if (!cm_id_priv)
2463		return -EINVAL; /* Unmatched reply. */
2464
2465	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2466	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2467	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2468	work->cm_event.private_data = &apr_msg->private_data;
2469
2470	spin_lock_irqsave(&cm_id_priv->lock, flags);
2471	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2472	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2473	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2474		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2475		goto out;
2476	}
2477	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2478	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2479	cm_id_priv->msg = NULL;
2480
2481	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2482	if (!ret)
2483		list_add_tail(&work->list, &cm_id_priv->work_list);
2484	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2485
2486	if (ret)
2487		cm_process_work(cm_id_priv, work);
2488	else
2489		cm_deref_id(cm_id_priv);
2490	return 0;
2491out:
2492	cm_deref_id(cm_id_priv);
2493	return -EINVAL;
2494}
2495
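/*
 * Handle expiration of the timewait period: remove the timewait entry
 * from the lookup tables and, if the connection is still in TIMEWAIT
 * for the same remote QPN, move it to IDLE and report
 * IB_CM_TIMEWAIT_EXIT.
 */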
2496static int cm_timewait_handler(struct cm_work *work)
2497{
2498	struct cm_timewait_info *timewait_info;
2499	struct cm_id_private *cm_id_priv;
2500	unsigned long flags;
2501	int ret;
2502
2503	timewait_info = (struct cm_timewait_info *)work;
2504	cm_cleanup_timewait(timewait_info);
2505
2506	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2507				   timewait_info->work.remote_id);
2508	if (!cm_id_priv)
2509		return -EINVAL;
2510
2511	spin_lock_irqsave(&cm_id_priv->lock, flags);
2512	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2513	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2514		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2515		goto out;
2516	}
2517	cm_id_priv->id.state = IB_CM_IDLE;
2518	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2519	if (!ret)
2520		list_add_tail(&work->list, &cm_id_priv->work_list);
2521	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2522
2523	if (ret)
2524		cm_process_work(cm_id_priv, work);
2525	else
2526		cm_deref_id(cm_id_priv);
2527	return 0;
2528out:
2529	cm_deref_id(cm_id_priv);
2530	return -EINVAL;
2531}
2532
2533static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2534			       struct cm_id_private *cm_id_priv,
2535			       struct ib_cm_sidr_req_param *param)
2536{
2537	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2538			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2539	sidr_req_msg->request_id = cm_id_priv->id.local_id;
2540	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
2541	sidr_req_msg->service_id = param->service_id;
2542
2543	if (param->private_data && param->private_data_len)
2544		memcpy(sidr_req_msg->private_data, param->private_data,
2545		       param->private_data_len);
2546}
2547
2548int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2549			struct ib_cm_sidr_req_param *param)
2550{
2551	struct cm_id_private *cm_id_priv;
2552	struct ib_mad_send_buf *msg;
2553	unsigned long flags;
2554	int ret;
2555
2556	if (!param->path || (param->private_data &&
2557	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2558		return -EINVAL;
2559
2560	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2561	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2562	if (ret)
2563		goto out;
2564
2565	cm_id->service_id = param->service_id;
2566	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2567	cm_id_priv->timeout_ms = param->timeout_ms;
2568	cm_id_priv->max_cm_retries = param->max_cm_retries;
2569	ret = cm_alloc_msg(cm_id_priv, &msg);
2570	if (ret)
2571		goto out;
2572
2573	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2574			   param);
2575	msg->timeout_ms = cm_id_priv->timeout_ms;
2576	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2577
2578	spin_lock_irqsave(&cm_id_priv->lock, flags);
2579	if (cm_id->state == IB_CM_IDLE)
2580		ret = ib_post_send_mad(msg, NULL);
2581	else
2582		ret = -EINVAL;
2583
2584	if (ret) {
2585		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2586		cm_free_msg(msg);
2587		goto out;
2588	}
2589	cm_id->state = IB_CM_SIDR_REQ_SENT;
2590	cm_id_priv->msg = msg;
2591	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2592out:
2593	return ret;
2594}
2595EXPORT_SYMBOL(ib_send_cm_sidr_req);
2596
2597static void cm_format_sidr_req_event(struct cm_work *work,
2598				     struct ib_cm_id *listen_id)
2599{
2600	struct cm_sidr_req_msg *sidr_req_msg;
2601	struct ib_cm_sidr_req_event_param *param;
2602
2603	sidr_req_msg = (struct cm_sidr_req_msg *)
2604				work->mad_recv_wc->recv_buf.mad;
2605	param = &work->cm_event.param.sidr_req_rcvd;
2606	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2607	param->listen_id = listen_id;
2608	param->port = work->port->port_num;
2609	work->cm_event.private_data = &sidr_req_msg->private_data;
2610}
2611
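/*
 * Handle a received SIDR REQ: create a new cm_id for the request, drop
 * duplicates, and match the service ID against the listen table before
 * handing the event to the listener's callback.
 */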
2612static int cm_sidr_req_handler(struct cm_work *work)
2613{
2614	struct ib_cm_id *cm_id;
2615	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2616	struct cm_sidr_req_msg *sidr_req_msg;
2617	struct ib_wc *wc;
2618	unsigned long flags;
2619
2620	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2621	if (IS_ERR(cm_id))
2622		return PTR_ERR(cm_id);
2623	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2624
2625	/* Stash the sender's LID (in av.dgid) and request ID for duplicate SIDR REQ lookup. */
2626	sidr_req_msg = (struct cm_sidr_req_msg *)
2627				work->mad_recv_wc->recv_buf.mad;
2628	wc = work->mad_recv_wc->wc;
2629	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2630	cm_id_priv->av.dgid.global.interface_id = 0;
2631	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2632				&cm_id_priv->av);
2633	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2634	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2635	cm_id_priv->tid = sidr_req_msg->hdr.tid;
2636	atomic_inc(&cm_id_priv->work_count);
2637
2638	spin_lock_irqsave(&cm.lock, flags);
2639	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2640	if (cur_cm_id_priv) {
2641		spin_unlock_irqrestore(&cm.lock, flags);
2642		goto out; /* Duplicate message. */
2643	}
2644	cur_cm_id_priv = cm_find_listen(cm_id->device,
2645					sidr_req_msg->service_id);
2646	if (!cur_cm_id_priv) {
2647		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2648		spin_unlock_irqrestore(&cm.lock, flags);
2649		/* todo: reply with no match */
2650		goto out; /* No match. */
2651	}
2652	atomic_inc(&cur_cm_id_priv->refcount);
2653	spin_unlock_irqrestore(&cm.lock, flags);
2654
2655	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2656	cm_id_priv->id.context = cur_cm_id_priv->id.context;
2657	cm_id_priv->id.service_id = sidr_req_msg->service_id;
2658	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2659
2660	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2661	cm_process_work(cm_id_priv, work);
2662	cm_deref_id(cur_cm_id_priv);
2663	return 0;
2664out:
2665	ib_destroy_cm_id(&cm_id_priv->id);
2666	return -EINVAL;
2667}
2668
2669static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2670			       struct cm_id_private *cm_id_priv,
2671			       struct ib_cm_sidr_rep_param *param)
2672{
2673	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2674			  cm_id_priv->tid);
2675	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2676	sidr_rep_msg->status = param->status;
2677	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2678	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2679	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2680
2681	if (param->info && param->info_length)
2682		memcpy(sidr_rep_msg->info, param->info, param->info_length);
2683
2684	if (param->private_data && param->private_data_len)
2685		memcpy(sidr_rep_msg->private_data, param->private_data,
2686		       param->private_data_len);
2687}
2688
2689int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2690			struct ib_cm_sidr_rep_param *param)
2691{
2692	struct cm_id_private *cm_id_priv;
2693	struct ib_mad_send_buf *msg;
2694	unsigned long flags;
2695	int ret;
2696
2697	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2698	    (param->private_data &&
2699	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2700		return -EINVAL;
2701
2702	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2703	spin_lock_irqsave(&cm_id_priv->lock, flags);
2704	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2705		ret = -EINVAL;
2706		goto error;
2707	}
2708
2709	ret = cm_alloc_msg(cm_id_priv, &msg);
2710	if (ret)
2711		goto error;
2712
2713	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2714			   param);
2715	ret = ib_post_send_mad(msg, NULL);
2716	if (ret) {
2717		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2718		cm_free_msg(msg);
2719		return ret;
2720	}
2721	cm_id->state = IB_CM_IDLE;
2722	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2723
2724	spin_lock_irqsave(&cm.lock, flags);
2725	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2726	spin_unlock_irqrestore(&cm.lock, flags);
2727	return 0;
2728
2729error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2730	return ret;
2731}
2732EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2733
2734static void cm_format_sidr_rep_event(struct cm_work *work)
2735{
2736	struct cm_sidr_rep_msg *sidr_rep_msg;
2737	struct ib_cm_sidr_rep_event_param *param;
2738
2739	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2740				work->mad_recv_wc->recv_buf.mad;
2741	param = &work->cm_event.param.sidr_rep_rcvd;
2742	param->status = sidr_rep_msg->status;
2743	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2744	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2745	param->info = &sidr_rep_msg->info;
2746	param->info_len = sidr_rep_msg->info_length;
2747	work->cm_event.private_data = &sidr_rep_msg->private_data;
2748}
2749
2750static int cm_sidr_rep_handler(struct cm_work *work)
2751{
2752	struct cm_sidr_rep_msg *sidr_rep_msg;
2753	struct cm_id_private *cm_id_priv;
2754	unsigned long flags;
2755
2756	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2757				work->mad_recv_wc->recv_buf.mad;
2758	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2759	if (!cm_id_priv)
2760		return -EINVAL; /* Unmatched reply. */
2761
2762	spin_lock_irqsave(&cm_id_priv->lock, flags);
2763	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2764		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2765		goto out;
2766	}
2767	cm_id_priv->id.state = IB_CM_IDLE;
2768	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2769	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2770
2771	cm_format_sidr_rep_event(work);
2772	cm_process_work(cm_id_priv, work);
2773	return 0;
2774out:
2775	cm_deref_id(cm_id_priv);
2776	return -EINVAL;
2777}
2778
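/*
 * A CM MAD that expected a response completed in error or timed out.
 * Map the current connection state to the corresponding *_ERROR event
 * and deliver it, resetting the cm_id to IDLE or timewait as
 * appropriate; stale sends are simply discarded.
 */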
2779static void cm_process_send_error(struct ib_mad_send_buf *msg,
2780				  enum ib_wc_status wc_status)
2781{
2782	struct cm_id_private *cm_id_priv;
2783	struct ib_cm_event cm_event;
2784	enum ib_cm_state state;
2785	unsigned long flags;
2786	int ret;
2787
2788	memset(&cm_event, 0, sizeof cm_event);
2789	cm_id_priv = msg->context[0];
2790
2791	/* Discard old sends or ones without a response. */
2792	spin_lock_irqsave(&cm_id_priv->lock, flags);
2793	state = (enum ib_cm_state) (unsigned long) msg->context[1];
2794	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2795		goto discard;
2796
2797	switch (state) {
2798	case IB_CM_REQ_SENT:
2799	case IB_CM_MRA_REQ_RCVD:
2800		cm_reset_to_idle(cm_id_priv);
2801		cm_event.event = IB_CM_REQ_ERROR;
2802		break;
2803	case IB_CM_REP_SENT:
2804	case IB_CM_MRA_REP_RCVD:
2805		cm_reset_to_idle(cm_id_priv);
2806		cm_event.event = IB_CM_REP_ERROR;
2807		break;
2808	case IB_CM_DREQ_SENT:
2809		cm_enter_timewait(cm_id_priv);
2810		cm_event.event = IB_CM_DREQ_ERROR;
2811		break;
2812	case IB_CM_SIDR_REQ_SENT:
2813		cm_id_priv->id.state = IB_CM_IDLE;
2814		cm_event.event = IB_CM_SIDR_REQ_ERROR;
2815		break;
2816	default:
2817		goto discard;
2818	}
2819	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2820	cm_event.param.send_status = wc_status;
2821
2822	/* No other events can occur on the cm_id at this point. */
2823	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2824	cm_free_msg(msg);
2825	if (ret)
2826		ib_destroy_cm_id(&cm_id_priv->id);
2827	return;
2828discard:
2829	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2830	cm_free_msg(msg);
2831}
2832
2833static void cm_send_handler(struct ib_mad_agent *mad_agent,
2834			    struct ib_mad_send_wc *mad_send_wc)
2835{
2836	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
2837
2838	switch (mad_send_wc->status) {
2839	case IB_WC_SUCCESS:
2840	case IB_WC_WR_FLUSH_ERR:
2841		cm_free_msg(msg);
2842		break;
2843	default:
2844		if (msg->context[0] && msg->context[1])
2845			cm_process_send_error(msg, mad_send_wc->status);
2846		else
2847			cm_free_msg(msg);
2848		break;
2849	}
2850}
2851
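/*
 * Workqueue entry point: dispatch a queued cm_work item to the handler
 * for its event type, freeing the work on error.
 */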
2852static void cm_work_handler(void *data)
2853{
2854	struct cm_work *work = data;
2855	int ret;
2856
2857	switch (work->cm_event.event) {
2858	case IB_CM_REQ_RECEIVED:
2859		ret = cm_req_handler(work);
2860		break;
2861	case IB_CM_MRA_RECEIVED:
2862		ret = cm_mra_handler(work);
2863		break;
2864	case IB_CM_REJ_RECEIVED:
2865		ret = cm_rej_handler(work);
2866		break;
2867	case IB_CM_REP_RECEIVED:
2868		ret = cm_rep_handler(work);
2869		break;
2870	case IB_CM_RTU_RECEIVED:
2871		ret = cm_rtu_handler(work);
2872		break;
2873	case IB_CM_USER_ESTABLISHED:
2874		ret = cm_establish_handler(work);
2875		break;
2876	case IB_CM_DREQ_RECEIVED:
2877		ret = cm_dreq_handler(work);
2878		break;
2879	case IB_CM_DREP_RECEIVED:
2880		ret = cm_drep_handler(work);
2881		break;
2882	case IB_CM_SIDR_REQ_RECEIVED:
2883		ret = cm_sidr_req_handler(work);
2884		break;
2885	case IB_CM_SIDR_REP_RECEIVED:
2886		ret = cm_sidr_rep_handler(work);
2887		break;
2888	case IB_CM_LAP_RECEIVED:
2889		ret = cm_lap_handler(work);
2890		break;
2891	case IB_CM_APR_RECEIVED:
2892		ret = cm_apr_handler(work);
2893		break;
2894	case IB_CM_TIMEWAIT_EXIT:
2895		ret = cm_timewait_handler(work);
2896		break;
2897	default:
2898		ret = -EINVAL;
2899		break;
2900	}
2901	if (ret)
2902		cm_free_work(work);
2903}
2904
2905int ib_cm_establish(struct ib_cm_id *cm_id)
2906{
2907	struct cm_id_private *cm_id_priv;
2908	struct cm_work *work;
2909	unsigned long flags;
2910	int ret = 0;
2911
2912	work = kmalloc(sizeof *work, GFP_ATOMIC);
2913	if (!work)
2914		return -ENOMEM;
2915
2916	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2917	spin_lock_irqsave(&cm_id_priv->lock, flags);
2918	switch (cm_id->state)
2919	{
2920	case IB_CM_REP_SENT:
2921	case IB_CM_MRA_REP_RCVD:
2922		cm_id->state = IB_CM_ESTABLISHED;
2923		break;
2924	case IB_CM_ESTABLISHED:
2925		ret = -EISCONN;
2926		break;
2927	default:
2928		ret = -EINVAL;
2929		break;
2930	}
2931	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2932
2933	if (ret) {
2934		kfree(work);
2935		goto out;
2936	}
2937
2938	/*
2939	 * The CM worker thread may try to destroy the cm_id before it
2940	 * can execute this work item.  To prevent potential deadlock,
2941	 * we need to find the cm_id once we're in the context of the
2942	 * worker thread, rather than holding a reference on it.
2943	 */
2944	INIT_WORK(&work->work, cm_work_handler, work);
2945	work->local_id = cm_id->local_id;
2946	work->remote_id = cm_id->remote_id;
2947	work->mad_recv_wc = NULL;
2948	work->cm_event.event = IB_CM_USER_ESTABLISHED;
2949	queue_work(cm.wq, &work->work);
2950out:
2951	return ret;
2952}
2953EXPORT_SYMBOL(ib_cm_establish);
2954
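/*
 * MAD receive callback: translate the CM attribute ID into an event,
 * allocate a work item with room for any path records implied by the
 * message type, and queue it to the CM workqueue.
 */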
2955static void cm_recv_handler(struct ib_mad_agent *mad_agent,
2956			    struct ib_mad_recv_wc *mad_recv_wc)
2957{
2958	struct cm_work *work;
2959	enum ib_cm_event_type event;
2960	int paths = 0;
2961
2962	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
2963	case CM_REQ_ATTR_ID:
2964		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
2965						    alt_local_lid != 0);
2966		event = IB_CM_REQ_RECEIVED;
2967		break;
2968	case CM_MRA_ATTR_ID:
2969		event = IB_CM_MRA_RECEIVED;
2970		break;
2971	case CM_REJ_ATTR_ID:
2972		event = IB_CM_REJ_RECEIVED;
2973		break;
2974	case CM_REP_ATTR_ID:
2975		event = IB_CM_REP_RECEIVED;
2976		break;
2977	case CM_RTU_ATTR_ID:
2978		event = IB_CM_RTU_RECEIVED;
2979		break;
2980	case CM_DREQ_ATTR_ID:
2981		event = IB_CM_DREQ_RECEIVED;
2982		break;
2983	case CM_DREP_ATTR_ID:
2984		event = IB_CM_DREP_RECEIVED;
2985		break;
2986	case CM_SIDR_REQ_ATTR_ID:
2987		event = IB_CM_SIDR_REQ_RECEIVED;
2988		break;
2989	case CM_SIDR_REP_ATTR_ID:
2990		event = IB_CM_SIDR_REP_RECEIVED;
2991		break;
2992	case CM_LAP_ATTR_ID:
2993		paths = 1;
2994		event = IB_CM_LAP_RECEIVED;
2995		break;
2996	case CM_APR_ATTR_ID:
2997		event = IB_CM_APR_RECEIVED;
2998		break;
2999	default:
3000		ib_free_recv_mad(mad_recv_wc);
3001		return;
3002	}
3003
3004	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3005		       GFP_KERNEL);
3006	if (!work) {
3007		ib_free_recv_mad(mad_recv_wc);
3008		return;
3009	}
3010
3011	INIT_WORK(&work->work, cm_work_handler, work);
3012	work->cm_event.event = event;
3013	work->mad_recv_wc = mad_recv_wc;
3014	work->port = (struct cm_port *)mad_agent->context;
3015	queue_work(cm.wq, &work->work);
3016}
3017
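/*
 * The three helpers below fill in the QP attributes a consumer needs to
 * transition its QP to INIT, RTR and RTS respectively, based on the
 * current connection state; see ib_cm_init_qp_attr().
 */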
3018static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3019				struct ib_qp_attr *qp_attr,
3020				int *qp_attr_mask)
3021{
3022	unsigned long flags;
3023	int ret;
3024
3025	spin_lock_irqsave(&cm_id_priv->lock, flags);
3026	switch (cm_id_priv->id.state) {
3027	case IB_CM_REQ_SENT:
3028	case IB_CM_MRA_REQ_RCVD:
3029	case IB_CM_REQ_RCVD:
3030	case IB_CM_MRA_REQ_SENT:
3031	case IB_CM_REP_RCVD:
3032	case IB_CM_MRA_REP_SENT:
3033	case IB_CM_REP_SENT:
3034	case IB_CM_MRA_REP_RCVD:
3035	case IB_CM_ESTABLISHED:
3036		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3037				IB_QP_PKEY_INDEX | IB_QP_PORT;
3038		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
3039					   IB_ACCESS_REMOTE_WRITE;
3040		if (cm_id_priv->responder_resources)
3041			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
3042		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3043		qp_attr->port_num = cm_id_priv->av.port->port_num;
3044		ret = 0;
3045		break;
3046	default:
3047		ret = -EINVAL;
3048		break;
3049	}
3050	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3051	return ret;
3052}
3053
3054static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3055			       struct ib_qp_attr *qp_attr,
3056			       int *qp_attr_mask)
3057{
3058	unsigned long flags;
3059	int ret;
3060
3061	spin_lock_irqsave(&cm_id_priv->lock, flags);
3062	switch (cm_id_priv->id.state) {
3063	case IB_CM_REQ_RCVD:
3064	case IB_CM_MRA_REQ_SENT:
3065	case IB_CM_REP_RCVD:
3066	case IB_CM_MRA_REP_SENT:
3067	case IB_CM_REP_SENT:
3068	case IB_CM_MRA_REP_RCVD:
3069	case IB_CM_ESTABLISHED:
3070		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3071				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3072		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3073		qp_attr->path_mtu = cm_id_priv->path_mtu;
3074		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3075		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3076		if (cm_id_priv->qp_type == IB_QPT_RC) {
3077			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3078					 IB_QP_MIN_RNR_TIMER;
3079			qp_attr->max_dest_rd_atomic =
3080					cm_id_priv->responder_resources;
3081			qp_attr->min_rnr_timer = 0;
3082		}
3083		if (cm_id_priv->alt_av.ah_attr.dlid) {
3084			*qp_attr_mask |= IB_QP_ALT_PATH;
3085			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3086		}
3087		ret = 0;
3088		break;
3089	default:
3090		ret = -EINVAL;
3091		break;
3092	}
3093	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3094	return ret;
3095}
3096
3097static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3098			       struct ib_qp_attr *qp_attr,
3099			       int *qp_attr_mask)
3100{
3101	unsigned long flags;
3102	int ret;
3103
3104	spin_lock_irqsave(&cm_id_priv->lock, flags);
3105	switch (cm_id_priv->id.state) {
3106	case IB_CM_REP_RCVD:
3107	case IB_CM_MRA_REP_SENT:
3108	case IB_CM_REP_SENT:
3109	case IB_CM_MRA_REP_RCVD:
3110	case IB_CM_ESTABLISHED:
3111		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3112		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3113		if (cm_id_priv->qp_type == IB_QPT_RC) {
3114			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3115					 IB_QP_RNR_RETRY |
3116					 IB_QP_MAX_QP_RD_ATOMIC;
3117			qp_attr->timeout = cm_id_priv->local_ack_timeout;
3118			qp_attr->retry_cnt = cm_id_priv->retry_count;
3119			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3120			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3121		}
3122		if (cm_id_priv->alt_av.ah_attr.dlid) {
3123			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3124			qp_attr->path_mig_state = IB_MIG_REARM;
3125		}
3126		ret = 0;
3127		break;
3128	default:
3129		ret = -EINVAL;
3130		break;
3131	}
3132	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3133	return ret;
3134}
3135
3136int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3137		       struct ib_qp_attr *qp_attr,
3138		       int *qp_attr_mask)
3139{
3140	struct cm_id_private *cm_id_priv;
3141	int ret;
3142
3143	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3144	switch (qp_attr->qp_state) {
3145	case IB_QPS_INIT:
3146		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3147		break;
3148	case IB_QPS_RTR:
3149		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3150		break;
3151	case IB_QPS_RTS:
3152		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3153		break;
3154	default:
3155		ret = -EINVAL;
3156		break;
3157	}
3158	return ret;
3159}
3160EXPORT_SYMBOL(ib_cm_init_qp_attr);
3161
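/*
 * Illustrative only (not part of this file): a hypothetical consumer
 * would typically call ib_cm_init_qp_attr() once per QP transition and
 * feed the result to ib_modify_qp(), e.g. for RTR:
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask, ret;
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */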
3162static __be64 cm_get_ca_guid(struct ib_device *device)
3163{
3164	struct ib_device_attr *device_attr;
3165	__be64 guid;
3166	int ret;
3167
3168	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
3169	if (!device_attr)
3170		return 0;
3171
3172	ret = ib_query_device(device, device_attr);
3173	guid = ret ? 0 : device_attr->node_guid;
3174	kfree(device_attr);
3175	return guid;
3176}
3177
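/*
 * Per-device setup: register a GSI MAD agent for the CM class on every
 * physical port, advertise CM support in the port capability mask, and
 * add the device to the global device list.  Errors unwind any ports
 * already set up.
 */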
3178static void cm_add_one(struct ib_device *device)
3179{
3180	struct cm_device *cm_dev;
3181	struct cm_port *port;
3182	struct ib_mad_reg_req reg_req = {
3183		.mgmt_class = IB_MGMT_CLASS_CM,
3184		.mgmt_class_version = IB_CM_CLASS_VERSION
3185	};
3186	struct ib_port_modify port_modify = {
3187		.set_port_cap_mask = IB_PORT_CM_SUP
3188	};
3189	unsigned long flags;
3190	int ret;
3191	u8 i;
3192
3193	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3194			 device->phys_port_cnt, GFP_KERNEL);
3195	if (!cm_dev)
3196		return;
3197
3198	cm_dev->device = device;
3199	cm_dev->ca_guid = cm_get_ca_guid(device);
3200	if (!cm_dev->ca_guid)
3201		goto error1;
3202
3203	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3204	for (i = 1; i <= device->phys_port_cnt; i++) {
3205		port = &cm_dev->port[i-1];
3206		port->cm_dev = cm_dev;
3207		port->port_num = i;
3208		port->mad_agent = ib_register_mad_agent(device, i,
3209							IB_QPT_GSI,
3210							&reg_req,
3211							0,
3212							cm_send_handler,
3213							cm_recv_handler,
3214							port);
3215		if (IS_ERR(port->mad_agent))
3216			goto error2;
3217
3218		ret = ib_modify_port(device, i, 0, &port_modify);
3219		if (ret)
3220			goto error3;
3221	}
3222	ib_set_client_data(device, &cm_client, cm_dev);
3223
3224	write_lock_irqsave(&cm.device_lock, flags);
3225	list_add_tail(&cm_dev->list, &cm.device_list);
3226	write_unlock_irqrestore(&cm.device_lock, flags);
3227	return;
3228
3229error3:
3230	ib_unregister_mad_agent(port->mad_agent);
3231error2:
3232	port_modify.set_port_cap_mask = 0;
3233	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3234	while (--i) {
3235		port = &cm_dev->port[i-1];
3236		ib_modify_port(device, port->port_num, 0, &port_modify);
3237		ib_unregister_mad_agent(port->mad_agent);
3238	}
3239error1:
3240	kfree(cm_dev);
3241}
3242
3243static void cm_remove_one(struct ib_device *device)
3244{
3245	struct cm_device *cm_dev;
3246	struct cm_port *port;
3247	struct ib_port_modify port_modify = {
3248		.clr_port_cap_mask = IB_PORT_CM_SUP
3249	};
3250	unsigned long flags;
3251	int i;
3252
3253	cm_dev = ib_get_client_data(device, &cm_client);
3254	if (!cm_dev)
3255		return;
3256
3257	write_lock_irqsave(&cm.device_lock, flags);
3258	list_del(&cm_dev->list);
3259	write_unlock_irqrestore(&cm.device_lock, flags);
3260
3261	for (i = 1; i <= device->phys_port_cnt; i++) {
3262		port = &cm_dev->port[i-1];
3263		ib_modify_port(device, port->port_num, 0, &port_modify);
3264		ib_unregister_mad_agent(port->mad_agent);
3265	}
3266	kfree(cm_dev);
3267}
3268
3269static int __init ib_cm_init(void)
3270{
3271	int ret;
3272
3273	memset(&cm, 0, sizeof cm);
3274	INIT_LIST_HEAD(&cm.device_list);
3275	rwlock_init(&cm.device_lock);
3276	spin_lock_init(&cm.lock);
3277	cm.listen_service_table = RB_ROOT;
3278	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3279	cm.remote_id_table = RB_ROOT;
3280	cm.remote_qp_table = RB_ROOT;
3281	cm.remote_sidr_table = RB_ROOT;
3282	idr_init(&cm.local_id_table);
3283	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3284
3285	cm.wq = create_workqueue("ib_cm");
3286	if (!cm.wq)
3287		return -ENOMEM;
3288
3289	ret = ib_register_client(&cm_client);
3290	if (ret)
3291		goto error;
3292
3293	return 0;
3294error:
3295	destroy_workqueue(cm.wq);
3296	return ret;
3297}
3298
3299static void __exit ib_cm_cleanup(void)
3300{
3301	flush_workqueue(cm.wq);
3302	destroy_workqueue(cm.wq);
3303	ib_unregister_client(&cm_client);
3304	idr_destroy(&cm.local_id_table);
3305}
3306
3307module_init(ib_cm_init);
3308module_exit(ib_cm_cleanup);
3309
3310