cm.c revision 858119e159384308a5dde67776691a2ebf70df0f
1/*
2 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
36 */
37#include <linux/dma-mapping.h>
38#include <linux/err.h>
39#include <linux/idr.h>
40#include <linux/interrupt.h>
41#include <linux/pci.h>
42#include <linux/rbtree.h>
43#include <linux/spinlock.h>
44#include <linux/workqueue.h>
45
46#include <rdma/ib_cache.h>
47#include <rdma/ib_cm.h>
48#include "cm_msgs.h"
49
50MODULE_AUTHOR("Sean Hefty");
51MODULE_DESCRIPTION("InfiniBand CM");
52MODULE_LICENSE("Dual BSD/GPL");
53
54static void cm_add_one(struct ib_device *device);
55static void cm_remove_one(struct ib_device *device);
56
57static struct ib_client cm_client = {
58	.name   = "cm",
59	.add    = cm_add_one,
60	.remove = cm_remove_one
61};
62
63static struct ib_cm {
64	spinlock_t lock;
65	struct list_head device_list;
66	rwlock_t device_lock;
67	struct rb_root listen_service_table;
68	u64 listen_service_id;
69	/* struct rb_root peer_service_table; todo: fix peer to peer */
70	struct rb_root remote_qp_table;
71	struct rb_root remote_id_table;
72	struct rb_root remote_sidr_table;
73	struct idr local_id_table;
74	struct workqueue_struct *wq;
75} cm;
76
77struct cm_port {
78	struct cm_device *cm_dev;
79	struct ib_mad_agent *mad_agent;
80	u8 port_num;
81};
82
83struct cm_device {
84	struct list_head list;
85	struct ib_device *device;
86	__be64 ca_guid;
87	struct cm_port port[0];
88};
89
90struct cm_av {
91	struct cm_port *port;
92	union ib_gid dgid;
93	struct ib_ah_attr ah_attr;
94	u16 pkey_index;
95	u8 packet_life_time;
96};
97
98struct cm_work {
99	struct work_struct work;
100	struct list_head list;
101	struct cm_port *port;
102	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
103	__be32 local_id;			/* Established / timewait */
104	__be32 remote_id;
105	struct ib_cm_event cm_event;
106	struct ib_sa_path_rec path[0];
107};
108
109struct cm_timewait_info {
110	struct cm_work work;			/* Must be first. */
111	struct rb_node remote_qp_node;
112	struct rb_node remote_id_node;
113	__be64 remote_ca_guid;
114	__be32 remote_qpn;
115	u8 inserted_remote_qp;
116	u8 inserted_remote_id;
117};
118
119struct cm_id_private {
120	struct ib_cm_id	id;
121
122	struct rb_node service_node;
123	struct rb_node sidr_id_node;
124	spinlock_t lock;
125	wait_queue_head_t wait;
126	atomic_t refcount;
127
128	struct ib_mad_send_buf *msg;
129	struct cm_timewait_info *timewait_info;
130	/* todo: use alternate port on send failure */
131	struct cm_av av;
132	struct cm_av alt_av;
133
134	void *private_data;
135	__be64 tid;
136	__be32 local_qpn;
137	__be32 remote_qpn;
138	enum ib_qp_type qp_type;
139	__be32 sq_psn;
140	__be32 rq_psn;
141	int timeout_ms;
142	enum ib_mtu path_mtu;
143	u8 private_data_len;
144	u8 max_cm_retries;
145	u8 peer_to_peer;
146	u8 responder_resources;
147	u8 initiator_depth;
148	u8 local_ack_timeout;
149	u8 retry_count;
150	u8 rnr_retry_count;
151	u8 service_timeout;
152
153	struct list_head work_list;
154	atomic_t work_count;
155};
156
157static void cm_work_handler(void *data);
158
159static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
160{
161	if (atomic_dec_and_test(&cm_id_priv->refcount))
162		wake_up(&cm_id_priv->wait);
163}
164
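/*
 * Allocate an address handle and MAD send buffer for an outgoing CM
 * message on the cm_id's current port.  Takes a reference on cm_id_priv
 * that is released by cm_free_msg().
 */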
165static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
166			struct ib_mad_send_buf **msg)
167{
168	struct ib_mad_agent *mad_agent;
169	struct ib_mad_send_buf *m;
170	struct ib_ah *ah;
171
172	mad_agent = cm_id_priv->av.port->mad_agent;
173	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
174	if (IS_ERR(ah))
175		return PTR_ERR(ah);
176
177	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
178			       cm_id_priv->av.pkey_index,
179			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
180			       GFP_ATOMIC);
181	if (IS_ERR(m)) {
182		ib_destroy_ah(ah);
183		return PTR_ERR(m);
184	}
185
186	/* Timeout set by caller if response is expected. */
187	m->ah = ah;
188	m->retries = cm_id_priv->max_cm_retries;
189
190	atomic_inc(&cm_id_priv->refcount);
191	m->context[0] = cm_id_priv;
192	*msg = m;
193	return 0;
194}
195
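/*
 * Allocate a MAD send buffer for replying directly to a received MAD.
 * The address handle is built from the incoming work completion, and no
 * reference is taken on any cm_id.
 */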
196static int cm_alloc_response_msg(struct cm_port *port,
197				 struct ib_mad_recv_wc *mad_recv_wc,
198				 struct ib_mad_send_buf **msg)
199{
200	struct ib_mad_send_buf *m;
201	struct ib_ah *ah;
202
203	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
204				  mad_recv_wc->recv_buf.grh, port->port_num);
205	if (IS_ERR(ah))
206		return PTR_ERR(ah);
207
208	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
209			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
210			       GFP_ATOMIC);
211	if (IS_ERR(m)) {
212		ib_destroy_ah(ah);
213		return PTR_ERR(m);
214	}
215	m->ah = ah;
216	*msg = m;
217	return 0;
218}
219
220static void cm_free_msg(struct ib_mad_send_buf *msg)
221{
222	ib_destroy_ah(msg->ah);
223	if (msg->context[0])
224		cm_deref_id(msg->context[0]);
225	ib_free_send_mad(msg);
226}
227
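/*
 * Copy consumer private data so that it can be retained with the cm_id.
 * Returns NULL if there is no data and ERR_PTR(-ENOMEM) on allocation
 * failure.
 */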
228static void * cm_copy_private_data(const void *private_data,
229				   u8 private_data_len)
230{
231	void *data;
232
233	if (!private_data || !private_data_len)
234		return NULL;
235
236	data = kmalloc(private_data_len, GFP_KERNEL);
237	if (!data)
238		return ERR_PTR(-ENOMEM);
239
240	memcpy(data, private_data, private_data_len);
241	return data;
242}
243
244static void cm_set_private_data(struct cm_id_private *cm_id_priv,
245				 void *private_data, u8 private_data_len)
246{
247	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
248		kfree(cm_id_priv->private_data);
249
250	cm_id_priv->private_data = private_data;
251	cm_id_priv->private_data_len = private_data_len;
252}
253
254static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
255			   u16 dlid, u8 sl, u16 src_path_bits)
256{
257	memset(ah_attr, 0, sizeof *ah_attr);
258	ah_attr->dlid = dlid;
259	ah_attr->sl = sl;
260	ah_attr->src_path_bits = src_path_bits;
261	ah_attr->port_num = port_num;
262}
263
264static void cm_init_av_for_response(struct cm_port *port,
265				    struct ib_wc *wc, struct cm_av *av)
266{
267	av->port = port;
268	av->pkey_index = wc->pkey_index;
269	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
270		       wc->sl, wc->dlid_path_bits);
271}
272
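/*
 * Resolve a path record to a local port: find the device and port whose
 * GID cache contains the path's SGID, look up the pkey index, and fill
 * in the address vector used to reach the remote node.
 */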
273static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
274{
275	struct cm_device *cm_dev;
276	struct cm_port *port = NULL;
277	unsigned long flags;
278	int ret;
279	u8 p;
280
281	read_lock_irqsave(&cm.device_lock, flags);
282	list_for_each_entry(cm_dev, &cm.device_list, list) {
283		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
284					&p, NULL)) {
285			port = &cm_dev->port[p-1];
286			break;
287		}
288	}
289	read_unlock_irqrestore(&cm.device_lock, flags);
290
291	if (!port)
292		return -EINVAL;
293
294	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
295				  be16_to_cpu(path->pkey), &av->pkey_index);
296	if (ret)
297		return ret;
298
299	av->port = port;
300	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
301		       be16_to_cpu(path->dlid), path->sl,
302		       be16_to_cpu(path->slid) & 0x7F);
303	av->packet_life_time = path->packet_life_time;
304	return 0;
305}
306
307static int cm_alloc_id(struct cm_id_private *cm_id_priv)
308{
309	unsigned long flags;
310	int ret;
311	static int next_id;
312
313	do {
314		spin_lock_irqsave(&cm.lock, flags);
315		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
316					(__force int *) &cm_id_priv->id.local_id);
317		spin_unlock_irqrestore(&cm.lock, flags);
318	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
319	return ret;
320}
321
322static void cm_free_id(__be32 local_id)
323{
324	unsigned long flags;
325
326	spin_lock_irqsave(&cm.lock, flags);
327	idr_remove(&cm.local_id_table, (__force int) local_id);
328	spin_unlock_irqrestore(&cm.lock, flags);
329}
330
331static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
332{
333	struct cm_id_private *cm_id_priv;
334
335	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
336	if (cm_id_priv) {
337		if (cm_id_priv->id.remote_id == remote_id)
338			atomic_inc(&cm_id_priv->refcount);
339		else
340			cm_id_priv = NULL;
341	}
342
343	return cm_id_priv;
344}
345
346static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
347{
348	struct cm_id_private *cm_id_priv;
349	unsigned long flags;
350
351	spin_lock_irqsave(&cm.lock, flags);
352	cm_id_priv = cm_get_id(local_id, remote_id);
353	spin_unlock_irqrestore(&cm.lock, flags);
354
355	return cm_id_priv;
356}
357
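/*
 * Insert a listening cm_id into the service tree, ordered by device and
 * then service ID.  If an existing listen overlaps the requested service
 * ID/mask on the same device, that entry is returned and nothing is
 * inserted.
 */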
358static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
359{
360	struct rb_node **link = &cm.listen_service_table.rb_node;
361	struct rb_node *parent = NULL;
362	struct cm_id_private *cur_cm_id_priv;
363	__be64 service_id = cm_id_priv->id.service_id;
364	__be64 service_mask = cm_id_priv->id.service_mask;
365
366	while (*link) {
367		parent = *link;
368		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
369					  service_node);
370		if ((cur_cm_id_priv->id.service_mask & service_id) ==
371		    (service_mask & cur_cm_id_priv->id.service_id) &&
372		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
373			return cur_cm_id_priv;
374
375		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
376			link = &(*link)->rb_left;
377		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
378			link = &(*link)->rb_right;
379		else if (service_id < cur_cm_id_priv->id.service_id)
380			link = &(*link)->rb_left;
381		else
382			link = &(*link)->rb_right;
383	}
384	rb_link_node(&cm_id_priv->service_node, parent, link);
385	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
386	return NULL;
387}
388
389static struct cm_id_private * cm_find_listen(struct ib_device *device,
390					     __be64 service_id)
391{
392	struct rb_node *node = cm.listen_service_table.rb_node;
393	struct cm_id_private *cm_id_priv;
394
395	while (node) {
396		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
397		if ((cm_id_priv->id.service_mask & service_id) ==
398		     cm_id_priv->id.service_id &&
399		    (cm_id_priv->id.device == device))
400			return cm_id_priv;
401
402		if (device < cm_id_priv->id.device)
403			node = node->rb_left;
404		else if (device > cm_id_priv->id.device)
405			node = node->rb_right;
406		else if (service_id < cm_id_priv->id.service_id)
407			node = node->rb_left;
408		else
409			node = node->rb_right;
410	}
411	return NULL;
412}
413
414static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
415						     *timewait_info)
416{
417	struct rb_node **link = &cm.remote_id_table.rb_node;
418	struct rb_node *parent = NULL;
419	struct cm_timewait_info *cur_timewait_info;
420	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
421	__be32 remote_id = timewait_info->work.remote_id;
422
423	while (*link) {
424		parent = *link;
425		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
426					     remote_id_node);
427		if (remote_id < cur_timewait_info->work.remote_id)
428			link = &(*link)->rb_left;
429		else if (remote_id > cur_timewait_info->work.remote_id)
430			link = &(*link)->rb_right;
431		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
432			link = &(*link)->rb_left;
433		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
434			link = &(*link)->rb_right;
435		else
436			return cur_timewait_info;
437	}
438	timewait_info->inserted_remote_id = 1;
439	rb_link_node(&timewait_info->remote_id_node, parent, link);
440	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
441	return NULL;
442}
443
444static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
445						   __be32 remote_id)
446{
447	struct rb_node *node = cm.remote_id_table.rb_node;
448	struct cm_timewait_info *timewait_info;
449
450	while (node) {
451		timewait_info = rb_entry(node, struct cm_timewait_info,
452					 remote_id_node);
453		if (remote_id < timewait_info->work.remote_id)
454			node = node->rb_left;
455		else if (remote_id > timewait_info->work.remote_id)
456			node = node->rb_right;
457		else if (remote_ca_guid < timewait_info->remote_ca_guid)
458			node = node->rb_left;
459		else if (remote_ca_guid > timewait_info->remote_ca_guid)
460			node = node->rb_right;
461		else
462			return timewait_info;
463	}
464	return NULL;
465}
466
467static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
468						      *timewait_info)
469{
470	struct rb_node **link = &cm.remote_qp_table.rb_node;
471	struct rb_node *parent = NULL;
472	struct cm_timewait_info *cur_timewait_info;
473	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
474	__be32 remote_qpn = timewait_info->remote_qpn;
475
476	while (*link) {
477		parent = *link;
478		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
479					     remote_qp_node);
480		if (remote_qpn < cur_timewait_info->remote_qpn)
481			link = &(*link)->rb_left;
482		else if (remote_qpn > cur_timewait_info->remote_qpn)
483			link = &(*link)->rb_right;
484		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
485			link = &(*link)->rb_left;
486		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
487			link = &(*link)->rb_right;
488		else
489			return cur_timewait_info;
490	}
491	timewait_info->inserted_remote_qp = 1;
492	rb_link_node(&timewait_info->remote_qp_node, parent, link);
493	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
494	return NULL;
495}
496
497static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
498						    *cm_id_priv)
499{
500	struct rb_node **link = &cm.remote_sidr_table.rb_node;
501	struct rb_node *parent = NULL;
502	struct cm_id_private *cur_cm_id_priv;
503	union ib_gid *port_gid = &cm_id_priv->av.dgid;
504	__be32 remote_id = cm_id_priv->id.remote_id;
505
506	while (*link) {
507		parent = *link;
508		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
509					  sidr_id_node);
510		if (remote_id < cur_cm_id_priv->id.remote_id)
511			link = &(*link)->rb_left;
512		else if (remote_id > cur_cm_id_priv->id.remote_id)
513			link = &(*link)->rb_right;
514		else {
515			int cmp;
516			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
517				     sizeof *port_gid);
518			if (cmp < 0)
519				link = &(*link)->rb_left;
520			else if (cmp > 0)
521				link = &(*link)->rb_right;
522			else
523				return cur_cm_id_priv;
524		}
525	}
526	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
527	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
528	return NULL;
529}
530
531static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
532			       enum ib_cm_sidr_status status)
533{
534	struct ib_cm_sidr_rep_param param;
535
536	memset(&param, 0, sizeof param);
537	param.status = status;
538	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
539}
540
541struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
542				 ib_cm_handler cm_handler,
543				 void *context)
544{
545	struct cm_id_private *cm_id_priv;
546	int ret;
547
548	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
549	if (!cm_id_priv)
550		return ERR_PTR(-ENOMEM);
551
552	cm_id_priv->id.state = IB_CM_IDLE;
553	cm_id_priv->id.device = device;
554	cm_id_priv->id.cm_handler = cm_handler;
555	cm_id_priv->id.context = context;
556	cm_id_priv->id.remote_cm_qpn = 1;
557	ret = cm_alloc_id(cm_id_priv);
558	if (ret)
559		goto error;
560
561	spin_lock_init(&cm_id_priv->lock);
562	init_waitqueue_head(&cm_id_priv->wait);
563	INIT_LIST_HEAD(&cm_id_priv->work_list);
564	atomic_set(&cm_id_priv->work_count, -1);
565	atomic_set(&cm_id_priv->refcount, 1);
566	return &cm_id_priv->id;
567
568error:
569	kfree(cm_id_priv);
570	return ERR_PTR(ret);
571}
572EXPORT_SYMBOL(ib_create_cm_id);
573
574static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
575{
576	struct cm_work *work;
577
578	if (list_empty(&cm_id_priv->work_list))
579		return NULL;
580
581	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
582	list_del(&work->list);
583	return work;
584}
585
586static void cm_free_work(struct cm_work *work)
587{
588	if (work->mad_recv_wc)
589		ib_free_recv_mad(work->mad_recv_wc);
590	kfree(work);
591}
592
593static inline int cm_convert_to_ms(int iba_time)
594{
595	/* approximate conversion to ms from 4.096us x 2^iba_time */
596	return 1 << max(iba_time - 8, 0);
597}
598
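/* Remove the timewait entry from the remote ID and remote QPN trees. */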
599static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
600{
601	unsigned long flags;
602
603	if (!timewait_info->inserted_remote_id &&
604	    !timewait_info->inserted_remote_qp)
605		return;
606
607	spin_lock_irqsave(&cm.lock, flags);
608	if (timewait_info->inserted_remote_id) {
609		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
610		timewait_info->inserted_remote_id = 0;
611	}
612
613	if (timewait_info->inserted_remote_qp) {
614		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
615		timewait_info->inserted_remote_qp = 0;
616	}
617	spin_unlock_irqrestore(&cm.lock, flags);
618}
619
620static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
621{
622	struct cm_timewait_info *timewait_info;
623
624	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
625	if (!timewait_info)
626		return ERR_PTR(-ENOMEM);
627
628	timewait_info->work.local_id = local_id;
629	INIT_WORK(&timewait_info->work.work, cm_work_handler,
630		  &timewait_info->work);
631	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
632	return timewait_info;
633}
634
635static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
636{
637	int wait_time;
638
639	/*
640	 * The cm_id could be destroyed by the user before we exit timewait.
641	 * To protect against this, we search for the cm_id after exiting
642	 * timewait before notifying the user that we've exited timewait.
643	 */
644	cm_id_priv->id.state = IB_CM_TIMEWAIT;
645	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
646	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
647			   msecs_to_jiffies(wait_time));
648	cm_id_priv->timewait_info = NULL;
649}
650
651static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
652{
653	cm_id_priv->id.state = IB_CM_IDLE;
654	if (cm_id_priv->timewait_info) {
655		cm_cleanup_timewait(cm_id_priv->timewait_info);
656		kfree(cm_id_priv->timewait_info);
657		cm_id_priv->timewait_info = NULL;
658	}
659}
660
661void ib_destroy_cm_id(struct ib_cm_id *cm_id)
662{
663	struct cm_id_private *cm_id_priv;
664	struct cm_work *work;
665	unsigned long flags;
666
667	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
668retest:
669	spin_lock_irqsave(&cm_id_priv->lock, flags);
670	switch (cm_id->state) {
671	case IB_CM_LISTEN:
672		cm_id->state = IB_CM_IDLE;
673		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
674		spin_lock_irqsave(&cm.lock, flags);
675		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
676		spin_unlock_irqrestore(&cm.lock, flags);
677		break;
678	case IB_CM_SIDR_REQ_SENT:
679		cm_id->state = IB_CM_IDLE;
680		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
681		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
682		break;
683	case IB_CM_SIDR_REQ_RCVD:
684		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
685		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
686		break;
687	case IB_CM_REQ_SENT:
688		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
689		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
690		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
691			       &cm_id_priv->av.port->cm_dev->ca_guid,
692			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
693			       NULL, 0);
694		break;
695	case IB_CM_MRA_REQ_RCVD:
696	case IB_CM_REP_SENT:
697	case IB_CM_MRA_REP_RCVD:
698		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
699		/* Fall through */
700	case IB_CM_REQ_RCVD:
701	case IB_CM_MRA_REQ_SENT:
702	case IB_CM_REP_RCVD:
703	case IB_CM_MRA_REP_SENT:
704		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
705		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
706			       NULL, 0, NULL, 0);
707		break;
708	case IB_CM_ESTABLISHED:
709		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
710		ib_send_cm_dreq(cm_id, NULL, 0);
711		goto retest;
712	case IB_CM_DREQ_SENT:
713		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
714		cm_enter_timewait(cm_id_priv);
715		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
716		break;
717	case IB_CM_DREQ_RCVD:
718		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
719		ib_send_cm_drep(cm_id, NULL, 0);
720		break;
721	default:
722		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
723		break;
724	}
725
726	cm_free_id(cm_id->local_id);
727	atomic_dec(&cm_id_priv->refcount);
728	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
729	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
730		cm_free_work(work);
731	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
732		kfree(cm_id_priv->private_data);
733	kfree(cm_id_priv);
734}
735EXPORT_SYMBOL(ib_destroy_cm_id);
736
737int ib_cm_listen(struct ib_cm_id *cm_id,
738		 __be64 service_id,
739		 __be64 service_mask)
740{
741	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
742	unsigned long flags;
743	int ret = 0;
744
745	service_mask = service_mask ? service_mask :
746		       __constant_cpu_to_be64(~0ULL);
747	service_id &= service_mask;
748	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
749	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
750		return -EINVAL;
751
752	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
753	BUG_ON(cm_id->state != IB_CM_IDLE);
754
755	cm_id->state = IB_CM_LISTEN;
756
757	spin_lock_irqsave(&cm.lock, flags);
758	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
759		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
760		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
761	} else {
762		cm_id->service_id = service_id;
763		cm_id->service_mask = service_mask;
764	}
765	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
766	spin_unlock_irqrestore(&cm.lock, flags);
767
768	if (cur_cm_id_priv) {
769		cm_id->state = IB_CM_IDLE;
770		ret = -EBUSY;
771	}
772	return ret;
773}
774EXPORT_SYMBOL(ib_cm_listen);
775
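/*
 * Build the 64-bit transaction ID for a new CM message: the MAD agent's
 * hi_tid occupies the upper 32 bits, and the local communication ID,
 * with the message sequence folded into its top two bits, occupies the
 * lower 32 bits.
 */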
776static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
777			  enum cm_msg_sequence msg_seq)
778{
779	u64 hi_tid, low_tid;
780
781	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
782	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
783			  (msg_seq << 30));
784	return cpu_to_be64(hi_tid | low_tid);
785}
786
787static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
788			      __be16 attr_id, __be64 tid)
789{
790	hdr->base_version  = IB_MGMT_BASE_VERSION;
791	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
792	hdr->class_version = IB_CM_CLASS_VERSION;
793	hdr->method	   = IB_MGMT_METHOD_SEND;
794	hdr->attr_id	   = attr_id;
795	hdr->tid	   = tid;
796}
797
798static void cm_format_req(struct cm_req_msg *req_msg,
799			  struct cm_id_private *cm_id_priv,
800			  struct ib_cm_req_param *param)
801{
802	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
803			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
804
805	req_msg->local_comm_id = cm_id_priv->id.local_id;
806	req_msg->service_id = param->service_id;
807	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
808	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
809	cm_req_set_resp_res(req_msg, param->responder_resources);
810	cm_req_set_init_depth(req_msg, param->initiator_depth);
811	cm_req_set_remote_resp_timeout(req_msg,
812				       param->remote_cm_response_timeout);
813	cm_req_set_qp_type(req_msg, param->qp_type);
814	cm_req_set_flow_ctrl(req_msg, param->flow_control);
815	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
816	cm_req_set_local_resp_timeout(req_msg,
817				      param->local_cm_response_timeout);
818	cm_req_set_retry_count(req_msg, param->retry_count);
819	req_msg->pkey = param->primary_path->pkey;
820	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
821	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
822	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
823	cm_req_set_srq(req_msg, param->srq);
824
825	req_msg->primary_local_lid = param->primary_path->slid;
826	req_msg->primary_remote_lid = param->primary_path->dlid;
827	req_msg->primary_local_gid = param->primary_path->sgid;
828	req_msg->primary_remote_gid = param->primary_path->dgid;
829	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
830	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
831	req_msg->primary_traffic_class = param->primary_path->traffic_class;
832	req_msg->primary_hop_limit = param->primary_path->hop_limit;
833	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
834	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
835	cm_req_set_primary_local_ack_timeout(req_msg,
836		min(31, param->primary_path->packet_life_time + 1));
837
838	if (param->alternate_path) {
839		req_msg->alt_local_lid = param->alternate_path->slid;
840		req_msg->alt_remote_lid = param->alternate_path->dlid;
841		req_msg->alt_local_gid = param->alternate_path->sgid;
842		req_msg->alt_remote_gid = param->alternate_path->dgid;
843		cm_req_set_alt_flow_label(req_msg,
844					  param->alternate_path->flow_label);
845		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
846		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
847		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
848		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
849		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
850		cm_req_set_alt_local_ack_timeout(req_msg,
851			min(31, param->alternate_path->packet_life_time + 1));
852	}
853
854	if (param->private_data && param->private_data_len)
855		memcpy(req_msg->private_data, param->private_data,
856		       param->private_data_len);
857}
858
859static int cm_validate_req_param(struct ib_cm_req_param *param)
860{
861	/* peer-to-peer not supported */
862	if (param->peer_to_peer)
863		return -EINVAL;
864
865	if (!param->primary_path)
866		return -EINVAL;
867
868	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
869		return -EINVAL;
870
871	if (param->private_data &&
872	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
873		return -EINVAL;
874
875	if (param->alternate_path &&
876	    (param->alternate_path->pkey != param->primary_path->pkey ||
877	     param->alternate_path->mtu != param->primary_path->mtu))
878		return -EINVAL;
879
880	return 0;
881}
882
883int ib_send_cm_req(struct ib_cm_id *cm_id,
884		   struct ib_cm_req_param *param)
885{
886	struct cm_id_private *cm_id_priv;
887	struct cm_req_msg *req_msg;
888	unsigned long flags;
889	int ret;
890
891	ret = cm_validate_req_param(param);
892	if (ret)
893		return ret;
894
895	/* Verify that we're not in timewait. */
896	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
897	spin_lock_irqsave(&cm_id_priv->lock, flags);
898	if (cm_id->state != IB_CM_IDLE) {
899		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
900		ret = -EINVAL;
901		goto out;
902	}
903	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
904
905	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
906							    id.local_id);
907	if (IS_ERR(cm_id_priv->timewait_info)) {
908		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}
909
910	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
911	if (ret)
912		goto error1;
913	if (param->alternate_path) {
914		ret = cm_init_av_by_path(param->alternate_path,
915					 &cm_id_priv->alt_av);
916		if (ret)
917			goto error1;
918	}
919	cm_id->service_id = param->service_id;
920	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
921	cm_id_priv->timeout_ms = cm_convert_to_ms(
922				    param->primary_path->packet_life_time) * 2 +
923				 cm_convert_to_ms(
924				    param->remote_cm_response_timeout);
925	cm_id_priv->max_cm_retries = param->max_cm_retries;
926	cm_id_priv->initiator_depth = param->initiator_depth;
927	cm_id_priv->responder_resources = param->responder_resources;
928	cm_id_priv->retry_count = param->retry_count;
929	cm_id_priv->path_mtu = param->primary_path->mtu;
930	cm_id_priv->qp_type = param->qp_type;
931
932	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
933	if (ret)
934		goto error1;
935
936	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
937	cm_format_req(req_msg, cm_id_priv, param);
938	cm_id_priv->tid = req_msg->hdr.tid;
939	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
940	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
941
942	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
943	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
944	cm_id_priv->local_ack_timeout =
945				cm_req_get_primary_local_ack_timeout(req_msg);
946
947	spin_lock_irqsave(&cm_id_priv->lock, flags);
948	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
949	if (ret) {
950		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
951		goto error2;
952	}
953	BUG_ON(cm_id->state != IB_CM_IDLE);
954	cm_id->state = IB_CM_REQ_SENT;
955	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
956	return 0;
957
958error2:	cm_free_msg(cm_id_priv->msg);
959error1:	kfree(cm_id_priv->timewait_info);
960out:	return ret;
961}
962EXPORT_SYMBOL(ib_send_cm_req);
963
964static int cm_issue_rej(struct cm_port *port,
965			struct ib_mad_recv_wc *mad_recv_wc,
966			enum ib_cm_rej_reason reason,
967			enum cm_msg_response msg_rejected,
968			void *ari, u8 ari_length)
969{
970	struct ib_mad_send_buf *msg = NULL;
971	struct cm_rej_msg *rej_msg, *rcv_msg;
972	int ret;
973
974	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
975	if (ret)
976		return ret;
977
978	/* We just need common CM header information.  Cast to any message. */
979	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
980	rej_msg = (struct cm_rej_msg *) msg->mad;
981
982	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
983	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
984	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
985	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
986	rej_msg->reason = cpu_to_be16(reason);
987
988	if (ari && ari_length) {
989		cm_rej_set_reject_info_len(rej_msg, ari_length);
990		memcpy(rej_msg->ari, ari, ari_length);
991	}
992
993	ret = ib_post_send_mad(msg, NULL);
994	if (ret)
995		cm_free_msg(msg);
996
997	return ret;
998}
999
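/*
 * Returns non-zero if the local side should act as the active peer:
 * the side with the larger CA GUID, or the larger QPN when the GUIDs
 * are equal.
 */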
1000static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
1001				    __be32 local_qpn, __be32 remote_qpn)
1002{
1003	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1004		((local_ca_guid == remote_ca_guid) &&
1005		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1006}
1007
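/*
 * Build path records from a received REQ.  The REQ describes the path
 * from the sender's point of view, so local and remote fields are
 * swapped when filling in the passive side's view.
 */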
1008static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1009					    struct ib_sa_path_rec *primary_path,
1010					    struct ib_sa_path_rec *alt_path)
1011{
1012	memset(primary_path, 0, sizeof *primary_path);
1013	primary_path->dgid = req_msg->primary_local_gid;
1014	primary_path->sgid = req_msg->primary_remote_gid;
1015	primary_path->dlid = req_msg->primary_local_lid;
1016	primary_path->slid = req_msg->primary_remote_lid;
1017	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1018	primary_path->hop_limit = req_msg->primary_hop_limit;
1019	primary_path->traffic_class = req_msg->primary_traffic_class;
1020	primary_path->reversible = 1;
1021	primary_path->pkey = req_msg->pkey;
1022	primary_path->sl = cm_req_get_primary_sl(req_msg);
1023	primary_path->mtu_selector = IB_SA_EQ;
1024	primary_path->mtu = cm_req_get_path_mtu(req_msg);
1025	primary_path->rate_selector = IB_SA_EQ;
1026	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1027	primary_path->packet_life_time_selector = IB_SA_EQ;
1028	primary_path->packet_life_time =
1029		cm_req_get_primary_local_ack_timeout(req_msg);
1030	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1031
1032	if (req_msg->alt_local_lid) {
1033		memset(alt_path, 0, sizeof *alt_path);
1034		alt_path->dgid = req_msg->alt_local_gid;
1035		alt_path->sgid = req_msg->alt_remote_gid;
1036		alt_path->dlid = req_msg->alt_local_lid;
1037		alt_path->slid = req_msg->alt_remote_lid;
1038		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1039		alt_path->hop_limit = req_msg->alt_hop_limit;
1040		alt_path->traffic_class = req_msg->alt_traffic_class;
1041		alt_path->reversible = 1;
1042		alt_path->pkey = req_msg->pkey;
1043		alt_path->sl = cm_req_get_alt_sl(req_msg);
1044		alt_path->mtu_selector = IB_SA_EQ;
1045		alt_path->mtu = cm_req_get_path_mtu(req_msg);
1046		alt_path->rate_selector = IB_SA_EQ;
1047		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1048		alt_path->packet_life_time_selector = IB_SA_EQ;
1049		alt_path->packet_life_time =
1050			cm_req_get_alt_local_ack_timeout(req_msg);
1051		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1052	}
1053}
1054
1055static void cm_format_req_event(struct cm_work *work,
1056				struct cm_id_private *cm_id_priv,
1057				struct ib_cm_id *listen_id)
1058{
1059	struct cm_req_msg *req_msg;
1060	struct ib_cm_req_event_param *param;
1061
1062	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1063	param = &work->cm_event.param.req_rcvd;
1064	param->listen_id = listen_id;
1065	param->port = cm_id_priv->av.port->port_num;
1066	param->primary_path = &work->path[0];
1067	if (req_msg->alt_local_lid)
1068		param->alternate_path = &work->path[1];
1069	else
1070		param->alternate_path = NULL;
1071	param->remote_ca_guid = req_msg->local_ca_guid;
1072	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1073	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1074	param->qp_type = cm_req_get_qp_type(req_msg);
1075	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1076	param->responder_resources = cm_req_get_init_depth(req_msg);
1077	param->initiator_depth = cm_req_get_resp_res(req_msg);
1078	param->local_cm_response_timeout =
1079					cm_req_get_remote_resp_timeout(req_msg);
1080	param->flow_control = cm_req_get_flow_ctrl(req_msg);
1081	param->remote_cm_response_timeout =
1082					cm_req_get_local_resp_timeout(req_msg);
1083	param->retry_count = cm_req_get_retry_count(req_msg);
1084	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1085	param->srq = cm_req_get_srq(req_msg);
1086	work->cm_event.private_data = &req_msg->private_data;
1087}
1088
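/*
 * Deliver the given event to the consumer, then drain any work that was
 * queued on the cm_id while an earlier event was being reported.  If any
 * handler returns a non-zero value, the cm_id is destroyed.
 */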
1089static void cm_process_work(struct cm_id_private *cm_id_priv,
1090			    struct cm_work *work)
1091{
1092	unsigned long flags;
1093	int ret;
1094
1095	/* We will typically only have the current event to report. */
1096	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1097	cm_free_work(work);
1098
1099	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1100		spin_lock_irqsave(&cm_id_priv->lock, flags);
1101		work = cm_dequeue_work(cm_id_priv);
1102		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1103		BUG_ON(!work);
1104		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1105						&work->cm_event);
1106		cm_free_work(work);
1107	}
1108	cm_deref_id(cm_id_priv);
1109	if (ret)
1110		ib_destroy_cm_id(&cm_id_priv->id);
1111}
1112
1113static void cm_format_mra(struct cm_mra_msg *mra_msg,
1114			  struct cm_id_private *cm_id_priv,
1115			  enum cm_msg_response msg_mraed, u8 service_timeout,
1116			  const void *private_data, u8 private_data_len)
1117{
1118	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1119	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1120	mra_msg->local_comm_id = cm_id_priv->id.local_id;
1121	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1122	cm_mra_set_service_timeout(mra_msg, service_timeout);
1123
1124	if (private_data && private_data_len)
1125		memcpy(mra_msg->private_data, private_data, private_data_len);
1126}
1127
1128static void cm_format_rej(struct cm_rej_msg *rej_msg,
1129			  struct cm_id_private *cm_id_priv,
1130			  enum ib_cm_rej_reason reason,
1131			  void *ari,
1132			  u8 ari_length,
1133			  const void *private_data,
1134			  u8 private_data_len)
1135{
1136	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1137	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1138
1139	switch (cm_id_priv->id.state) {
1140	case IB_CM_REQ_RCVD:
1141		rej_msg->local_comm_id = 0;
1142		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1143		break;
1144	case IB_CM_MRA_REQ_SENT:
1145		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1146		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1147		break;
1148	case IB_CM_REP_RCVD:
1149	case IB_CM_MRA_REP_SENT:
1150		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1151		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1152		break;
1153	default:
1154		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1155		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1156		break;
1157	}
1158
1159	rej_msg->reason = cpu_to_be16(reason);
1160	if (ari && ari_length) {
1161		cm_rej_set_reject_info_len(rej_msg, ari_length);
1162		memcpy(rej_msg->ari, ari, ari_length);
1163	}
1164
1165	if (private_data && private_data_len)
1166		memcpy(rej_msg->private_data, private_data, private_data_len);
1167}
1168
1169static void cm_dup_req_handler(struct cm_work *work,
1170			       struct cm_id_private *cm_id_priv)
1171{
1172	struct ib_mad_send_buf *msg = NULL;
1173	unsigned long flags;
1174	int ret;
1175
1176	/* Quick state check to discard duplicate REQs. */
1177	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1178		return;
1179
1180	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1181	if (ret)
1182		return;
1183
1184	spin_lock_irqsave(&cm_id_priv->lock, flags);
1185	switch (cm_id_priv->id.state) {
1186	case IB_CM_MRA_REQ_SENT:
1187		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1188			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1189			      cm_id_priv->private_data,
1190			      cm_id_priv->private_data_len);
1191		break;
1192	case IB_CM_TIMEWAIT:
1193		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1194			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1195		break;
1196	default:
1197		goto unlock;
1198	}
1199	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1200
1201	ret = ib_post_send_mad(msg, NULL);
1202	if (ret)
1203		goto free;
1204	return;
1205
1206unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1207free:	cm_free_msg(msg);
1208}
1209
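/*
 * Match an incoming REQ against existing state: detect duplicate REQs
 * and stale connections via the timewait trees, then look up a matching
 * listen.  On success the listener is returned with an extra reference
 * held, and the new cm_id is moved to the REQ received state.
 */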
1210static struct cm_id_private * cm_match_req(struct cm_work *work,
1211					   struct cm_id_private *cm_id_priv)
1212{
1213	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1214	struct cm_timewait_info *timewait_info;
1215	struct cm_req_msg *req_msg;
1216	unsigned long flags;
1217
1218	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1219
1220	/* Check for duplicate REQ and stale connections. */
1221	spin_lock_irqsave(&cm.lock, flags);
1222	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1223	if (!timewait_info)
1224		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1225
1226	if (timewait_info) {
1227		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1228					   timewait_info->work.remote_id);
1229		spin_unlock_irqrestore(&cm.lock, flags);
1230		if (cur_cm_id_priv) {
1231			cm_dup_req_handler(work, cur_cm_id_priv);
1232			cm_deref_id(cur_cm_id_priv);
1233		} else
1234			cm_issue_rej(work->port, work->mad_recv_wc,
1235				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1236				     NULL, 0);
1237		goto error;
1238	}
1239
1240	/* Find matching listen request. */
1241	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1242					   req_msg->service_id);
1243	if (!listen_cm_id_priv) {
1244		spin_unlock_irqrestore(&cm.lock, flags);
1245		cm_issue_rej(work->port, work->mad_recv_wc,
1246			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1247			     NULL, 0);
1248		goto error;
1249	}
1250	atomic_inc(&listen_cm_id_priv->refcount);
1251	atomic_inc(&cm_id_priv->refcount);
1252	cm_id_priv->id.state = IB_CM_REQ_RCVD;
1253	atomic_inc(&cm_id_priv->work_count);
1254	spin_unlock_irqrestore(&cm.lock, flags);
1255	return listen_cm_id_priv;
1256
1257error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
1258	return NULL;
1259}
1260
1261static int cm_req_handler(struct cm_work *work)
1262{
1263	struct ib_cm_id *cm_id;
1264	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1265	struct cm_req_msg *req_msg;
1266	int ret;
1267
1268	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1269
1270	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
1271	if (IS_ERR(cm_id))
1272		return PTR_ERR(cm_id);
1273
1274	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1275	cm_id_priv->id.remote_id = req_msg->local_comm_id;
1276	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1277				&cm_id_priv->av);
1278	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1279							    id.local_id);
1280	if (IS_ERR(cm_id_priv->timewait_info)) {
1281		ret = PTR_ERR(cm_id_priv->timewait_info);
1282		goto error1;
1283	}
1284	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1285	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1286	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1287
1288	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1289	if (!listen_cm_id_priv) {
1290		ret = -EINVAL;
1291		goto error2;
1292	}
1293
1294	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1295	cm_id_priv->id.context = listen_cm_id_priv->id.context;
1296	cm_id_priv->id.service_id = req_msg->service_id;
1297	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
1298
1299	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1300	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1301	if (ret)
1302		goto error3;
1303	if (req_msg->alt_local_lid) {
1304		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1305		if (ret)
1306			goto error3;
1307	}
1308	cm_id_priv->tid = req_msg->hdr.tid;
1309	cm_id_priv->timeout_ms = cm_convert_to_ms(
1310					cm_req_get_local_resp_timeout(req_msg));
1311	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1312	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1313	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1314	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1315	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1316	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1317	cm_id_priv->local_ack_timeout =
1318				cm_req_get_primary_local_ack_timeout(req_msg);
1319	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1320	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1321	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1322
1323	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1324	cm_process_work(cm_id_priv, work);
1325	cm_deref_id(listen_cm_id_priv);
1326	return 0;
1327
1328error3:	atomic_dec(&cm_id_priv->refcount);
1329	cm_deref_id(listen_cm_id_priv);
1330	cm_cleanup_timewait(cm_id_priv->timewait_info);
1331error2:	kfree(cm_id_priv->timewait_info);
1332	cm_id_priv->timewait_info = NULL;
1333error1:	ib_destroy_cm_id(&cm_id_priv->id);
1334	return ret;
1335}
1336
1337static void cm_format_rep(struct cm_rep_msg *rep_msg,
1338			  struct cm_id_private *cm_id_priv,
1339			  struct ib_cm_rep_param *param)
1340{
1341	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1342	rep_msg->local_comm_id = cm_id_priv->id.local_id;
1343	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1344	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1345	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1346	rep_msg->resp_resources = param->responder_resources;
1347	rep_msg->initiator_depth = param->initiator_depth;
1348	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1349	cm_rep_set_failover(rep_msg, param->failover_accepted);
1350	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1351	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1352	cm_rep_set_srq(rep_msg, param->srq);
1353	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1354
1355	if (param->private_data && param->private_data_len)
1356		memcpy(rep_msg->private_data, param->private_data,
1357		       param->private_data_len);
1358}
1359
1360int ib_send_cm_rep(struct ib_cm_id *cm_id,
1361		   struct ib_cm_rep_param *param)
1362{
1363	struct cm_id_private *cm_id_priv;
1364	struct ib_mad_send_buf *msg;
1365	struct cm_rep_msg *rep_msg;
1366	unsigned long flags;
1367	int ret;
1368
1369	if (param->private_data &&
1370	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1371		return -EINVAL;
1372
1373	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1374	spin_lock_irqsave(&cm_id_priv->lock, flags);
1375	if (cm_id->state != IB_CM_REQ_RCVD &&
1376	    cm_id->state != IB_CM_MRA_REQ_SENT) {
1377		ret = -EINVAL;
1378		goto out;
1379	}
1380
1381	ret = cm_alloc_msg(cm_id_priv, &msg);
1382	if (ret)
1383		goto out;
1384
1385	rep_msg = (struct cm_rep_msg *) msg->mad;
1386	cm_format_rep(rep_msg, cm_id_priv, param);
1387	msg->timeout_ms = cm_id_priv->timeout_ms;
1388	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1389
1390	ret = ib_post_send_mad(msg, NULL);
1391	if (ret) {
1392		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1393		cm_free_msg(msg);
1394		return ret;
1395	}
1396
1397	cm_id->state = IB_CM_REP_SENT;
1398	cm_id_priv->msg = msg;
1399	cm_id_priv->initiator_depth = param->initiator_depth;
1400	cm_id_priv->responder_resources = param->responder_resources;
1401	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1402	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1403
1404out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1405	return ret;
1406}
1407EXPORT_SYMBOL(ib_send_cm_rep);
1408
1409static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1410			  struct cm_id_private *cm_id_priv,
1411			  const void *private_data,
1412			  u8 private_data_len)
1413{
1414	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1415	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1416	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1417
1418	if (private_data && private_data_len)
1419		memcpy(rtu_msg->private_data, private_data, private_data_len);
1420}
1421
1422int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1423		   const void *private_data,
1424		   u8 private_data_len)
1425{
1426	struct cm_id_private *cm_id_priv;
1427	struct ib_mad_send_buf *msg;
1428	unsigned long flags;
1429	void *data;
1430	int ret;
1431
1432	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1433		return -EINVAL;
1434
1435	data = cm_copy_private_data(private_data, private_data_len);
1436	if (IS_ERR(data))
1437		return PTR_ERR(data);
1438
1439	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1440	spin_lock_irqsave(&cm_id_priv->lock, flags);
1441	if (cm_id->state != IB_CM_REP_RCVD &&
1442	    cm_id->state != IB_CM_MRA_REP_SENT) {
1443		ret = -EINVAL;
1444		goto error;
1445	}
1446
1447	ret = cm_alloc_msg(cm_id_priv, &msg);
1448	if (ret)
1449		goto error;
1450
1451	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1452		      private_data, private_data_len);
1453
1454	ret = ib_post_send_mad(msg, NULL);
1455	if (ret) {
1456		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1457		cm_free_msg(msg);
1458		kfree(data);
1459		return ret;
1460	}
1461
1462	cm_id->state = IB_CM_ESTABLISHED;
1463	cm_set_private_data(cm_id_priv, data, private_data_len);
1464	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1465	return 0;
1466
1467error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1468	kfree(data);
1469	return ret;
1470}
1471EXPORT_SYMBOL(ib_send_cm_rtu);
1472
1473static void cm_format_rep_event(struct cm_work *work)
1474{
1475	struct cm_rep_msg *rep_msg;
1476	struct ib_cm_rep_event_param *param;
1477
1478	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1479	param = &work->cm_event.param.rep_rcvd;
1480	param->remote_ca_guid = rep_msg->local_ca_guid;
1481	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1482	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1483	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1484	param->responder_resources = rep_msg->initiator_depth;
1485	param->initiator_depth = rep_msg->resp_resources;
1486	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1487	param->failover_accepted = cm_rep_get_failover(rep_msg);
1488	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1489	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1490	param->srq = cm_rep_get_srq(rep_msg);
1491	work->cm_event.private_data = &rep_msg->private_data;
1492}
1493
1494static void cm_dup_rep_handler(struct cm_work *work)
1495{
1496	struct cm_id_private *cm_id_priv;
1497	struct cm_rep_msg *rep_msg;
1498	struct ib_mad_send_buf *msg = NULL;
1499	unsigned long flags;
1500	int ret;
1501
1502	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1503	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1504				   rep_msg->local_comm_id);
1505	if (!cm_id_priv)
1506		return;
1507
1508	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1509	if (ret)
1510		goto deref;
1511
1512	spin_lock_irqsave(&cm_id_priv->lock, flags);
1513	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1514		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1515			      cm_id_priv->private_data,
1516			      cm_id_priv->private_data_len);
1517	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1518		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1519			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1520			      cm_id_priv->private_data,
1521			      cm_id_priv->private_data_len);
1522	else
1523		goto unlock;
1524	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1525
1526	ret = ib_post_send_mad(msg, NULL);
1527	if (ret)
1528		goto free;
1529	goto deref;
1530
1531unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1532free:	cm_free_msg(msg);
1533deref:	cm_deref_id(cm_id_priv);
1534}
1535
1536static int cm_rep_handler(struct cm_work *work)
1537{
1538	struct cm_id_private *cm_id_priv;
1539	struct cm_rep_msg *rep_msg;
1540	unsigned long flags;
1541	int ret;
1542
1543	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1544	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1545	if (!cm_id_priv) {
1546		cm_dup_rep_handler(work);
1547		return -EINVAL;
1548	}
1549
1550	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1551	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1552	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1553
1554	spin_lock_irqsave(&cm.lock, flags);
1555	/* Check for duplicate REP. */
1556	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1557		spin_unlock_irqrestore(&cm.lock, flags);
1558		ret = -EINVAL;
1559		goto error;
1560	}
1561	/* Check for a stale connection. */
1562	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1563		spin_unlock_irqrestore(&cm.lock, flags);
1564		cm_issue_rej(work->port, work->mad_recv_wc,
1565			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1566			     NULL, 0);
1567		ret = -EINVAL;
1568		goto error;
1569	}
1570	spin_unlock_irqrestore(&cm.lock, flags);
1571
1572	cm_format_rep_event(work);
1573
1574	spin_lock_irqsave(&cm_id_priv->lock, flags);
1575	switch (cm_id_priv->id.state) {
1576	case IB_CM_REQ_SENT:
1577	case IB_CM_MRA_REQ_RCVD:
1578		break;
1579	default:
1580		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1581		ret = -EINVAL;
1582		goto error;
1583	}
1584	cm_id_priv->id.state = IB_CM_REP_RCVD;
1585	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1586	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1587	cm_id_priv->initiator_depth = rep_msg->resp_resources;
1588	cm_id_priv->responder_resources = rep_msg->initiator_depth;
1589	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1590	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1591
1592	/* todo: handle peer_to_peer */
1593
1594	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1595	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1596	if (!ret)
1597		list_add_tail(&work->list, &cm_id_priv->work_list);
1598	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1599
1600	if (ret)
1601		cm_process_work(cm_id_priv, work);
1602	else
1603		cm_deref_id(cm_id_priv);
1604	return 0;
1605
1606error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
1607	cm_deref_id(cm_id_priv);
1608	return ret;
1609}
1610
1611static int cm_establish_handler(struct cm_work *work)
1612{
1613	struct cm_id_private *cm_id_priv;
1614	unsigned long flags;
1615	int ret;
1616
1617	/* See comment in ib_cm_establish about lookup. */
1618	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1619	if (!cm_id_priv)
1620		return -EINVAL;
1621
1622	spin_lock_irqsave(&cm_id_priv->lock, flags);
1623	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1624		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1625		goto out;
1626	}
1627
1628	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1629	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1630	if (!ret)
1631		list_add_tail(&work->list, &cm_id_priv->work_list);
1632	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1633
1634	if (ret)
1635		cm_process_work(cm_id_priv, work);
1636	else
1637		cm_deref_id(cm_id_priv);
1638	return 0;
1639out:
1640	cm_deref_id(cm_id_priv);
1641	return -EINVAL;
1642}
1643
1644static int cm_rtu_handler(struct cm_work *work)
1645{
1646	struct cm_id_private *cm_id_priv;
1647	struct cm_rtu_msg *rtu_msg;
1648	unsigned long flags;
1649	int ret;
1650
1651	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1652	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1653				   rtu_msg->local_comm_id);
1654	if (!cm_id_priv)
1655		return -EINVAL;
1656
1657	work->cm_event.private_data = &rtu_msg->private_data;
1658
1659	spin_lock_irqsave(&cm_id_priv->lock, flags);
1660	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1661	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1662		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1663		goto out;
1664	}
1665	cm_id_priv->id.state = IB_CM_ESTABLISHED;
1666
1667	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1668	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1669	if (!ret)
1670		list_add_tail(&work->list, &cm_id_priv->work_list);
1671	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1672
1673	if (ret)
1674		cm_process_work(cm_id_priv, work);
1675	else
1676		cm_deref_id(cm_id_priv);
1677	return 0;
1678out:
1679	cm_deref_id(cm_id_priv);
1680	return -EINVAL;
1681}
1682
1683static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1684			  struct cm_id_private *cm_id_priv,
1685			  const void *private_data,
1686			  u8 private_data_len)
1687{
1688	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1689			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1690	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1691	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1692	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1693
1694	if (private_data && private_data_len)
1695		memcpy(dreq_msg->private_data, private_data, private_data_len);
1696}
1697
1698int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1699		    const void *private_data,
1700		    u8 private_data_len)
1701{
1702	struct cm_id_private *cm_id_priv;
1703	struct ib_mad_send_buf *msg;
1704	unsigned long flags;
1705	int ret;
1706
1707	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1708		return -EINVAL;
1709
1710	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1711	spin_lock_irqsave(&cm_id_priv->lock, flags);
1712	if (cm_id->state != IB_CM_ESTABLISHED) {
1713		ret = -EINVAL;
1714		goto out;
1715	}
1716
1717	ret = cm_alloc_msg(cm_id_priv, &msg);
1718	if (ret) {
1719		cm_enter_timewait(cm_id_priv);
1720		goto out;
1721	}
1722
1723	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1724		       private_data, private_data_len);
1725	msg->timeout_ms = cm_id_priv->timeout_ms;
1726	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1727
1728	ret = ib_post_send_mad(msg, NULL);
1729	if (ret) {
1730		cm_enter_timewait(cm_id_priv);
1731		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1732		cm_free_msg(msg);
1733		return ret;
1734	}
1735
1736	cm_id->state = IB_CM_DREQ_SENT;
1737	cm_id_priv->msg = msg;
1738out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1739	return ret;
1740}
1741EXPORT_SYMBOL(ib_send_cm_dreq);
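
/*
 * Editor's note: illustrative usage sketch, not part of the original
 * source.  A ULP tearing down an established connection sends a DREQ;
 * the struct and function names below are hypothetical.
 *
 *	static void my_disconnect(struct my_conn *conn)
 *	{
 *		if (ib_send_cm_dreq(conn->cm_id, NULL, 0))
 *			printk(KERN_DEBUG "connection no longer established\n");
 *	}
 *
 * The call fails with -EINVAL unless the id is in IB_CM_ESTABLISHED;
 * on success the id enters IB_CM_DREQ_SENT and waits for the DREP.
 */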
1742
1743static void cm_format_drep(struct cm_drep_msg *drep_msg,
1744			  struct cm_id_private *cm_id_priv,
1745			  const void *private_data,
1746			  u8 private_data_len)
1747{
1748	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1749	drep_msg->local_comm_id = cm_id_priv->id.local_id;
1750	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1751
1752	if (private_data && private_data_len)
1753		memcpy(drep_msg->private_data, private_data, private_data_len);
1754}
1755
1756int ib_send_cm_drep(struct ib_cm_id *cm_id,
1757		    const void *private_data,
1758		    u8 private_data_len)
1759{
1760	struct cm_id_private *cm_id_priv;
1761	struct ib_mad_send_buf *msg;
1762	unsigned long flags;
1763	void *data;
1764	int ret;
1765
1766	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1767		return -EINVAL;
1768
1769	data = cm_copy_private_data(private_data, private_data_len);
1770	if (IS_ERR(data))
1771		return PTR_ERR(data);
1772
1773	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1774	spin_lock_irqsave(&cm_id_priv->lock, flags);
1775	if (cm_id->state != IB_CM_DREQ_RCVD) {
1776		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1777		kfree(data);
1778		return -EINVAL;
1779	}
1780
1781	cm_set_private_data(cm_id_priv, data, private_data_len);
1782	cm_enter_timewait(cm_id_priv);
1783
1784	ret = cm_alloc_msg(cm_id_priv, &msg);
1785	if (ret)
1786		goto out;
1787
1788	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1789		       private_data, private_data_len);
1790
1791	ret = ib_post_send_mad(msg, NULL);
1792	if (ret) {
1793		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1794		cm_free_msg(msg);
1795		return ret;
1796	}
1797
1798out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1799	return ret;
1800}
1801EXPORT_SYMBOL(ib_send_cm_drep);
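
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * The side that receives a DREQ sees IB_CM_DREQ_RECEIVED in its event
 * handler and answers with a DREP while the id is in IB_CM_DREQ_RCVD;
 * the handler name is hypothetical.
 *
 *	static int my_cm_handler(struct ib_cm_id *cm_id,
 *				 struct ib_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case IB_CM_DREQ_RECEIVED:
 *			ib_send_cm_drep(cm_id, NULL, 0);
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */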
1802
1803static int cm_dreq_handler(struct cm_work *work)
1804{
1805	struct cm_id_private *cm_id_priv;
1806	struct cm_dreq_msg *dreq_msg;
1807	struct ib_mad_send_buf *msg = NULL;
1808	unsigned long flags;
1809	int ret;
1810
1811	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1812	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1813				   dreq_msg->local_comm_id);
1814	if (!cm_id_priv)
1815		return -EINVAL;
1816
1817	work->cm_event.private_data = &dreq_msg->private_data;
1818
1819	spin_lock_irqsave(&cm_id_priv->lock, flags);
1820	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1821		goto unlock;
1822
1823	switch (cm_id_priv->id.state) {
1824	case IB_CM_REP_SENT:
1825	case IB_CM_DREQ_SENT:
1826		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1827		break;
1828	case IB_CM_ESTABLISHED:
1829	case IB_CM_MRA_REP_RCVD:
1830		break;
1831	case IB_CM_TIMEWAIT:
1832		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1833			goto unlock;
1834
1835		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1836			       cm_id_priv->private_data,
1837			       cm_id_priv->private_data_len);
1838		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1839
1840		if (ib_post_send_mad(msg, NULL))
1841			cm_free_msg(msg);
1842		goto deref;
1843	default:
1844		goto unlock;
1845	}
1846	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1847	cm_id_priv->tid = dreq_msg->hdr.tid;
1848	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1849	if (!ret)
1850		list_add_tail(&work->list, &cm_id_priv->work_list);
1851	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1852
1853	if (ret)
1854		cm_process_work(cm_id_priv, work);
1855	else
1856		cm_deref_id(cm_id_priv);
1857	return 0;
1858
1859unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1860deref:	cm_deref_id(cm_id_priv);
1861	return -EINVAL;
1862}
1863
1864static int cm_drep_handler(struct cm_work *work)
1865{
1866	struct cm_id_private *cm_id_priv;
1867	struct cm_drep_msg *drep_msg;
1868	unsigned long flags;
1869	int ret;
1870
1871	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1872	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1873				   drep_msg->local_comm_id);
1874	if (!cm_id_priv)
1875		return -EINVAL;
1876
1877	work->cm_event.private_data = &drep_msg->private_data;
1878
1879	spin_lock_irqsave(&cm_id_priv->lock, flags);
1880	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1881	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1882		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1883		goto out;
1884	}
1885	cm_enter_timewait(cm_id_priv);
1886
1887	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1888	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1889	if (!ret)
1890		list_add_tail(&work->list, &cm_id_priv->work_list);
1891	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1892
1893	if (ret)
1894		cm_process_work(cm_id_priv, work);
1895	else
1896		cm_deref_id(cm_id_priv);
1897	return 0;
1898out:
1899	cm_deref_id(cm_id_priv);
1900	return -EINVAL;
1901}
1902
1903int ib_send_cm_rej(struct ib_cm_id *cm_id,
1904		   enum ib_cm_rej_reason reason,
1905		   void *ari,
1906		   u8 ari_length,
1907		   const void *private_data,
1908		   u8 private_data_len)
1909{
1910	struct cm_id_private *cm_id_priv;
1911	struct ib_mad_send_buf *msg;
1912	unsigned long flags;
1913	int ret;
1914
1915	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1916	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1917		return -EINVAL;
1918
1919	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1920
1921	spin_lock_irqsave(&cm_id_priv->lock, flags);
1922	switch (cm_id->state) {
1923	case IB_CM_REQ_SENT:
1924	case IB_CM_MRA_REQ_RCVD:
1925	case IB_CM_REQ_RCVD:
1926	case IB_CM_MRA_REQ_SENT:
1927	case IB_CM_REP_RCVD:
1928	case IB_CM_MRA_REP_SENT:
1929		ret = cm_alloc_msg(cm_id_priv, &msg);
1930		if (!ret)
1931			cm_format_rej((struct cm_rej_msg *) msg->mad,
1932				      cm_id_priv, reason, ari, ari_length,
1933				      private_data, private_data_len);
1934
1935		cm_reset_to_idle(cm_id_priv);
1936		break;
1937	case IB_CM_REP_SENT:
1938	case IB_CM_MRA_REP_RCVD:
1939		ret = cm_alloc_msg(cm_id_priv, &msg);
1940		if (!ret)
1941			cm_format_rej((struct cm_rej_msg *) msg->mad,
1942				      cm_id_priv, reason, ari, ari_length,
1943				      private_data, private_data_len);
1944
1945		cm_enter_timewait(cm_id_priv);
1946		break;
1947	default:
1948		ret = -EINVAL;
1949		goto out;
1950	}
1951
1952	if (ret)
1953		goto out;
1954
1955	ret = ib_post_send_mad(msg, NULL);
1956	if (ret)
1957		cm_free_msg(msg);
1958
1959out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1960	return ret;
1961}
1962EXPORT_SYMBOL(ib_send_cm_rej);
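
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * A listener that cannot accept an incoming REQ may reject it from the
 * IB_CM_REQ_RECEIVED callback; my_have_resources() is hypothetical.
 * Returning a non-zero value from the callback asks the CM to destroy
 * the new cm_id once the reject has been sent.
 *
 *	case IB_CM_REQ_RECEIVED:
 *		if (!my_have_resources()) {
 *			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *				       NULL, 0, NULL, 0);
 *			return -ECONNREFUSED;
 *		}
 *		break;
 */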
1963
1964static void cm_format_rej_event(struct cm_work *work)
1965{
1966	struct cm_rej_msg *rej_msg;
1967	struct ib_cm_rej_event_param *param;
1968
1969	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1970	param = &work->cm_event.param.rej_rcvd;
1971	param->ari = rej_msg->ari;
1972	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1973	param->reason = __be16_to_cpu(rej_msg->reason);
1974	work->cm_event.private_data = &rej_msg->private_data;
1975}
1976
1977static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
1978{
1979	struct cm_timewait_info *timewait_info;
1980	struct cm_id_private *cm_id_priv;
1981	unsigned long flags;
1982	__be32 remote_id;
1983
1984	remote_id = rej_msg->local_comm_id;
1985
1986	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
1987		spin_lock_irqsave(&cm.lock, flags);
1988		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
1989						  remote_id);
1990		if (!timewait_info) {
1991			spin_unlock_irqrestore(&cm.lock, flags);
1992			return NULL;
1993		}
1994		cm_id_priv = idr_find(&cm.local_id_table,
1995				      (__force int) timewait_info->work.local_id);
1996		if (cm_id_priv) {
1997			if (cm_id_priv->id.remote_id == remote_id)
1998				atomic_inc(&cm_id_priv->refcount);
1999			else
2000				cm_id_priv = NULL;
2001		}
2002		spin_unlock_irqrestore(&cm.lock, flags);
2003	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2004		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2005	else
2006		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2007
2008	return cm_id_priv;
2009}
2010
2011static int cm_rej_handler(struct cm_work *work)
2012{
2013	struct cm_id_private *cm_id_priv;
2014	struct cm_rej_msg *rej_msg;
2015	unsigned long flags;
2016	int ret;
2017
2018	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2019	cm_id_priv = cm_acquire_rejected_id(rej_msg);
2020	if (!cm_id_priv)
2021		return -EINVAL;
2022
2023	cm_format_rej_event(work);
2024
2025	spin_lock_irqsave(&cm_id_priv->lock, flags);
2026	switch (cm_id_priv->id.state) {
2027	case IB_CM_REQ_SENT:
2028	case IB_CM_MRA_REQ_RCVD:
2029	case IB_CM_REP_SENT:
2030	case IB_CM_MRA_REP_RCVD:
2031		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2032		/* fall through */
2033	case IB_CM_REQ_RCVD:
2034	case IB_CM_MRA_REQ_SENT:
2035		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2036			cm_enter_timewait(cm_id_priv);
2037		else
2038			cm_reset_to_idle(cm_id_priv);
2039		break;
2040	case IB_CM_DREQ_SENT:
2041		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2042		/* fall through */
2043	case IB_CM_REP_RCVD:
2044	case IB_CM_MRA_REP_SENT:
2045	case IB_CM_ESTABLISHED:
2046		cm_enter_timewait(cm_id_priv);
2047		break;
2048	default:
2049		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2050		ret = -EINVAL;
2051		goto out;
2052	}
2053
2054	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2055	if (!ret)
2056		list_add_tail(&work->list, &cm_id_priv->work_list);
2057	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2058
2059	if (ret)
2060		cm_process_work(cm_id_priv, work);
2061	else
2062		cm_deref_id(cm_id_priv);
2063	return 0;
2064out:
2065	cm_deref_id(cm_id_priv);
2066	return -EINVAL;
2067}
2068
2069int ib_send_cm_mra(struct ib_cm_id *cm_id,
2070		   u8 service_timeout,
2071		   const void *private_data,
2072		   u8 private_data_len)
2073{
2074	struct cm_id_private *cm_id_priv;
2075	struct ib_mad_send_buf *msg;
2076	void *data;
2077	unsigned long flags;
2078	int ret;
2079
2080	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2081		return -EINVAL;
2082
2083	data = cm_copy_private_data(private_data, private_data_len);
2084	if (IS_ERR(data))
2085		return PTR_ERR(data);
2086
2087	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2088
2089	spin_lock_irqsave(&cm_id_priv->lock, flags);
2090	switch (cm_id_priv->id.state) {
2091	case IB_CM_REQ_RCVD:
2092		ret = cm_alloc_msg(cm_id_priv, &msg);
2093		if (ret)
2094			goto error1;
2095
2096		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2097			      CM_MSG_RESPONSE_REQ, service_timeout,
2098			      private_data, private_data_len);
2099		ret = ib_post_send_mad(msg, NULL);
2100		if (ret)
2101			goto error2;
2102		cm_id->state = IB_CM_MRA_REQ_SENT;
2103		break;
2104	case IB_CM_REP_RCVD:
2105		ret = cm_alloc_msg(cm_id_priv, &msg);
2106		if (ret)
2107			goto error1;
2108
2109		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2110			      CM_MSG_RESPONSE_REP, service_timeout,
2111			      private_data, private_data_len);
2112		ret = ib_post_send_mad(msg, NULL);
2113		if (ret)
2114			goto error2;
2115		cm_id->state = IB_CM_MRA_REP_SENT;
2116		break;
2117	case IB_CM_ESTABLISHED:
2118		ret = cm_alloc_msg(cm_id_priv, &msg);
2119		if (ret)
2120			goto error1;
2121
2122		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2123			      CM_MSG_RESPONSE_OTHER, service_timeout,
2124			      private_data, private_data_len);
2125		ret = ib_post_send_mad(msg, NULL);
2126		if (ret)
2127			goto error2;
2128		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2129		break;
2130	default:
2131		ret = -EINVAL;
2132		goto error1;
2133	}
2134	cm_id_priv->service_timeout = service_timeout;
2135	cm_set_private_data(cm_id_priv, data, private_data_len);
2136	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2137	return 0;
2138
2139error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2140	kfree(data);
2141	return ret;
2142
2143error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2144	kfree(data);
2145	cm_free_msg(msg);
2146	return ret;
2147}
2148EXPORT_SYMBOL(ib_send_cm_mra);
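
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * A receiver that needs more time than the sender's response timeout
 * allows can MRA the message from its callback.  service_timeout uses
 * the encoded IB timeout format (roughly 4.096 us * 2^n; 20 below is
 * about four seconds); my_wq and my_accept_work are hypothetical.
 *
 *	case IB_CM_REQ_RECEIVED:
 *		ib_send_cm_mra(cm_id, 20, NULL, 0);
 *		queue_work(my_wq, &my_accept_work);
 *		break;
 */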
2149
2150static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2151{
2152	switch (cm_mra_get_msg_mraed(mra_msg)) {
2153	case CM_MSG_RESPONSE_REQ:
2154		return cm_acquire_id(mra_msg->remote_comm_id, 0);
2155	case CM_MSG_RESPONSE_REP:
2156	case CM_MSG_RESPONSE_OTHER:
2157		return cm_acquire_id(mra_msg->remote_comm_id,
2158				     mra_msg->local_comm_id);
2159	default:
2160		return NULL;
2161	}
2162}
2163
2164static int cm_mra_handler(struct cm_work *work)
2165{
2166	struct cm_id_private *cm_id_priv;
2167	struct cm_mra_msg *mra_msg;
2168	unsigned long flags;
2169	int timeout, ret;
2170
2171	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2172	cm_id_priv = cm_acquire_mraed_id(mra_msg);
2173	if (!cm_id_priv)
2174		return -EINVAL;
2175
2176	work->cm_event.private_data = &mra_msg->private_data;
2177	work->cm_event.param.mra_rcvd.service_timeout =
2178					cm_mra_get_service_timeout(mra_msg);
2179	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2180		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2181
2182	spin_lock_irqsave(&cm_id_priv->lock, flags);
2183	switch (cm_id_priv->id.state) {
2184	case IB_CM_REQ_SENT:
2185		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2186		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2187				  cm_id_priv->msg, timeout))
2188			goto out;
2189		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2190		break;
2191	case IB_CM_REP_SENT:
2192		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2193		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2194				  cm_id_priv->msg, timeout))
2195			goto out;
2196		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2197		break;
2198	case IB_CM_ESTABLISHED:
2199		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2200		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2201		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2202				  cm_id_priv->msg, timeout))
2203			goto out;
2204		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2205		break;
2206	default:
2207		goto out;
2208	}
2209
2210	cm_id_priv->msg->context[1] = (void *) (unsigned long)
2211				      cm_id_priv->id.state;
2212	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2213	if (!ret)
2214		list_add_tail(&work->list, &cm_id_priv->work_list);
2215	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2216
2217	if (ret)
2218		cm_process_work(cm_id_priv, work);
2219	else
2220		cm_deref_id(cm_id_priv);
2221	return 0;
2222out:
2223	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2224	cm_deref_id(cm_id_priv);
2225	return -EINVAL;
2226}
2227
2228static void cm_format_lap(struct cm_lap_msg *lap_msg,
2229			  struct cm_id_private *cm_id_priv,
2230			  struct ib_sa_path_rec *alternate_path,
2231			  const void *private_data,
2232			  u8 private_data_len)
2233{
2234	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2235			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2236	lap_msg->local_comm_id = cm_id_priv->id.local_id;
2237	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2238	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2239	/* todo: need remote CM response timeout */
2240	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2241	lap_msg->alt_local_lid = alternate_path->slid;
2242	lap_msg->alt_remote_lid = alternate_path->dlid;
2243	lap_msg->alt_local_gid = alternate_path->sgid;
2244	lap_msg->alt_remote_gid = alternate_path->dgid;
2245	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2246	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2247	lap_msg->alt_hop_limit = alternate_path->hop_limit;
2248	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2249	cm_lap_set_sl(lap_msg, alternate_path->sl);
2250	cm_lap_set_subnet_local(lap_msg, 1); /* local subnet only */
2251	cm_lap_set_local_ack_timeout(lap_msg,
2252		min(31, alternate_path->packet_life_time + 1));
2253
2254	if (private_data && private_data_len)
2255		memcpy(lap_msg->private_data, private_data, private_data_len);
2256}
2257
2258int ib_send_cm_lap(struct ib_cm_id *cm_id,
2259		   struct ib_sa_path_rec *alternate_path,
2260		   const void *private_data,
2261		   u8 private_data_len)
2262{
2263	struct cm_id_private *cm_id_priv;
2264	struct ib_mad_send_buf *msg;
2265	unsigned long flags;
2266	int ret;
2267
2268	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2269		return -EINVAL;
2270
2271	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2272	spin_lock_irqsave(&cm_id_priv->lock, flags);
2273	if (cm_id->state != IB_CM_ESTABLISHED ||
2274	    cm_id->lap_state != IB_CM_LAP_IDLE) {
2275		ret = -EINVAL;
2276		goto out;
2277	}
2278
2279	ret = cm_alloc_msg(cm_id_priv, &msg);
2280	if (ret)
2281		goto out;
2282
2283	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2284		      alternate_path, private_data, private_data_len);
2285	msg->timeout_ms = cm_id_priv->timeout_ms;
2286	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2287
2288	ret = ib_post_send_mad(msg, NULL);
2289	if (ret) {
2290		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2291		cm_free_msg(msg);
2292		return ret;
2293	}
2294
2295	cm_id->lap_state = IB_CM_LAP_SENT;
2296	cm_id_priv->msg = msg;
2297
2298out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2299	return ret;
2300}
2301EXPORT_SYMBOL(ib_send_cm_lap);
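
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * Once a connection is established, the initiating side may load an
 * alternate path; the path record would normally come from an SA
 * query, and my_query_alt_path() is hypothetical.
 *
 *	struct ib_sa_path_rec *alt = my_query_alt_path(conn);
 *
 *	if (alt)
 *		ib_send_cm_lap(conn->cm_id, alt, NULL, 0);
 *
 * The id must be IB_CM_ESTABLISHED with lap_state IB_CM_LAP_IDLE; the
 * peer answers with an APR (see cm_apr_handler() below).
 */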
2302
2303static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2304				    struct cm_lap_msg *lap_msg)
2305{
2306	memset(path, 0, sizeof *path);
2307	path->dgid = lap_msg->alt_local_gid;
2308	path->sgid = lap_msg->alt_remote_gid;
2309	path->dlid = lap_msg->alt_local_lid;
2310	path->slid = lap_msg->alt_remote_lid;
2311	path->flow_label = cm_lap_get_flow_label(lap_msg);
2312	path->hop_limit = lap_msg->alt_hop_limit;
2313	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2314	path->reversible = 1;
2315	/* pkey is same as in REQ */
2316	path->sl = cm_lap_get_sl(lap_msg);
2317	path->mtu_selector = IB_SA_EQ;
2318	/* mtu is same as in REQ */
2319	path->rate_selector = IB_SA_EQ;
2320	path->rate = cm_lap_get_packet_rate(lap_msg);
2321	path->packet_life_time_selector = IB_SA_EQ;
2322	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2323	path->packet_life_time -= (path->packet_life_time > 0);
2324}
2325
2326static int cm_lap_handler(struct cm_work *work)
2327{
2328	struct cm_id_private *cm_id_priv;
2329	struct cm_lap_msg *lap_msg;
2330	struct ib_cm_lap_event_param *param;
2331	struct ib_mad_send_buf *msg = NULL;
2332	unsigned long flags;
2333	int ret;
2334
2335	/* todo: verify LAP request and send reject APR if invalid. */
2336	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2337	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2338				   lap_msg->local_comm_id);
2339	if (!cm_id_priv)
2340		return -EINVAL;
2341
2342	param = &work->cm_event.param.lap_rcvd;
2343	param->alternate_path = &work->path[0];
2344	cm_format_path_from_lap(param->alternate_path, lap_msg);
2345	work->cm_event.private_data = &lap_msg->private_data;
2346
2347	spin_lock_irqsave(&cm_id_priv->lock, flags);
2348	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2349		goto unlock;
2350
2351	switch (cm_id_priv->id.lap_state) {
2352	case IB_CM_LAP_IDLE:
2353		break;
2354	case IB_CM_MRA_LAP_SENT:
2355		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2356			goto unlock;
2357
2358		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2359			      CM_MSG_RESPONSE_OTHER,
2360			      cm_id_priv->service_timeout,
2361			      cm_id_priv->private_data,
2362			      cm_id_priv->private_data_len);
2363		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2364
2365		if (ib_post_send_mad(msg, NULL))
2366			cm_free_msg(msg);
2367		goto deref;
2368	default:
2369		goto unlock;
2370	}
2371
2372	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2373	cm_id_priv->tid = lap_msg->hdr.tid;
2374	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2375	if (!ret)
2376		list_add_tail(&work->list, &cm_id_priv->work_list);
2377	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2378
2379	if (ret)
2380		cm_process_work(cm_id_priv, work);
2381	else
2382		cm_deref_id(cm_id_priv);
2383	return 0;
2384
2385unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2386deref:	cm_deref_id(cm_id_priv);
2387	return -EINVAL;
2388}
2389
2390static void cm_format_apr(struct cm_apr_msg *apr_msg,
2391			  struct cm_id_private *cm_id_priv,
2392			  enum ib_cm_apr_status status,
2393			  void *info,
2394			  u8 info_length,
2395			  const void *private_data,
2396			  u8 private_data_len)
2397{
2398	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2399	apr_msg->local_comm_id = cm_id_priv->id.local_id;
2400	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2401	apr_msg->ap_status = (u8) status;
2402
2403	if (info && info_length) {
2404		apr_msg->info_length = info_length;
2405		memcpy(apr_msg->info, info, info_length);
2406	}
2407
2408	if (private_data && private_data_len)
2409		memcpy(apr_msg->private_data, private_data, private_data_len);
2410}
2411
2412int ib_send_cm_apr(struct ib_cm_id *cm_id,
2413		   enum ib_cm_apr_status status,
2414		   void *info,
2415		   u8 info_length,
2416		   const void *private_data,
2417		   u8 private_data_len)
2418{
2419	struct cm_id_private *cm_id_priv;
2420	struct ib_mad_send_buf *msg;
2421	unsigned long flags;
2422	int ret;
2423
2424	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2425	    (info && info_length > IB_CM_APR_INFO_LENGTH))
2426		return -EINVAL;
2427
2428	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2429	spin_lock_irqsave(&cm_id_priv->lock, flags);
2430	if (cm_id->state != IB_CM_ESTABLISHED ||
2431	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
2432	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2433		ret = -EINVAL;
2434		goto out;
2435	}
2436
2437	ret = cm_alloc_msg(cm_id_priv, &msg);
2438	if (ret)
2439		goto out;
2440
2441	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2442		      info, info_length, private_data, private_data_len);
2443	ret = ib_post_send_mad(msg, NULL);
2444	if (ret) {
2445		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2446		cm_free_msg(msg);
2447		return ret;
2448	}
2449
2450	cm_id->lap_state = IB_CM_LAP_IDLE;
2451out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2452	return ret;
2453}
2454EXPORT_SYMBOL(ib_send_cm_apr);
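
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * The peer answers a LAP from its IB_CM_LAP_RECEIVED callback,
 * typically after loading the proposed alternate path
 * (event->param.lap_rcvd.alternate_path) into its QP:
 *
 *	case IB_CM_LAP_RECEIVED:
 *		ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
 *		break;
 */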
2455
2456static int cm_apr_handler(struct cm_work *work)
2457{
2458	struct cm_id_private *cm_id_priv;
2459	struct cm_apr_msg *apr_msg;
2460	unsigned long flags;
2461	int ret;
2462
2463	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2464	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2465				   apr_msg->local_comm_id);
2466	if (!cm_id_priv)
2467		return -EINVAL; /* Unmatched reply. */
2468
2469	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2470	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2471	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2472	work->cm_event.private_data = &apr_msg->private_data;
2473
2474	spin_lock_irqsave(&cm_id_priv->lock, flags);
2475	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2476	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2477	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2478		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2479		goto out;
2480	}
2481	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2482	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2483	cm_id_priv->msg = NULL;
2484
2485	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2486	if (!ret)
2487		list_add_tail(&work->list, &cm_id_priv->work_list);
2488	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2489
2490	if (ret)
2491		cm_process_work(cm_id_priv, work);
2492	else
2493		cm_deref_id(cm_id_priv);
2494	return 0;
2495out:
2496	cm_deref_id(cm_id_priv);
2497	return -EINVAL;
2498}
2499
2500static int cm_timewait_handler(struct cm_work *work)
2501{
2502	struct cm_timewait_info *timewait_info;
2503	struct cm_id_private *cm_id_priv;
2504	unsigned long flags;
2505	int ret;
2506
2507	timewait_info = (struct cm_timewait_info *)work;
2508	cm_cleanup_timewait(timewait_info);
2509
2510	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2511				   timewait_info->work.remote_id);
2512	if (!cm_id_priv)
2513		return -EINVAL;
2514
2515	spin_lock_irqsave(&cm_id_priv->lock, flags);
2516	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2517	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2518		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2519		goto out;
2520	}
2521	cm_id_priv->id.state = IB_CM_IDLE;
2522	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2523	if (!ret)
2524		list_add_tail(&work->list, &cm_id_priv->work_list);
2525	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2526
2527	if (ret)
2528		cm_process_work(cm_id_priv, work);
2529	else
2530		cm_deref_id(cm_id_priv);
2531	return 0;
2532out:
2533	cm_deref_id(cm_id_priv);
2534	return -EINVAL;
2535}
2536
2537static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2538			       struct cm_id_private *cm_id_priv,
2539			       struct ib_cm_sidr_req_param *param)
2540{
2541	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2542			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2543	sidr_req_msg->request_id = cm_id_priv->id.local_id;
2544	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
2545	sidr_req_msg->service_id = param->service_id;
2546
2547	if (param->private_data && param->private_data_len)
2548		memcpy(sidr_req_msg->private_data, param->private_data,
2549		       param->private_data_len);
2550}
2551
2552int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2553			struct ib_cm_sidr_req_param *param)
2554{
2555	struct cm_id_private *cm_id_priv;
2556	struct ib_mad_send_buf *msg;
2557	unsigned long flags;
2558	int ret;
2559
2560	if (!param->path || (param->private_data &&
2561	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2562		return -EINVAL;
2563
2564	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2565	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2566	if (ret)
2567		goto out;
2568
2569	cm_id->service_id = param->service_id;
2570	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2571	cm_id_priv->timeout_ms = param->timeout_ms;
2572	cm_id_priv->max_cm_retries = param->max_cm_retries;
2573	ret = cm_alloc_msg(cm_id_priv, &msg);
2574	if (ret)
2575		goto out;
2576
2577	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2578			   param);
2579	msg->timeout_ms = cm_id_priv->timeout_ms;
2580	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2581
2582	spin_lock_irqsave(&cm_id_priv->lock, flags);
2583	if (cm_id->state == IB_CM_IDLE)
2584		ret = ib_post_send_mad(msg, NULL);
2585	else
2586		ret = -EINVAL;
2587
2588	if (ret) {
2589		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2590		cm_free_msg(msg);
2591		goto out;
2592	}
2593	cm_id->state = IB_CM_SIDR_REQ_SENT;
2594	cm_id_priv->msg = msg;
2595	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2596out:
2597	return ret;
2598}
2599EXPORT_SYMBOL(ib_send_cm_sidr_req);
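
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * SIDR resolves a service ID to a remote UD QPN and Q_Key without
 * setting up a connection.  path_rec, my_service_id and the timeout
 * values below are hypothetical.
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path_rec,
 *		.service_id	= my_service_id,
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 *
 * The reply arrives as an IB_CM_SIDR_REP_RECEIVED event carrying the
 * remote qpn and qkey (see cm_format_sidr_rep_event() below).
 */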
2600
2601static void cm_format_sidr_req_event(struct cm_work *work,
2602				     struct ib_cm_id *listen_id)
2603{
2604	struct cm_sidr_req_msg *sidr_req_msg;
2605	struct ib_cm_sidr_req_event_param *param;
2606
2607	sidr_req_msg = (struct cm_sidr_req_msg *)
2608				work->mad_recv_wc->recv_buf.mad;
2609	param = &work->cm_event.param.sidr_req_rcvd;
2610	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2611	param->listen_id = listen_id;
2612	param->port = work->port->port_num;
2613	work->cm_event.private_data = &sidr_req_msg->private_data;
2614}
2615
2616static int cm_sidr_req_handler(struct cm_work *work)
2617{
2618	struct ib_cm_id *cm_id;
2619	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2620	struct cm_sidr_req_msg *sidr_req_msg;
2621	struct ib_wc *wc;
2622	unsigned long flags;
2623
2624	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2625	if (IS_ERR(cm_id))
2626		return PTR_ERR(cm_id);
2627	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2628
2629	/* Record SGID/SLID and request ID for lookup. */
2630	sidr_req_msg = (struct cm_sidr_req_msg *)
2631				work->mad_recv_wc->recv_buf.mad;
2632	wc = work->mad_recv_wc->wc;
2633	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2634	cm_id_priv->av.dgid.global.interface_id = 0;
2635	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2636				&cm_id_priv->av);
2637	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2638	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2639	cm_id_priv->tid = sidr_req_msg->hdr.tid;
2640	atomic_inc(&cm_id_priv->work_count);
2641
2642	spin_lock_irqsave(&cm.lock, flags);
2643	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2644	if (cur_cm_id_priv) {
2645		spin_unlock_irqrestore(&cm.lock, flags);
2646		goto out; /* Duplicate message. */
2647	}
2648	cur_cm_id_priv = cm_find_listen(cm_id->device,
2649					sidr_req_msg->service_id);
2650	if (!cur_cm_id_priv) {
2651		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2652		spin_unlock_irqrestore(&cm.lock, flags);
2653		/* todo: reply with no match */
2654		goto out; /* No match. */
2655	}
2656	atomic_inc(&cur_cm_id_priv->refcount);
2657	spin_unlock_irqrestore(&cm.lock, flags);
2658
2659	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2660	cm_id_priv->id.context = cur_cm_id_priv->id.context;
2661	cm_id_priv->id.service_id = sidr_req_msg->service_id;
2662	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2663
2664	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2665	cm_process_work(cm_id_priv, work);
2666	cm_deref_id(cur_cm_id_priv);
2667	return 0;
2668out:
2669	ib_destroy_cm_id(&cm_id_priv->id);
2670	return -EINVAL;
2671}
2672
2673static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2674			       struct cm_id_private *cm_id_priv,
2675			       struct ib_cm_sidr_rep_param *param)
2676{
2677	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2678			  cm_id_priv->tid);
2679	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2680	sidr_rep_msg->status = param->status;
2681	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2682	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2683	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2684
2685	if (param->info && param->info_length)
2686		memcpy(sidr_rep_msg->info, param->info, param->info_length);
2687
2688	if (param->private_data && param->private_data_len)
2689		memcpy(sidr_rep_msg->private_data, param->private_data,
2690		       param->private_data_len);
2691}
2692
2693int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2694			struct ib_cm_sidr_rep_param *param)
2695{
2696	struct cm_id_private *cm_id_priv;
2697	struct ib_mad_send_buf *msg;
2698	unsigned long flags;
2699	int ret;
2700
2701	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2702	    (param->private_data &&
2703	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2704		return -EINVAL;
2705
2706	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2707	spin_lock_irqsave(&cm_id_priv->lock, flags);
2708	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2709		ret = -EINVAL;
2710		goto error;
2711	}
2712
2713	ret = cm_alloc_msg(cm_id_priv, &msg);
2714	if (ret)
2715		goto error;
2716
2717	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2718			   param);
2719	ret = ib_post_send_mad(msg, NULL);
2720	if (ret) {
2721		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2722		cm_free_msg(msg);
2723		return ret;
2724	}
2725	cm_id->state = IB_CM_IDLE;
2726	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2727
2728	spin_lock_irqsave(&cm.lock, flags);
2729	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2730	spin_unlock_irqrestore(&cm.lock, flags);
2731	return 0;
2732
2733error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2734	return ret;
2735}
2736EXPORT_SYMBOL(ib_send_cm_sidr_rep);
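
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * The service side answers a SIDR REQ from its IB_CM_SIDR_REQ_RECEIVED
 * callback with the UD QP that clients should address; my_ud_qp and
 * my_qkey are hypothetical.
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.status	= IB_SIDR_SUCCESS,
 *		.qp_num	= my_ud_qp->qp_num,
 *		.qkey	= my_qkey,
 *	};
 *
 *	ib_send_cm_sidr_rep(cm_id, &rep);
 */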
2737
2738static void cm_format_sidr_rep_event(struct cm_work *work)
2739{
2740	struct cm_sidr_rep_msg *sidr_rep_msg;
2741	struct ib_cm_sidr_rep_event_param *param;
2742
2743	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2744				work->mad_recv_wc->recv_buf.mad;
2745	param = &work->cm_event.param.sidr_rep_rcvd;
2746	param->status = sidr_rep_msg->status;
2747	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2748	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2749	param->info = &sidr_rep_msg->info;
2750	param->info_len = sidr_rep_msg->info_length;
2751	work->cm_event.private_data = &sidr_rep_msg->private_data;
2752}
2753
2754static int cm_sidr_rep_handler(struct cm_work *work)
2755{
2756	struct cm_sidr_rep_msg *sidr_rep_msg;
2757	struct cm_id_private *cm_id_priv;
2758	unsigned long flags;
2759
2760	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2761				work->mad_recv_wc->recv_buf.mad;
2762	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2763	if (!cm_id_priv)
2764		return -EINVAL; /* Unmatched reply. */
2765
2766	spin_lock_irqsave(&cm_id_priv->lock, flags);
2767	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2768		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2769		goto out;
2770	}
2771	cm_id_priv->id.state = IB_CM_IDLE;
2772	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2773	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2774
2775	cm_format_sidr_rep_event(work);
2776	cm_process_work(cm_id_priv, work);
2777	return 0;
2778out:
2779	cm_deref_id(cm_id_priv);
2780	return -EINVAL;
2781}
2782
2783static void cm_process_send_error(struct ib_mad_send_buf *msg,
2784				  enum ib_wc_status wc_status)
2785{
2786	struct cm_id_private *cm_id_priv;
2787	struct ib_cm_event cm_event;
2788	enum ib_cm_state state;
2789	unsigned long flags;
2790	int ret;
2791
2792	memset(&cm_event, 0, sizeof cm_event);
2793	cm_id_priv = msg->context[0];
2794
2795	/* Discard old sends or ones without a response. */
2796	spin_lock_irqsave(&cm_id_priv->lock, flags);
2797	state = (enum ib_cm_state) (unsigned long) msg->context[1];
2798	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2799		goto discard;
2800
2801	switch (state) {
2802	case IB_CM_REQ_SENT:
2803	case IB_CM_MRA_REQ_RCVD:
2804		cm_reset_to_idle(cm_id_priv);
2805		cm_event.event = IB_CM_REQ_ERROR;
2806		break;
2807	case IB_CM_REP_SENT:
2808	case IB_CM_MRA_REP_RCVD:
2809		cm_reset_to_idle(cm_id_priv);
2810		cm_event.event = IB_CM_REP_ERROR;
2811		break;
2812	case IB_CM_DREQ_SENT:
2813		cm_enter_timewait(cm_id_priv);
2814		cm_event.event = IB_CM_DREQ_ERROR;
2815		break;
2816	case IB_CM_SIDR_REQ_SENT:
2817		cm_id_priv->id.state = IB_CM_IDLE;
2818		cm_event.event = IB_CM_SIDR_REQ_ERROR;
2819		break;
2820	default:
2821		goto discard;
2822	}
2823	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2824	cm_event.param.send_status = wc_status;
2825
2826	/* No other events can occur on the cm_id at this point. */
2827	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2828	cm_free_msg(msg);
2829	if (ret)
2830		ib_destroy_cm_id(&cm_id_priv->id);
2831	return;
2832discard:
2833	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2834	cm_free_msg(msg);
2835}
2836
2837static void cm_send_handler(struct ib_mad_agent *mad_agent,
2838			    struct ib_mad_send_wc *mad_send_wc)
2839{
2840	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
2841
2842	switch (mad_send_wc->status) {
2843	case IB_WC_SUCCESS:
2844	case IB_WC_WR_FLUSH_ERR:
2845		cm_free_msg(msg);
2846		break;
2847	default:
2848		if (msg->context[0] && msg->context[1])
2849			cm_process_send_error(msg, mad_send_wc->status);
2850		else
2851			cm_free_msg(msg);
2852		break;
2853	}
2854}
2855
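/*
 * Editor's note: cm_work_handler() runs on the ib_cm workqueue.  Each
 * received CM MAD (queued by cm_recv_handler() below) and each
 * deferred user event is packaged as a struct cm_work and dispatched
 * here to the handler for its message type; if that handler fails, the
 * work item is freed without generating a ULP event.
 */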
2856static void cm_work_handler(void *data)
2857{
2858	struct cm_work *work = data;
2859	int ret;
2860
2861	switch (work->cm_event.event) {
2862	case IB_CM_REQ_RECEIVED:
2863		ret = cm_req_handler(work);
2864		break;
2865	case IB_CM_MRA_RECEIVED:
2866		ret = cm_mra_handler(work);
2867		break;
2868	case IB_CM_REJ_RECEIVED:
2869		ret = cm_rej_handler(work);
2870		break;
2871	case IB_CM_REP_RECEIVED:
2872		ret = cm_rep_handler(work);
2873		break;
2874	case IB_CM_RTU_RECEIVED:
2875		ret = cm_rtu_handler(work);
2876		break;
2877	case IB_CM_USER_ESTABLISHED:
2878		ret = cm_establish_handler(work);
2879		break;
2880	case IB_CM_DREQ_RECEIVED:
2881		ret = cm_dreq_handler(work);
2882		break;
2883	case IB_CM_DREP_RECEIVED:
2884		ret = cm_drep_handler(work);
2885		break;
2886	case IB_CM_SIDR_REQ_RECEIVED:
2887		ret = cm_sidr_req_handler(work);
2888		break;
2889	case IB_CM_SIDR_REP_RECEIVED:
2890		ret = cm_sidr_rep_handler(work);
2891		break;
2892	case IB_CM_LAP_RECEIVED:
2893		ret = cm_lap_handler(work);
2894		break;
2895	case IB_CM_APR_RECEIVED:
2896		ret = cm_apr_handler(work);
2897		break;
2898	case IB_CM_TIMEWAIT_EXIT:
2899		ret = cm_timewait_handler(work);
2900		break;
2901	default:
2902		ret = -EINVAL;
2903		break;
2904	}
2905	if (ret)
2906		cm_free_work(work);
2907}
2908
2909int ib_cm_establish(struct ib_cm_id *cm_id)
2910{
2911	struct cm_id_private *cm_id_priv;
2912	struct cm_work *work;
2913	unsigned long flags;
2914	int ret = 0;
2915
2916	work = kmalloc(sizeof *work, GFP_ATOMIC);
2917	if (!work)
2918		return -ENOMEM;
2919
2920	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2921	spin_lock_irqsave(&cm_id_priv->lock, flags);
2922	switch (cm_id->state) {
2924	case IB_CM_REP_SENT:
2925	case IB_CM_MRA_REP_RCVD:
2926		cm_id->state = IB_CM_ESTABLISHED;
2927		break;
2928	case IB_CM_ESTABLISHED:
2929		ret = -EISCONN;
2930		break;
2931	default:
2932		ret = -EINVAL;
2933		break;
2934	}
2935	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2936
2937	if (ret) {
2938		kfree(work);
2939		goto out;
2940	}
2941
2942	/*
2943	 * The CM worker thread may try to destroy the cm_id before it
2944	 * can execute this work item.  To prevent potential deadlock,
2945	 * we need to find the cm_id once we're in the context of the
2946	 * worker thread, rather than holding a reference on it.
2947	 */
2948	INIT_WORK(&work->work, cm_work_handler, work);
2949	work->local_id = cm_id->local_id;
2950	work->remote_id = cm_id->remote_id;
2951	work->mad_recv_wc = NULL;
2952	work->cm_event.event = IB_CM_USER_ESTABLISHED;
2953	queue_work(cm.wq, &work->work);
2954out:
2955	return ret;
2956}
2957EXPORT_SYMBOL(ib_cm_establish);
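
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * A passive-side ULP that sees traffic on its QP before the RTU
 * arrives can notify the CM directly:
 *
 *	if (ib_cm_establish(cm_id))
 *		printk(KERN_DEBUG "cm_id not awaiting an RTU\n");
 *
 * On success the id moves from IB_CM_REP_SENT (or IB_CM_MRA_REP_RCVD)
 * to IB_CM_ESTABLISHED, and cm_establish_handler() above cancels the
 * REP retransmissions.
 */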
2958
2959static void cm_recv_handler(struct ib_mad_agent *mad_agent,
2960			    struct ib_mad_recv_wc *mad_recv_wc)
2961{
2962	struct cm_work *work;
2963	enum ib_cm_event_type event;
2964	int paths = 0;
2965
2966	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
2967	case CM_REQ_ATTR_ID:
2968		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
2969						    alt_local_lid != 0);
2970		event = IB_CM_REQ_RECEIVED;
2971		break;
2972	case CM_MRA_ATTR_ID:
2973		event = IB_CM_MRA_RECEIVED;
2974		break;
2975	case CM_REJ_ATTR_ID:
2976		event = IB_CM_REJ_RECEIVED;
2977		break;
2978	case CM_REP_ATTR_ID:
2979		event = IB_CM_REP_RECEIVED;
2980		break;
2981	case CM_RTU_ATTR_ID:
2982		event = IB_CM_RTU_RECEIVED;
2983		break;
2984	case CM_DREQ_ATTR_ID:
2985		event = IB_CM_DREQ_RECEIVED;
2986		break;
2987	case CM_DREP_ATTR_ID:
2988		event = IB_CM_DREP_RECEIVED;
2989		break;
2990	case CM_SIDR_REQ_ATTR_ID:
2991		event = IB_CM_SIDR_REQ_RECEIVED;
2992		break;
2993	case CM_SIDR_REP_ATTR_ID:
2994		event = IB_CM_SIDR_REP_RECEIVED;
2995		break;
2996	case CM_LAP_ATTR_ID:
2997		paths = 1;
2998		event = IB_CM_LAP_RECEIVED;
2999		break;
3000	case CM_APR_ATTR_ID:
3001		event = IB_CM_APR_RECEIVED;
3002		break;
3003	default:
3004		ib_free_recv_mad(mad_recv_wc);
3005		return;
3006	}
3007
3008	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3009		       GFP_KERNEL);
3010	if (!work) {
3011		ib_free_recv_mad(mad_recv_wc);
3012		return;
3013	}
3014
3015	INIT_WORK(&work->work, cm_work_handler, work);
3016	work->cm_event.event = event;
3017	work->mad_recv_wc = mad_recv_wc;
3018	work->port = (struct cm_port *)mad_agent->context;
3019	queue_work(cm.wq, &work->work);
3020}
3021
3022static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3023				struct ib_qp_attr *qp_attr,
3024				int *qp_attr_mask)
3025{
3026	unsigned long flags;
3027	int ret;
3028
3029	spin_lock_irqsave(&cm_id_priv->lock, flags);
3030	switch (cm_id_priv->id.state) {
3031	case IB_CM_REQ_SENT:
3032	case IB_CM_MRA_REQ_RCVD:
3033	case IB_CM_REQ_RCVD:
3034	case IB_CM_MRA_REQ_SENT:
3035	case IB_CM_REP_RCVD:
3036	case IB_CM_MRA_REP_SENT:
3037	case IB_CM_REP_SENT:
3038	case IB_CM_MRA_REP_RCVD:
3039	case IB_CM_ESTABLISHED:
3040		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3041				IB_QP_PKEY_INDEX | IB_QP_PORT;
3042		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
3043					   IB_ACCESS_REMOTE_WRITE;
3044		if (cm_id_priv->responder_resources)
3045			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
3046		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3047		qp_attr->port_num = cm_id_priv->av.port->port_num;
3048		ret = 0;
3049		break;
3050	default:
3051		ret = -EINVAL;
3052		break;
3053	}
3054	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3055	return ret;
3056}
3057
3058static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3059			       struct ib_qp_attr *qp_attr,
3060			       int *qp_attr_mask)
3061{
3062	unsigned long flags;
3063	int ret;
3064
3065	spin_lock_irqsave(&cm_id_priv->lock, flags);
3066	switch (cm_id_priv->id.state) {
3067	case IB_CM_REQ_RCVD:
3068	case IB_CM_MRA_REQ_SENT:
3069	case IB_CM_REP_RCVD:
3070	case IB_CM_MRA_REP_SENT:
3071	case IB_CM_REP_SENT:
3072	case IB_CM_MRA_REP_RCVD:
3073	case IB_CM_ESTABLISHED:
3074		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3075				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3076		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3077		qp_attr->path_mtu = cm_id_priv->path_mtu;
3078		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3079		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3080		if (cm_id_priv->qp_type == IB_QPT_RC) {
3081			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3082					 IB_QP_MIN_RNR_TIMER;
3083			qp_attr->max_dest_rd_atomic =
3084					cm_id_priv->responder_resources;
3085			qp_attr->min_rnr_timer = 0;
3086		}
3087		if (cm_id_priv->alt_av.ah_attr.dlid) {
3088			*qp_attr_mask |= IB_QP_ALT_PATH;
3089			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3090		}
3091		ret = 0;
3092		break;
3093	default:
3094		ret = -EINVAL;
3095		break;
3096	}
3097	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3098	return ret;
3099}
3100
3101static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3102			       struct ib_qp_attr *qp_attr,
3103			       int *qp_attr_mask)
3104{
3105	unsigned long flags;
3106	int ret;
3107
3108	spin_lock_irqsave(&cm_id_priv->lock, flags);
3109	switch (cm_id_priv->id.state) {
3110	case IB_CM_REP_RCVD:
3111	case IB_CM_MRA_REP_SENT:
3112	case IB_CM_REP_SENT:
3113	case IB_CM_MRA_REP_RCVD:
3114	case IB_CM_ESTABLISHED:
3115		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3116		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3117		if (cm_id_priv->qp_type == IB_QPT_RC) {
3118			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3119					 IB_QP_RNR_RETRY |
3120					 IB_QP_MAX_QP_RD_ATOMIC;
3121			qp_attr->timeout = cm_id_priv->local_ack_timeout;
3122			qp_attr->retry_cnt = cm_id_priv->retry_count;
3123			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3124			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3125		}
3126		if (cm_id_priv->alt_av.ah_attr.dlid) {
3127			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3128			qp_attr->path_mig_state = IB_MIG_REARM;
3129		}
3130		ret = 0;
3131		break;
3132	default:
3133		ret = -EINVAL;
3134		break;
3135	}
3136	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3137	return ret;
3138}
3139
3140int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3141		       struct ib_qp_attr *qp_attr,
3142		       int *qp_attr_mask)
3143{
3144	struct cm_id_private *cm_id_priv;
3145	int ret;
3146
3147	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3148	switch (qp_attr->qp_state) {
3149	case IB_QPS_INIT:
3150		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3151		break;
3152	case IB_QPS_RTR:
3153		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3154		break;
3155	case IB_QPS_RTS:
3156		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3157		break;
3158	default:
3159		ret = -EINVAL;
3160		break;
3161	}
3162	return ret;
3163}
3164EXPORT_SYMBOL(ib_cm_init_qp_attr);
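
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * A ULP typically transitions its QP through INIT, RTR and RTS by
 * asking the CM for the negotiated attributes and passing them to
 * ib_modify_qp(); the helper below is hypothetical.
 *
 *	static int my_modify_qp(struct ib_cm_id *cm_id, struct ib_qp *qp,
 *				enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr attr;
 *		int mask, ret;
 *
 *		attr.qp_state = state;
 *		ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
 *		if (ret)
 *			return ret;
 *		return ib_modify_qp(qp, &attr, mask);
 *	}
 *
 * Call it in turn with IB_QPS_INIT, IB_QPS_RTR and IB_QPS_RTS as the
 * connection handshake progresses.
 */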
3165
3166static void cm_add_one(struct ib_device *device)
3167{
3168	struct cm_device *cm_dev;
3169	struct cm_port *port;
3170	struct ib_mad_reg_req reg_req = {
3171		.mgmt_class = IB_MGMT_CLASS_CM,
3172		.mgmt_class_version = IB_CM_CLASS_VERSION
3173	};
3174	struct ib_port_modify port_modify = {
3175		.set_port_cap_mask = IB_PORT_CM_SUP
3176	};
3177	unsigned long flags;
3178	int ret;
3179	u8 i;
3180
3181	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3182			 device->phys_port_cnt, GFP_KERNEL);
3183	if (!cm_dev)
3184		return;
3185
3186	cm_dev->device = device;
3187	cm_dev->ca_guid = device->node_guid;
3188
3189	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3190	for (i = 1; i <= device->phys_port_cnt; i++) {
3191		port = &cm_dev->port[i-1];
3192		port->cm_dev = cm_dev;
3193		port->port_num = i;
3194		port->mad_agent = ib_register_mad_agent(device, i,
3195							IB_QPT_GSI,
3196							&reg_req,
3197							0,
3198							cm_send_handler,
3199							cm_recv_handler,
3200							port);
3201		if (IS_ERR(port->mad_agent))
3202			goto error1;
3203
3204		ret = ib_modify_port(device, i, 0, &port_modify);
3205		if (ret)
3206			goto error2;
3207	}
3208	ib_set_client_data(device, &cm_client, cm_dev);
3209
3210	write_lock_irqsave(&cm.device_lock, flags);
3211	list_add_tail(&cm_dev->list, &cm.device_list);
3212	write_unlock_irqrestore(&cm.device_lock, flags);
3213	return;
3214
3215error2:
3216	ib_unregister_mad_agent(port->mad_agent);
3217error1:
3218	port_modify.set_port_cap_mask = 0;
3219	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3220	while (--i) {
3221		port = &cm_dev->port[i-1];
3222		ib_modify_port(device, port->port_num, 0, &port_modify);
3223		ib_unregister_mad_agent(port->mad_agent);
3224	}
3225	kfree(cm_dev);
3226}
3227
3228static void cm_remove_one(struct ib_device *device)
3229{
3230	struct cm_device *cm_dev;
3231	struct cm_port *port;
3232	struct ib_port_modify port_modify = {
3233		.clr_port_cap_mask = IB_PORT_CM_SUP
3234	};
3235	unsigned long flags;
3236	int i;
3237
3238	cm_dev = ib_get_client_data(device, &cm_client);
3239	if (!cm_dev)
3240		return;
3241
3242	write_lock_irqsave(&cm.device_lock, flags);
3243	list_del(&cm_dev->list);
3244	write_unlock_irqrestore(&cm.device_lock, flags);
3245
3246	for (i = 1; i <= device->phys_port_cnt; i++) {
3247		port = &cm_dev->port[i-1];
3248		ib_modify_port(device, port->port_num, 0, &port_modify);
3249		ib_unregister_mad_agent(port->mad_agent);
3250	}
3251	kfree(cm_dev);
3252}
3253
3254static int __init ib_cm_init(void)
3255{
3256	int ret;
3257
3258	memset(&cm, 0, sizeof cm);
3259	INIT_LIST_HEAD(&cm.device_list);
3260	rwlock_init(&cm.device_lock);
3261	spin_lock_init(&cm.lock);
3262	cm.listen_service_table = RB_ROOT;
3263	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3264	cm.remote_id_table = RB_ROOT;
3265	cm.remote_qp_table = RB_ROOT;
3266	cm.remote_sidr_table = RB_ROOT;
3267	idr_init(&cm.local_id_table);
3268	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3269
3270	cm.wq = create_workqueue("ib_cm");
3271	if (!cm.wq)
3272		return -ENOMEM;
3273
3274	ret = ib_register_client(&cm_client);
3275	if (ret)
3276		goto error;
3277
3278	return 0;
3279error:
3280	destroy_workqueue(cm.wq);
3281	return ret;
3282}
3283
3284static void __exit ib_cm_cleanup(void)
3285{
3286	flush_workqueue(cm.wq);
3287	destroy_workqueue(cm.wq);
3288	ib_unregister_client(&cm_client);
3289	idr_destroy(&cm.local_id_table);
3290}
3291
3292module_init(ib_cm_init);
3293module_exit(ib_cm_cleanup);
3294
3295