mad_rmpp.c revision fe9e08e17af414a5fd8f3141b0fd88677f81a883
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */

#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

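/*
 * Receive-side state of a transfer: ACTIVE while more segments are
 * expected, TIMEOUT once the inter-segment timer has fired, COMPLETE
 * after the last segment has been received.
 */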
enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

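/*
 * State for one in-progress inbound RMPP transfer.  Entries hang off
 * the owning agent's rmpp_list and are matched against incoming
 * segments by the tuple (tid, src_qp, slid, mgmt_class, class_version,
 * method).
 */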
struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct work_struct timeout_work;
	struct work_struct cleanup_work;
	wait_queue_head_t wait;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

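/*
 * Release the reference handed to us, then wait for all other
 * references to be dropped before tearing the structure down.
 */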
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	atomic_dec(&rmpp_recv->refcount);
	wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

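/*
 * Offset of the RMPP payload within a MAD.  SA MADs carry an SA
 * header, and range-2 vendor MADs an OUI header, after the RMPP
 * header, so the data starts at a class-dependent offset.
 */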
static int data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return offsetof(struct ib_sa_mad, data);
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return offsetof(struct ib_vendor_mad, data);
	else
		return offsetof(struct ib_rmpp_mad, data);
}

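/*
 * Build an ACK from the received DATA MAD: copy the request headers,
 * toggle the response bit in the method, and advertise the receive
 * state via seg_num (last segment ACKed) and paylen_newwin (NewWindow).
 */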
static void format_ack(struct ib_rmpp_mad *ack,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	unsigned long flags;

	memcpy(&ack->mad_hdr, &data->mad_hdr,
	       data_offset(data->mad_hdr.mgmt_class));

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	int hdr_len, ret;

	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return;

	format_ack((struct ib_rmpp_mad *) msg->mad,
		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		ib_free_send_mad(msg);
}

static int alloc_response_msg(struct ib_mad_agent *agent,
			      struct ib_mad_recv_wc *recv_wc,
			      struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
			       sizeof(struct ib_rmpp_mad) - hdr_len,
			       GFP_KERNEL);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	*msg = m;
	return 0;
}

static void free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->send_wr.wr.ud.ah);
	ib_free_send_mad(msg);
}

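/*
 * NAK a received MAD back to its sender.  Despite the name, this
 * always sends an RMPP ABORT (not a STOP), with rmpp_status giving
 * the reason.
 */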
static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	struct ib_send_wr *bad_send_wr;
	int ret;

	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
	if (ret)
		return;

	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
	if (ret)
		free_msg(msg);
}

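/*
 * The inter-segment timer fired.  If the transfer is still ACTIVE,
 * unlink it, NAK the sender with "total time too long" (T2L) and free
 * the partial reassembly.
 */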
static void recv_timeout_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_waitqueue_head(&rmpp_recv->wait);
	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		wake_up(&rmpp_recv->wait);
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

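/*
 * The advertised receive window is sized to 1/8 of the receive queue
 * depth, with a floor of one segment.
 */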
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

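/*
 * Walk the segment list backwards to find the buffer the new segment
 * should be inserted after.  Returns NULL if the segment is already
 * queued.
 */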
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

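/*
 * Segments may arrive out of order within the receive window; advance
 * cur_seg_buf/seg_num across any contiguous run already queued.
 */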
static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

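/*
 * Length of the reassembled MAD: the class header plus one payload
 * per segment, minus the pad implied by the last segment's PayLen
 * (ignored if out of range).
 */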
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > data_size || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

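/*
 * All segments received: send the final ACK, stop the inter-segment
 * timer and fill in the reassembled length.  The COMPLETE entry stays
 * on the list for a further 10 seconds so retransmitted segments of a
 * finished transfer are re-ACKed rather than starting a new
 * reassembly.
 */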
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

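/*
 * Copy a reassembled MAD into a caller-supplied buffer, keeping the
 * full first segment but stripping the per-segment headers from the
 * rest.
 */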
void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
{
	struct ib_mad_recv_buf *seg_buf;
	struct ib_rmpp_mad *rmpp_mad;
	void *data;
	int size, len, offset;
	u8 flags;

	len = mad_recv_wc->mad_len;
	if (len <= sizeof(struct ib_mad)) {
		memcpy(buf, mad_recv_wc->recv_buf.mad, len);
		return;
	}

	offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);

	list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
		rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
		flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);

		if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
			data = rmpp_mad;
			size = sizeof(*rmpp_mad);
		} else {
			data = (void *) rmpp_mad + offset;
			if (flags & IB_MGMT_RMPP_FLAG_LAST)
				size = len;
			else
				size = sizeof(*rmpp_mad) - offset;
		}

		memcpy(buf, data, size);
		len -= size;
		buf += size;
	}
}
EXPORT_SYMBOL(ib_coalesce_recv_mad);

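/*
 * Handle DATA segments after the first: drop segments beyond the
 * advertised window, re-ACK duplicates, splice the segment into the
 * reassembly list and, when it extends the contiguous run, either
 * complete the transfer or widen the receive window.
 */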
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

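/*
 * Handle the first DATA segment of a transfer.  If a duplicate of the
 * first MAD raced us onto the list, fold this copy into the existing
 * reassembly via continue_rmpp() instead.
 */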
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

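/*
 * DMA address of the current segment's payload: each segment past the
 * first advances one payload stride (MAD size minus headers) into the
 * sender's contiguous buffer.
 */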
static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
{
	return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
	       (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
	       (mad_send_wr->seg_num - 1);
}

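/*
 * Transmit the current segment.  The first segment carries the FIRST
 * flag and the total payload length, and is sent from a single SGE;
 * later segments use two SGEs (the headers plus the next payload
 * slice).  The last segment carries the LAST flag and the residual
 * payload length.
 */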
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		rmpp_mad->rmpp_hdr.paylen_newwin =
			cpu_to_be32(mad_send_wr->total_seg *
				    (sizeof(struct ib_rmpp_mad) -
				       offsetof(struct ib_rmpp_mad, data)));
		mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
	} else {
		mad_send_wr->send_wr.num_sge = 2;
		mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
		mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
		mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
						 mad_send_wr->data_offset;
		mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
	}

	if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		rmpp_mad->rmpp_hdr.paylen_newwin =
			cpu_to_be32(sizeof(struct ib_rmpp_mad) -
				    offsetof(struct ib_rmpp_mad, data) -
				    mad_send_wr->pad);
	}

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);
	mad_send_wr->seg_num++;
	return ib_send_mad(mad_send_wr);
}

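/*
 * Terminate a matching in-flight RMPP send and complete it with
 * IB_WC_REM_ABORT_ERR, passing the RMPP status up in vendor_err.
 */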
static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
		       u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, tid);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.wr_id = mad_send_wr->wr_id;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

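/*
 * Handle an ACK for one of our sends: validate seg_num against the
 * ACKed window, slide the send window forward, complete the send once
 * the final segment is ACKed and no response is expected, or transmit
 * the next segment if the new window allows it.
 */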
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
	if (!mad_send_wr)
		goto out;	/* Unmatched ACK */

	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		mad_send_wr->last_ack = seg_num;
		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.wr_id = mad_send_wr->wr_id;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
					     send_wr.wr.ud.timeout_ms);
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

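/*
 * Dispatch a DATA segment: segment 1 starts a new reassembly, any
 * other segment continues an existing one; an inconsistent FIRST flag
 * is NAKed as a bad segment number.
 */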
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

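/*
 * STOP and ABORT both terminate any matching send of ours.  A status
 * value outside the legal range for the type is itself reported (and
 * NAKed) as BAD_STATUS.
 */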
static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   rmpp_mad->rmpp_hdr.rmpp_status);
}

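/*
 * Entry point from the MAD layer for every received MAD.  Returns the
 * work completion unchanged for non-RMPP MADs, the reassembled
 * completion for a finished transfer, or NULL when the MAD was
 * consumed here.
 */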
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

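/*
 * Send-side entry point.  For an RMPP DATA MAD, derive the segment
 * count and pad from the buffer length and advertised PayLen, then
 * send the first segment; non-RMPP sends are left to the caller, and
 * non-DATA RMPP types are handled internally.
 */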
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int i, total_len, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;

	if (mad_send_wr->send_wr.num_sge > 1)
		return -EINVAL;		/* TODO: support num_sge > 1 */

	mad_send_wr->seg_num = 1;
	mad_send_wr->newwin = 1;
	mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);

	total_len = 0;
	for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
		total_len += mad_send_wr->send_wr.sg_list[i].length;

	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

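/*
 * Send completion handling: locally generated ACK/STOP/ABORT buffers
 * are simply freed; for a DATA segment, either push out the next
 * segment, or report the send as done or as waiting on an ACK.
 */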
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	struct ib_mad_send_buf *msg;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		msg = (struct ib_mad_send_buf *) (unsigned long)
		      mad_send_wc->wr_id;
		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
			ib_free_send_mad(msg);
		else
			free_msg(msg);
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */
	}

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num > mad_send_wr->newwin ||
	    mad_send_wr->seg_num > mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

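/*
 * Retry after a send timeout: resume transmission at the segment
 * after the last one the receiver ACKed.
 */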
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}