/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */

#include "mad_priv.h"
#include "mad_rmpp.h"

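/*
 * Receive-side state of an RMPP transfer: ACTIVE while segments are
 * still being reassembled, TIMEOUT once the transfer has been abandoned,
 * and COMPLETE after the full MAD has been delivered to the client (the
 * context then lingers only so duplicate segments can be re-ACKed).
 */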
enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct work_struct timeout_work;
	struct work_struct cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

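/*
 * Build an ACK from the received MAD's own headers.  seg_num reports the
 * highest segment received in sequence, and the paylen_newwin field
 * carries the receiver's current window advertisement.
 */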
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL);
	if (IS_ERR(msg))	/* ib_create_send_mad() returns ERR_PTR, never NULL */
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL);
	if (IS_ERR(msg))
		ib_destroy_ah(ah);
	else
		msg->ah = ah;

	return msg;
}

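/*
 * Acknowledge the ACK of a dual-sided RMPP transfer: reply with an ACK
 * carrying seg_num 0 and a window of 1, which lets the peer begin the
 * transfer in the reverse direction.
 */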
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

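/*
 * Send completion for MADs generated by this file.  ACKs borrow the
 * address handle owned by the receive context, so only non-ACK sends
 * (aborts and DS ACK replies, which allocate their own AH through
 * alloc_response_msg) destroy the AH here.
 */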
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
		ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

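/*
 * No new segment arrived before the reassembly timer fired: drop the
 * transfer, notify the sender with a "total time too long" (T2L) abort,
 * and free the partially reassembled MAD.
 */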
static void recv_timeout_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

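/*
 * Receiver window heuristic: one eighth of the receive queue depth, but
 * at least one segment.  A queue of 128 receives, for example, yields a
 * 16-segment window.
 */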
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

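/*
 * Compute the length of the reassembled MAD.  Each segment contributes
 * data_size payload bytes after the class header; the final segment's
 * PayloadLength (paylen_newwin on the last DATA segment) reveals how
 * many of those bytes are padding.  An implausible pad is treated as 0.
 */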
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

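/*
 * Merge a new DATA segment into an in-progress transfer.  Segments may
 * arrive out of order: each is inserted by segment number, duplicates at
 * or below last_ack are simply re-ACKed, and segments beyond the
 * advertised window are dropped.  When the in-order prefix reaches a
 * segment with the LAST flag the transfer completes; when it reaches the
 * window edge, the window is widened and a fresh ACK advertises it.
 */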
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

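/*
 * Queue the next DATA segment of an outbound transfer.  The FIRST
 * segment advertises the total payload length and the LAST segment the
 * length of its own payload, both excluding the pad; middle segments
 * leave paylen_newwin zero.  Lacking the real packet lifetime, a 2
 * second cap is applied to the per-segment ACK timeout.
 */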
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
			 mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

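/*
 * Process an ACK for an outbound transfer: validate seg_num against the
 * segment count and advertised window, ignore stale ACKs, then advance
 * last_ack, refresh the retry budget, and either complete the send,
 * rearm its response timeout, or push out more segments up to the new
 * window.  An ACK with seg_num 0 that matches no send is treated as a
 * dual-sided RMPP ACK.
 */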
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries = mad_send_wr->send_buf.retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

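/*
 * Choose the initial window for an outbound transfer.  A response that
 * continues a dual-sided transfer may start with the window (repwin)
 * that the peer advertised in its DS ACK; everything else starts at 1.
 */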
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == ah_attr.dlid) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}
943