/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <net/mac80211.h>

#include "rate.h"
#include "scb.h"
#include "phy/phy_hal.h"
#include "antsel.h"
#include "main.h"
#include "ampdu.h"
#include "debug.h"
#include "brcms_trace_events.h"

/* max number of mpdus in an ampdu */
#define AMPDU_MAX_MPDU			32
/* max number of mpdus in an ampdu to a legacy device */
#define AMPDU_NUM_MPDU_LEGACY		16
/* max Tx ba window size (in pdu) */
#define AMPDU_TX_BA_MAX_WSIZE		64
/* default Tx ba window size (in pdu) */
#define AMPDU_TX_BA_DEF_WSIZE		64
/* default Rx ba window size (in pdu) */
#define AMPDU_RX_BA_DEF_WSIZE		64
/* max Rx ba window size (in pdu) */
#define AMPDU_RX_BA_MAX_WSIZE		64
/* max dur of tx ampdu (in msec) */
#define	AMPDU_MAX_DUR			5
/* default tx retry limit */
#define AMPDU_DEF_RETRY_LIMIT		5
/* default tx retry limit at reg rate */
#define AMPDU_DEF_RR_RETRY_LIMIT	2
/* default ffpld reserved bytes */
#define AMPDU_DEF_FFPLD_RSVD		2048
/* # of inis to be freed on detach */
#define AMPDU_INI_FREE			10
/* max # of mpdus released at a time */
#define	AMPDU_SCB_MAX_RELEASE		20

#define NUM_FFPLD_FIFO 4	/* number of fifo concerned by pre-loading */
#define FFPLD_TX_MAX_UNFL   200	/* default value of the average number of ampdu
				 * without underflows
				 */
#define FFPLD_MPDU_SIZE 1800	/* estimate of maximum mpdu size */
#define FFPLD_MAX_MCS 23	/* we don't deal with mcs 32 */
#define FFPLD_PLD_INCR 1000	/* increments in bytes */
#define FFPLD_MAX_AMPDU_CNT 5000	/* maximum number of ampdu we
					 * accumulate between resets.
					 */

#define AMPDU_DELIMITER_LEN	4

/* max allowed number of mpdus in an ampdu (2 streams) */
#define AMPDU_NUM_MPDU		16

#define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)

/* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
#define AMPDU_MAX_MPDU_OVERHEAD (FCS_LEN + DOT11_ICV_AES_LEN +\
	AMPDU_DELIMITER_LEN + 3\
	+ DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)

/* modulo add/sub, bound = 2^k */
#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
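
/*
 * Example (illustrative): with bound = 4096, the size of the 12-bit sequence
 * space, MODSUB_POW2(5, 4090, 4096) = (5 - 4090) & 4095 = 11, i.e. sequence
 * number 5 lies 11 slots past 4090 once the counter has wrapped. The same
 * trick is used below to locate an MPDU inside the block-ack bitmap.
 */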

/* structure to hold tx fifo information and pre-loading state
 * counters specific to tx underflows of ampdus
 * some counters might be redundant with the ones in wlc or ampdu structures.
 * This allows a specific state to be maintained independently of
 * how often and/or when the wlc counters are updated.
 *
 * ampdu_pld_size: number of bytes to be pre-loaded
 * mcs2ampdu_table: per-mcs max # of mpdus in an ampdu
 * prev_txfunfl: num of underflows last read from the HW macstats counter
 * accum_txfunfl: num of underflows since we modified pld params
 * accum_txampdu: num of tx ampdu since we modified pld params
 * prev_txampdu: previous reading of tx ampdu
 * dmaxferrate: estimated dma avg xfer rate in kbits/sec
 */
struct brcms_fifo_info {
	u16 ampdu_pld_size;
	u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1];
	u16 prev_txfunfl;
	u32 accum_txfunfl;
	u32 accum_txampdu;
	u32 prev_txampdu;
	u32 dmaxferrate;
};

/* AMPDU module specific state
 *
 * wlc: pointer to main wlc structure
 * scb_handle: scb cubby handle to retrieve data from scb
 * ini_enable: per-tid initiator enable/disable of ampdu
 * ba_tx_wsize: Tx ba window size (in pdu)
 * ba_rx_wsize: Rx ba window size (in pdu)
 * retry_limit: mpdu transmit retry limit
 * rr_retry_limit: mpdu transmit retry limit at regular rate
 * retry_limit_tid: per-tid mpdu transmit retry limit
 * rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate
 * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
 * max_pdu: max pdus allowed in ampdu
 * dur: max duration of an ampdu (in msec)
 * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
 * ffpld_rsvd: number of bytes to reserve for preload
 * max_txlen: max size of ampdu per mcs, bw and sgi
 * mfbr: enable multiple fallback rate
 * tx_max_funl: underflows should be kept such that
 *		(tx_max_funl * underflows) < tx frames
 * fifo_tb: table of fifo infos
 */
struct ampdu_info {
	struct brcms_c_info *wlc;
	int scb_handle;
	u8 ini_enable[AMPDU_MAX_SCB_TID];
	u8 ba_tx_wsize;
	u8 ba_rx_wsize;
	u8 retry_limit;
	u8 rr_retry_limit;
	u8 retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 mpdu_density;
	s8 max_pdu;
	u8 dur;
	u8 rx_factor;
	u32 ffpld_rsvd;
	u32 max_txlen[MCS_TABLE_SIZE][2][2];
	bool mfbr;
	u32 tx_max_funl;
	struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};
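
/*
 * Worked encodings for two of the fields above (for reference): an
 * mpdu_density of 6 means 2^(6-1)/8 = 4 usec minimum MPDU spacing, and an
 * rx_factor of IEEE80211_HT_MAX_AMPDU_64K (3) advertises 2^(13+3) byte,
 * i.e. 64 KB, receive aggregates.
 */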

/* used for flushing ampdu packets */
struct cb_del_ampdu_pars {
	struct ieee80211_sta *sta;
	u16 tid;
};

static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
	u32 rate, mcs;

	for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
		/* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
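		/* e.g. MCS 7 at 20 MHz without SGI is 65000 kbps, so with the
		 * default 5 ms cap this gives 65000 * 5 / 8 = 40625 bytes
		 * (assuming mcs_2_rate() reports the rate in kbps)
		 */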
		/* 20MHz, No SGI */
		rate = mcs_2_rate(mcs, false, false);
		ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
		/* 40 MHz, No SGI */
		rate = mcs_2_rate(mcs, true, false);
		ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
		/* 20MHz, SGI */
		rate = mcs_2_rate(mcs, false, true);
		ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
		/* 40 MHz, SGI */
		rate = mcs_2_rate(mcs, true, true);
		ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
	}
}

static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
{
	if (BRCMS_PHY_11N_CAP(ampdu->wlc->band))
		return true;
	else
		return false;
}

static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
{
	struct brcms_c_info *wlc = ampdu->wlc;
	struct bcma_device *core = wlc->hw->d11core;

	wlc->pub->_ampdu = false;

	if (on) {
		if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
			brcms_err(core, "wl%d: driver not nmode enabled\n",
				  wlc->pub->unit);
			return -ENOTSUPP;
		}
		if (!brcms_c_ampdu_cap(ampdu)) {
			brcms_err(core, "wl%d: device not ampdu capable\n",
				  wlc->pub->unit);
			return -ENOTSUPP;
		}
		wlc->pub->_ampdu = on;
	}

	return 0;
}

static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
{
	int i, j;
	struct brcms_fifo_info *fifo;

	for (j = 0; j < NUM_FFPLD_FIFO; j++) {
		fifo = (ampdu->fifo_tb + j);
		fifo->ampdu_pld_size = 0;
		for (i = 0; i <= FFPLD_MAX_MCS; i++)
			fifo->mcs2ampdu_table[i] = 255;
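		/* 255 means "no per-MCS cap yet"; the entries are only
		 * lowered by brcms_c_ffpld_check_txfunfl() once tx
		 * underflows persist
		 */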
		fifo->dmaxferrate = 0;
		fifo->accum_txampdu = 0;
		fifo->prev_txfunfl = 0;
		fifo->accum_txfunfl = 0;

	}
}

struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
{
	struct ampdu_info *ampdu;
	int i;

	ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
	if (!ampdu)
		return NULL;

	ampdu->wlc = wlc;

	for (i = 0; i < AMPDU_MAX_SCB_TID; i++)
		ampdu->ini_enable[i] = true;
	/* Disable ampdu for VO by default */
	ampdu->ini_enable[PRIO_8021D_VO] = false;
	ampdu->ini_enable[PRIO_8021D_NC] = false;

	/* Disable ampdu for BK by default since not enough fifo space */
	ampdu->ini_enable[PRIO_8021D_NONE] = false;
	ampdu->ini_enable[PRIO_8021D_BK] = false;

	ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE;
	ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE;
	ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
	ampdu->max_pdu = AUTO;
	ampdu->dur = AMPDU_MAX_DUR;

	ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
	/*
	 * bump max ampdu rcv size to 64k for all 11n
	 * devices except 4321A0 and 4321A1
	 */
	if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
		ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
	else
		ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
	ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;

	for (i = 0; i < AMPDU_MAX_SCB_TID; i++) {
		ampdu->retry_limit_tid[i] = ampdu->retry_limit;
		ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit;
	}

	brcms_c_scb_ampdu_update_max_txlen(ampdu, ampdu->dur);
	ampdu->mfbr = false;
	/* try to set ampdu to the default value */
	brcms_c_ampdu_set(ampdu, wlc->pub->_ampdu);

	ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
	brcms_c_ffpld_init(ampdu);

	return ampdu;
}

void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
{
	kfree(ampdu);
}

static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
					    struct scb *scb)
{
	struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
	int i;

	scb_ampdu->max_pdu = AMPDU_NUM_MPDU;

	/* go back to legacy size if some preloading is occurring */
	for (i = 0; i < NUM_FFPLD_FIFO; i++) {
		if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR)
			scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY;
	}

	/* apply user override */
	if (ampdu->max_pdu != AUTO)
		scb_ampdu->max_pdu = (u8) ampdu->max_pdu;

	scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu,
				   AMPDU_SCB_MAX_RELEASE);

	if (scb_ampdu->max_rx_ampdu_bytes)
		scb_ampdu->release = min_t(u8, scb_ampdu->release,
			scb_ampdu->max_rx_ampdu_bytes / 1600);
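	/* e.g. a 64 KB rx limit corresponds to roughly 65535 / 1600 = 40
	 * MPDUs here, so the AMPDU_SCB_MAX_RELEASE cap of 20 above usually
	 * remains the limiting factor
	 */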

	scb_ampdu->release = min(scb_ampdu->release,
				 ampdu->fifo_tb[TX_AC_BE_FIFO].
				 mcs2ampdu_table[FFPLD_MAX_MCS]);
}

static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu)
{
	brcms_c_scb_ampdu_update_config(ampdu, &ampdu->wlc->pri_scb);
}

static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
{
	int i;
	u32 phy_rate, dma_rate, tmp;
	u8 max_mpdu;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);

	/* recompute the dma rate */
	/* note : we divide/multiply by 100 to avoid integer overflows */
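	/* illustrative numbers: assuming mcs_2_rate() reports ~405000 kbps
	 * for MCS 23 at 40 MHz, 16 mpdus of 1800 bytes with a 2000 byte
	 * preload give a dma_rate of roughly 377000 kbits/sec
	 */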
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);
	phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	dma_rate =
	    (((phy_rate / 100) *
	      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
	     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
	fifo->dmaxferrate = dma_rate;

	/* fill up the mcs2ampdu table; do not recalc the last mcs */
	dma_rate = dma_rate >> 7;
	for (i = 0; i < FFPLD_MAX_MCS; i++) {
		/* shifting to keep it within integer range */
		phy_rate = mcs_2_rate(i, true, false) >> 7;
		if (phy_rate > dma_rate) {
			tmp = ((fifo->ampdu_pld_size * phy_rate) /
			       ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
			tmp = min_t(u32, tmp, 255);
			fifo->mcs2ampdu_table[i] = (u8) tmp;
		}
	}
}

/* evaluate the dma transfer rate using the tx underflows as feedback.
 * If necessary, increase tx fifo preloading. If not enough,
 * decrease maximum ampdu size for each mcs till underflows stop
 * Return 1 if pre-loading not active, -1 if not an underflow event,
 * 0 if pre-loading module took care of the event.
 */
static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
	struct ampdu_info *ampdu = wlc->ampdu;
	u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	u32 txunfl_ratio;
	u8 max_mpdu;
	u32 current_ampdu_cnt = 0;
	u16 max_pld_size;
	u32 new_txunfl;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
	uint xmtfifo_sz;
	u16 cur_txunfl;

	/* return if we got here for a different reason than underflows */
	cur_txunfl = brcms_b_read_shm(wlc->hw,
				      M_UCODE_MACSTAT +
				      offsetof(struct macstat, txfunfl[fid]));
	new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
	if (new_txunfl == 0) {
		brcms_dbg_ht(wlc->hw->d11core,
			     "TX status FRAG set but no tx underflows\n");
		return -1;
	}
	fifo->prev_txfunfl = cur_txunfl;

	if (!ampdu->tx_max_funl)
		return 1;

	/* check if fifo is big enough */
	if (brcms_b_xmtfifo_sz_get(wlc->hw, fid, &xmtfifo_sz))
		return -1;

	if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
		return 1;

	max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd;
	fifo->accum_txfunfl += new_txunfl;

	/* we need to wait for at least 10 underflows */
	if (fifo->accum_txfunfl < 10)
		return 0;

	brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d  tx_underflows %d\n",
		     current_ampdu_cnt, fifo->accum_txfunfl);

	/*
	   compute the current ratio of tx unfl per ampdu.
	   When the current ampdu count becomes too
	   big while the ratio remains small, we reset
	   the current count in order to not
	   introduce too big of a latency in detecting a
	   large amount of tx underflows later.
	 */

	txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;

	if (txunfl_ratio > ampdu->tx_max_funl) {
		if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
			fifo->accum_txfunfl = 0;

		return 0;
	}
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);

	/* If the preload already covers a full aggregate
	   (max_mpdu * FFPLD_MPDU_SIZE), there is nothing more we can do.
	 */

	if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) {
		fifo->accum_txfunfl = 0;
		return 0;
	}

	if (fifo->ampdu_pld_size < max_pld_size) {

		/* increment the preload size by FFPLD_PLD_INCR bytes */
		fifo->ampdu_pld_size += FFPLD_PLD_INCR;
		if (fifo->ampdu_pld_size > max_pld_size)
			fifo->ampdu_pld_size = max_pld_size;

		/* update scb release size */
		brcms_c_scb_ampdu_update_config_all(ampdu);

		/*
		 * compute a new dma xfer rate for max_mpdu @ max mcs.
		 * This is the minimum dma rate that can achieve no
		 * underflow condition for the current mpdu size.
		 *
		 * note : we divide/multiply by 100 to avoid integer overflows
		 */
		fifo->dmaxferrate =
		    (((phy_rate / 100) *
		      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
		     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;

		brcms_dbg_ht(wlc->hw->d11core,
			     "DMA estimated transfer rate %d; "
			     "pre-load size %d\n",
			     fifo->dmaxferrate, fifo->ampdu_pld_size);
	} else {

		/* decrease ampdu size */
		if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) {
			if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255)
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] =
				    AMPDU_NUM_MPDU_LEGACY - 1;
			else
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;

			/* recompute the table */
			brcms_c_ffpld_calc_mcs2ampdu_table(ampdu, fid);

			/* update scb release size */
			brcms_c_scb_ampdu_update_config_all(ampdu);
		}
	}
	fifo->accum_txfunfl = 0;
	return 0;
}

void
brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
	u8 ba_wsize,		/* negotiated ba window size (in pdu) */
	uint max_rx_ampdu_bytes) /* from ht_cap in beacon */
{
	struct scb_ampdu *scb_ampdu;
	struct scb_ampdu_tid_ini *ini;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct scb *scb = &wlc->pri_scb;
	scb_ampdu = &scb->scb_ampdu;

	if (!ampdu->ini_enable[tid]) {
		brcms_err(wlc->hw->d11core, "%s: Rejecting tid %d\n",
			  __func__, tid);
		return;
	}

	ini = &scb_ampdu->ini[tid];
	ini->tid = tid;
	ini->scb = scb_ampdu->scb;
	ini->ba_wsize = ba_wsize;
	scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
}

void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
				 struct brcms_c_info *wlc)
{
	session->wlc = wlc;
	skb_queue_head_init(&session->skb_list);
	session->max_ampdu_len = 0;    /* determined from first MPDU */
	session->max_ampdu_frames = 0; /* determined from first MPDU */
	session->ampdu_len = 0;
	session->dma_len = 0;
}
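
/*
 * Typical session lifecycle (a sketch based on the API in this file):
 * the caller resets a session, feeds it MPDUs via brcms_c_ampdu_add_frame()
 * until it returns -ENOSPC, then calls brcms_c_ampdu_finalize() to patch the
 * first and last MPDU headers before the queued list is handed to DMA.
 */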

/*
 * Preps the given packet for AMPDU based on the session data. If the
 * frame cannot be accommodated in the current session, -ENOSPC is
 * returned.
 */
int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
			    struct sk_buff *p)
{
	struct brcms_c_info *wlc = session->wlc;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct scb *scb = &wlc->pri_scb;
	struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
	struct d11txh *txh = (struct d11txh *)p->data;
	unsigned ampdu_frames;
	u8 ndelim, tid;
	u8 *plcp;
	uint len;
	u16 mcl;
	bool fbr_iscck;
	bool rr;

	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
	plcp = (u8 *)(txh + 1);
	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
	len = roundup(len, 4) + (ndelim + 1) * AMPDU_DELIMITER_LEN;
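	/* e.g. a 1536 byte PLCP length with one null delimiter accounts for
	 * 1536 + (1 + 1) * 4 = 1544 bytes of the aggregate budget
	 */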

	ampdu_frames = skb_queue_len(&session->skb_list);
	if (ampdu_frames != 0) {
		struct sk_buff *first;

		if (ampdu_frames + 1 > session->max_ampdu_frames ||
		    session->ampdu_len + len > session->max_ampdu_len)
			return -ENOSPC;

		/*
		 * We aren't really out of space if the new frame is of
		 * a different priority, but we want the same behaviour
		 * so return -ENOSPC anyway.
		 *
		 * XXX: The old AMPDU code did this, but is it really
		 * necessary?
		 */
		first = skb_peek(&session->skb_list);
		if (p->priority != first->priority)
			return -ENOSPC;
	}

	/*
	 * Now that we're sure this frame can be accommodated, update the
	 * session information.
	 */
	session->ampdu_len += len;
	session->dma_len += p->len;

	tid = (u8)p->priority;

	/* Handle retry limits */
	if (txrate[0].count <= ampdu->rr_retry_limit_tid[tid]) {
		txrate[0].count++;
		rr = true;
	} else {
		txrate[1].count++;
		rr = false;
	}
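	/* with the default rr_retry_limit of 2, the first three attempts are
	 * accounted against the primary rate before the fallback counter
	 * starts incrementing
	 */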

	if (ampdu_frames == 0) {
		u8 plcp0, plcp3, is40, sgi, mcs;
		uint fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
		struct brcms_fifo_info *f = &ampdu->fifo_tb[fifo];

		if (rr) {
			plcp0 = plcp[0];
			plcp3 = plcp[3];
		} else {
			plcp0 = txh->FragPLCPFallback[0];
			plcp3 = txh->FragPLCPFallback[3];

		}

		/* Limit AMPDU size based on MCS */
		is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
		sgi = plcp3_issgi(plcp3) ? 1 : 0;
		mcs = plcp0 & ~MIMO_PLCP_40MHZ;
		session->max_ampdu_len = min(scb_ampdu->max_rx_ampdu_bytes,
					     ampdu->max_txlen[mcs][is40][sgi]);

		session->max_ampdu_frames = scb_ampdu->max_pdu;
		if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
			session->max_ampdu_frames =
				min_t(u16, f->mcs2ampdu_table[mcs],
				      session->max_ampdu_frames);
		}
	}

	/*
	 * Treat all frames as "middle" frames of AMPDU here. First and
	 * last frames must be fixed up after all MPDUs have been prepped.
	 */
	mcl = le16_to_cpu(txh->MacTxControlLow);
	mcl &= ~TXC_AMPDU_MASK;
	mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
	mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
	txh->MacTxControlLow = cpu_to_le16(mcl);
	txh->PreloadSize = 0;	/* always default to 0 */

	skb_queue_tail(&session->skb_list, p);

	return 0;
}

void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
{
	struct brcms_c_info *wlc = session->wlc;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct sk_buff *first, *last;
	struct d11txh *txh;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *txrate;
	u8 ndelim;
	u8 *plcp;
	uint len;
	uint fifo;
	struct brcms_fifo_info *f;
	u16 mcl;
	bool fbr;
	bool fbr_iscck;
	struct ieee80211_rts *rts;
	bool use_rts = false, use_cts = false;
	u16 dma_len = session->dma_len;
	u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
	u32 rspec = 0, rspec_fallback = 0;
	u32 rts_rspec = 0, rts_rspec_fallback = 0;
	u8 plcp0, plcp3, is40, sgi, mcs;
	u16 mch;
	u8 preamble_type = BRCMS_GF_PREAMBLE;
	u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
	u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
	u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;

	if (skb_queue_empty(&session->skb_list))
		return;

	first = skb_peek(&session->skb_list);
	last = skb_peek_tail(&session->skb_list);

	/* Need to fix up last MPDU first to adjust AMPDU length */
	txh = (struct d11txh *)last->data;
	fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
	f = &ampdu->fifo_tb[fifo];

	mcl = le16_to_cpu(txh->MacTxControlLow);
	mcl &= ~TXC_AMPDU_MASK;
	mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
	txh->MacTxControlLow = cpu_to_le16(mcl);

	/* remove the null delimiter after last mpdu */
	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
	txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
	session->ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;

	/* remove the pad len from last mpdu */
	fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
	session->ampdu_len -= roundup(len, 4) - len;

	/* Now fix up the first MPDU */
	tx_info = IEEE80211_SKB_CB(first);
	txrate = tx_info->status.rates;
	txh = (struct d11txh *)first->data;
	plcp = (u8 *)(txh + 1);
	rts = (struct ieee80211_rts *)&txh->rts_frame;

	mcl = le16_to_cpu(txh->MacTxControlLow);
	/* If only one MPDU leave it marked as last */
	if (first != last) {
		mcl &= ~TXC_AMPDU_MASK;
		mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
	}
	mcl |= TXC_STARTMSDU;
	if (ieee80211_is_rts(rts->frame_control)) {
		mcl |= TXC_SENDRTS;
		use_rts = true;
	}
	if (ieee80211_is_cts(rts->frame_control)) {
		mcl |= TXC_SENDCTS;
		use_cts = true;
	}
	txh->MacTxControlLow = cpu_to_le16(mcl);

	fbr = txrate[1].count > 0;
	if (!fbr) {
		plcp0 = plcp[0];
		plcp3 = plcp[3];
	} else {
		plcp0 = txh->FragPLCPFallback[0];
		plcp3 = txh->FragPLCPFallback[3];
	}
	is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
	sgi = plcp3_issgi(plcp3) ? 1 : 0;
	mcs = plcp0 & ~MIMO_PLCP_40MHZ;

	if (is40) {
		if (CHSPEC_SB_UPPER(wlc_phy_chanspec_get(wlc->band->pi)))
			mimo_ctlchbw = PHY_TXC1_BW_20MHZ_UP;
		else
			mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
	}

	/* rebuild the rspec and rspec_fallback */
	rspec = RSPEC_MIMORATE;
	rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
	if (plcp[0] & MIMO_PLCP_40MHZ)
		rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);

	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
	if (fbr_iscck) {
		rspec_fallback =
			cck_rspec(cck_phy2mac_rate(txh->FragPLCPFallback[0]));
	} else {
		rspec_fallback = RSPEC_MIMORATE;
		rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
		if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
			rspec_fallback |= PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT;
	}

	if (use_rts || use_cts) {
		rts_rspec =
			brcms_c_rspec_to_rts_rspec(wlc, rspec,
						   false, mimo_ctlchbw);
		rts_rspec_fallback =
			brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback,
						   false, mimo_ctlchbw);
	}

	BRCMS_SET_MIMO_PLCP_LEN(plcp, session->ampdu_len);
	/* mark plcp to indicate ampdu */
	BRCMS_SET_MIMO_PLCP_AMPDU(plcp);

	/* reset the mixed mode header durations */
	if (txh->MModeLen) {
		u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec,
						     session->ampdu_len);
		txh->MModeLen = cpu_to_le16(mmodelen);
		preamble_type = BRCMS_MM_PREAMBLE;
	}
	if (txh->MModeFbrLen) {
		u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback,
						     session->ampdu_len);
		txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
		fbr_preamble_type = BRCMS_MM_PREAMBLE;
	}

	/* set the preload length */
	if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
		dma_len = min(dma_len, f->ampdu_pld_size);
		txh->PreloadSize = cpu_to_le16(dma_len);
	} else {
		txh->PreloadSize = 0;
	}

	mch = le16_to_cpu(txh->MacTxControlHigh);

	/* update RTS dur fields */
	if (use_rts || use_cts) {
		u16 durid;
		if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
		    TXC_PREAMBLE_RTS_MAIN_SHORT)
			rts_preamble_type = BRCMS_SHORT_PREAMBLE;

		if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
		     TXC_PREAMBLE_RTS_FB_SHORT)
			rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;

		durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
						   rspec, rts_preamble_type,
						   preamble_type,
						   session->ampdu_len, true);
		rts->duration = cpu_to_le16(durid);
		durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
						   rts_rspec_fallback,
						   rspec_fallback,
						   rts_fbr_preamble_type,
						   fbr_preamble_type,
						   session->ampdu_len, true);
		txh->RTSDurFallback = cpu_to_le16(durid);
		/* set TxFesTimeNormal */
		txh->TxFesTimeNormal = rts->duration;
		/* set fallback rate version of TxFesTimeNormal */
		txh->TxFesTimeFallback = txh->RTSDurFallback;
	}

	/* set flag and plcp for fallback rate */
	if (fbr) {
		mch |= TXC_AMPDU_FBR;
		txh->MacTxControlHigh = cpu_to_le16(mch);
		BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
		BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
	}

	brcms_dbg_ht(wlc->hw->d11core, "wl%d: count %d ampdu_len %d\n",
		     wlc->pub->unit, skb_queue_len(&session->skb_list),
		     session->ampdu_len);
}

static void
brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
			  struct ieee80211_tx_info *tx_info,
			  struct tx_status *txs, u8 mcs)
{
	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
	int i;

	/* clear the rest of the rates */
	for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
		txrate[i].idx = -1;
		txrate[i].count = 0;
	}
}

static void
brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
			      struct sk_buff *p, struct tx_status *txs,
			      u32 s1, u32 s2)
{
	struct scb_ampdu *scb_ampdu;
	struct brcms_c_info *wlc = ampdu->wlc;
	struct scb_ampdu_tid_ini *ini;
	u8 bitmap[8], queue, tid;
	struct d11txh *txh;
	u8 *plcp;
	struct ieee80211_hdr *h;
	u16 seq, start_seq = 0, bindex, index, mcl;
	u8 mcs = 0;
	bool ba_recd = false, ack_recd = false;
	u8 suc_mpdu = 0, tot_mpdu = 0;
	uint supr_status;
	bool update_rate = true, retry = true, tx_error = false;
	u16 mimoantsel = 0;
	u8 antselid = 0;
	u8 retry_limit, rr_retry_limit;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);

#ifdef DEBUG
	u8 hole[AMPDU_MAX_MPDU];
	memset(hole, 0, sizeof(hole));
#endif

	scb_ampdu = &scb->scb_ampdu;
	tid = (u8) (p->priority);

	ini = &scb_ampdu->ini[tid];
	retry_limit = ampdu->retry_limit_tid[tid];
	rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
	memset(bitmap, 0, sizeof(bitmap));
	queue = txs->frameid & TXFID_QUEUE_MASK;
	supr_status = txs->status & TX_STATUS_SUPR_MASK;

	if (txs->status & TX_STATUS_ACK_RCV) {
		if (TX_STATUS_SUPR_UF == supr_status)
			update_rate = false;

		WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
		start_seq = txs->sequence >> SEQNUM_SHIFT;
		bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
		    TX_STATUS_BA_BMAP03_SHIFT;

		WARN_ON(s1 & TX_STATUS_INTERMEDIATE);
		WARN_ON(!(s1 & TX_STATUS_AMPDU));

		bitmap[0] |=
		    (s1 & TX_STATUS_BA_BMAP47_MASK) <<
		    TX_STATUS_BA_BMAP47_SHIFT;
		bitmap[1] = (s1 >> 8) & 0xff;
		bitmap[2] = (s1 >> 16) & 0xff;
		bitmap[3] = (s1 >> 24) & 0xff;

		bitmap[4] = s2 & 0xff;
		bitmap[5] = (s2 >> 8) & 0xff;
		bitmap[6] = (s2 >> 16) & 0xff;
		bitmap[7] = (s2 >> 24) & 0xff;
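		/* at this point the 64-bit block-ack bitmap is reassembled:
		 * bits 0-3 came from the first status word, bits 4-31 from
		 * s1 and bits 32-63 from s2
		 */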

		ba_recd = true;
	} else {
		if (supr_status) {
			update_rate = false;
			if (supr_status == TX_STATUS_SUPR_BADCH) {
				brcms_dbg_ht(wlc->hw->d11core,
					  "%s: Pkt tx suppressed, illegal channel possibly %d\n",
					  __func__, CHSPEC_CHANNEL(
					  wlc->default_bss->chanspec));
			} else {
				if (supr_status != TX_STATUS_SUPR_FRAG)
					brcms_err(wlc->hw->d11core,
						  "%s: supr_status 0x%x\n",
						  __func__, supr_status);
			}
			/* no need to retry for badch; will fail again */
			if (supr_status == TX_STATUS_SUPR_BADCH ||
			    supr_status == TX_STATUS_SUPR_EXPTIME) {
				retry = false;
			} else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
				/* TX underflow:
				 *   try tuning pre-loading or ampdu size
				 */
			} else if (supr_status == TX_STATUS_SUPR_FRAG) {
				/*
				 * if there were underflows, but pre-loading
				 * is not active, notify rate adaptation.
				 */
				if (brcms_c_ffpld_check_txfunfl(wlc, queue) > 0)
					tx_error = true;
			}
		} else if (txs->phyerr) {
			update_rate = false;
			brcms_dbg_ht(wlc->hw->d11core,
				     "%s: ampdu tx phy error (0x%x)\n",
				     __func__, txs->phyerr);
		}
	}

	/* loop through all pkts and retry if not acked */
	while (p) {
		tx_info = IEEE80211_SKB_CB(p);
		txh = (struct d11txh *) p->data;
		mcl = le16_to_cpu(txh->MacTxControlLow);
		plcp = (u8 *) (txh + 1);
		h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
		seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;

		trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));

		if (tot_mpdu == 0) {
			mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
			mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
		}

		index = TX_SEQ_TO_INDEX(seq);
		ack_recd = false;
		if (ba_recd) {
			bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
			brcms_dbg_ht(wlc->hw->d11core,
				     "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
				     tid, seq, start_seq, bindex,
				     isset(bitmap, bindex), index);
			/* if acked then clear bit and free packet */
			if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
			    && isset(bitmap, bindex)) {
				ini->txretry[index] = 0;

				/*
				 * ampdu_ack_len:
				 *   number of acked aggregated frames
				 */
				/* ampdu_len: number of aggregated frames */
				brcms_c_ampdu_rate_status(wlc, tx_info, txs,
							  mcs);
				tx_info->flags |= IEEE80211_TX_STAT_ACK;
				tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
				tx_info->status.ampdu_ack_len =
					tx_info->status.ampdu_len = 1;

				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);

				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
				ack_recd = true;
				suc_mpdu++;
			}
		}
		/* either retransmit or send bar if ack not recd */
		if (!ack_recd) {
			if (retry && (ini->txretry[index] < (int)retry_limit)) {
				int ret;
				ini->txretry[index]++;
				ret = brcms_c_txfifo(wlc, queue, p);
				/*
				 * We shouldn't be out of space in the DMA
				 * ring here since we're reinserting a frame
				 * that was just pulled out.
				 */
				WARN_ONCE(ret, "queue %d out of txds\n", queue);
			} else {
				/* Retry timeout */
				ieee80211_tx_info_clear_status(tx_info);
				tx_info->status.ampdu_ack_len = 0;
				tx_info->status.ampdu_len = 1;
				tx_info->flags |=
				    IEEE80211_TX_STAT_AMPDU_NO_BACK;
				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);
				brcms_dbg_ht(wlc->hw->d11core,
					     "BA Timeout, seq %d\n",
					     seq);
				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
			}
		}
		tot_mpdu++;

		/* break out if last packet of ampdu */
		if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
		    TXC_AMPDU_LAST)
			break;

		p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
	}

	/* update rate state */
	antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
}

void
brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
		     struct sk_buff *p, struct tx_status *txs)
{
	struct scb_ampdu *scb_ampdu;
	struct brcms_c_info *wlc = ampdu->wlc;
	struct scb_ampdu_tid_ini *ini;
	u32 s1 = 0, s2 = 0;
	struct ieee80211_tx_info *tx_info;

	tx_info = IEEE80211_SKB_CB(p);

	/* BMAC_NOTE: For the split driver, the second-level txstatus comes
	 * later. So if the ACK was received, wait for the second level;
	 * otherwise just use the first one.
	 */
	if (txs->status & TX_STATUS_ACK_RCV) {
		u8 status_delay = 0;

		/* wait until the next 8 bytes of txstatus are available */
		s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
		while ((s1 & TXS_V) == 0) {
			udelay(1);
			status_delay++;
			if (status_delay > 10)
				return; /* error condition */
			s1 = bcma_read32(wlc->hw->d11core,
					 D11REGOFFS(frmtxstatus));
		}

		s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
	}

	if (scb) {
		scb_ampdu = &scb->scb_ampdu;
		ini = &scb_ampdu->ini[p->priority];
		brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
	} else {
		/* loop through all pkts and free */
		u8 queue = txs->frameid & TXFID_QUEUE_MASK;
		struct d11txh *txh;
		u16 mcl;
		while (p) {
			tx_info = IEEE80211_SKB_CB(p);
			txh = (struct d11txh *) p->data;
			trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
					   sizeof(*txh));
			mcl = le16_to_cpu(txh->MacTxControlLow);
			brcmu_pkt_buf_free_skb(p);
			/* break out if last packet of ampdu */
			if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
			    TXC_AMPDU_LAST)
				break;
			p = dma_getnexttxp(wlc->hw->di[queue],
					   DMA_RANGE_TRANSMITTED);
		}
	}
}

void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc)
{
	char template[T_RAM_ACCESS_SZ * 2];

	/* driver needs to write the ta in the template; ta is at offset 16 */
	memset(template, 0, sizeof(template));
	memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
	brcms_b_write_template_ram(wlc->hw, (T_BA_TPL_BASE + 16),
				  (T_RAM_ACCESS_SZ * 2),
				  template);
}

bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid)
{
	return wlc->ampdu->ini_enable[tid];
}

void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
{
	struct brcms_c_info *wlc = ampdu->wlc;

	/*
	 * Extend ucode internal watchdog timer to
	 * match larger received frames
	 */
	if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
	    IEEE80211_HT_MAX_AMPDU_64K) {
		brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
		brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
	} else {
		brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
		brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
	}
}

/*
 * callback function that helps invalidate ampdu packets in a DMA queue
 */
static void dma_cb_fn_ampdu(void *txi, void *arg_a)
{
	struct ieee80211_sta *sta = arg_a;
	struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    (tx_info->rate_driver_data[0] == sta || sta == NULL))
		tx_info->rate_driver_data[0] = NULL;
}

/*
 * When a remote party is no longer available for ampdu communication, any
 * pending tx ampdu packets in the driver have to be flushed.
 */
void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
		     struct ieee80211_sta *sta, u16 tid)
{
	brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
}